Skip to content

Commit

Permalink
Apply some clippy lints (paritytech#11154)
Browse files Browse the repository at this point in the history
* Apply some clippy lints

* Revert clippy ci changes

* Update client/cli/src/commands/generate.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/cli/src/commands/inspect_key.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/db/src/bench.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/db/src/bench.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/service/src/client/block_rules.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/service/src/client/block_rules.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/network/src/transactions.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/network/src/protocol.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Revert due to missing `or_default` function.

* Fix compilation and simplify code

* Undo change that corrupts benchmark.

* fix clippy

* Update client/service/test/src/lib.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/state-db/src/noncanonical.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update client/state-db/src/noncanonical.rs

remove leftovers!

* Update client/tracing/src/logging/directives.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update utils/fork-tree/src/lib.rs

Co-authored-by: Bastian Köcher <[email protected]>

* added needed ref

* Update frame/referenda/src/benchmarking.rs

* Simplify byte-vec creation

* let's just not overlap the ranges

* Correction

* cargo fmt

* Update utils/frame/benchmarking-cli/src/shared/stats.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update utils/frame/benchmarking-cli/src/pallet/command.rs

Co-authored-by: Bastian Köcher <[email protected]>

* Update utils/frame/benchmarking-cli/src/pallet/command.rs

Co-authored-by: Bastian Köcher <[email protected]>

Co-authored-by: Bastian Köcher <[email protected]>
Co-authored-by: Giles Cope <[email protected]>
  • Loading branch information
3 people authored and ark0f committed Feb 27, 2023
1 parent 356988c commit fcdc7da
Show file tree
Hide file tree
Showing 368 changed files with 1,938 additions and 2,247 deletions.
2 changes: 1 addition & 1 deletion bin/node-template/node/src/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ where

io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)));

io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())));
io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client)));

// Extend this RPC with a custom API by using the following syntax.
// `YourRpcStruct` should have a reference to a client, which is needed
Expand Down
4 changes: 2 additions & 2 deletions bin/node-template/node/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ pub fn new_partial(

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
&config,
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
Expand Down Expand Up @@ -263,7 +263,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(
StartAuraParams {
slot_duration,
client: client.clone(),
client,
select_chain,
block_import,
proposer_factory,
Expand Down
2 changes: 1 addition & 1 deletion bin/node-template/pallets/template/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ pub mod pallet {
// Read a value from storage.
match <Something<T>>::get() {
// Return an error if the value has not been set.
None => Err(Error::<T>::NoneValue)?,
None => return Err(Error::<T>::NoneValue.into()),
Some(old) => {
// Increment the value read from storage; will error in the event of overflow.
let new = old.checked_add(1).ok_or(Error::<T>::StorageOverflow)?;
Expand Down
2 changes: 1 addition & 1 deletion bin/node-template/runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -478,7 +478,7 @@ impl_runtime_apis! {

let storage_info = AllPalletsWithSystem::storage_info();

return (list, storage_info)
(list, storage_info)
}

fn dispatch_benchmark(
Expand Down
2 changes: 1 addition & 1 deletion bin/node/bench/src/core.rs
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ pub fn run_benchmark(benchmark: Box<dyn BenchmarkDescription>, mode: Mode) -> Be
durations.push(duration.as_nanos());
}

durations.sort();
durations.sort_unstable();

let raw_average = (durations.iter().sum::<u128>() / (durations.len() as u128)) as u64;
let average = (durations.iter().skip(10).take(30).sum::<u128>() / 30) as u64;
Expand Down
29 changes: 9 additions & 20 deletions bin/node/bench/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -85,33 +85,22 @@ fn main() {

let mut import_benchmarks = Vec::new();

for profile in [Profile::Wasm, Profile::Native].iter() {
for profile in [Profile::Wasm, Profile::Native] {
for size in [
SizeType::Empty,
SizeType::Small,
SizeType::Medium,
SizeType::Large,
SizeType::Full,
SizeType::Custom(opt.transactions.unwrap_or(0)),
]
.iter()
{
] {
for block_type in [
BlockType::RandomTransfersKeepAlive,
BlockType::RandomTransfersReaping,
BlockType::Noop,
]
.iter()
{
for database_type in
[BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb].iter()
{
import_benchmarks.push((
profile,
size.clone(),
block_type.clone(),
database_type,
));
] {
for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb] {
import_benchmarks.push((profile, size, block_type, database_type));
}
}
}
Expand All @@ -120,11 +109,11 @@ fn main() {
let benchmarks = matrix!(
(profile, size, block_type, database_type) in import_benchmarks.into_iter() =>
ImportBenchmarkDescription {
profile: *profile,
profile,
key_types: KeyTypes::Sr25519,
size: size,
block_type: block_type,
database_type: *database_type,
size,
block_type,
database_type,
},
(size, db_type) in
[
Expand Down
2 changes: 1 addition & 1 deletion bin/node/bench/src/state_sizes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.

/// Kusama value size distribution
pub const KUSAMA_STATE_DISTRIBUTION: &'static [(u32, u32)] = &[
pub const KUSAMA_STATE_DISTRIBUTION: &[(u32, u32)] = &[
(32, 35),
(33, 20035),
(34, 5369),
Expand Down
2 changes: 1 addition & 1 deletion bin/node/bench/src/trie.rs
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ impl core::Benchmark for TrieWriteBenchmark {
let mut db = self.database.clone();
let kvdb = db.open(self.database_type);

let mut new_root = self.root.clone();
let mut new_root = self.root;

let mut overlay = HashMap::new();
let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay };
Expand Down
6 changes: 3 additions & 3 deletions bin/node/cli/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -113,10 +113,10 @@ pub fn create_extrinsic(
let signature = raw_payload.using_encoded(|e| sender.sign(e));

node_runtime::UncheckedExtrinsic::new_signed(
function.clone(),
function,
sp_runtime::AccountId32::from(sender.public()).into(),
node_runtime::Signature::Sr25519(signature.clone()),
extra.clone(),
node_runtime::Signature::Sr25519(signature),
extra,
)
}

Expand Down
6 changes: 3 additions & 3 deletions bin/node/runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -631,7 +631,7 @@ impl Get<Option<(usize, ExtendedBalance)>> for OffchainRandomBalancing {
use sp_runtime::traits::TrailingZeroInput;
let iters = match MINER_MAX_ITERATIONS {
0 => 0,
max @ _ => {
max => {
let seed = sp_io::offchain::random_seed();
let random = <u32>::decode(&mut TrailingZeroInput::new(&seed))
.expect("input is padded with zeroes; qed") %
Expand Down Expand Up @@ -1148,7 +1148,7 @@ where
let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?;
let address = Indices::unlookup(account);
let (call, extra, _) = raw_payload.deconstruct();
Some((call, (address, signature.into(), extra)))
Some((call, (address, signature, extra)))
}
}

Expand Down Expand Up @@ -1911,7 +1911,7 @@ impl_runtime_apis! {

let storage_info = AllPalletsWithSystem::storage_info();

return (list, storage_info)
(list, storage_info)
}

fn dispatch_benchmark(
Expand Down
19 changes: 8 additions & 11 deletions bin/node/testing/src/bench.rs
Original file line number Diff line number Diff line change
Expand Up @@ -280,8 +280,7 @@ impl<'a> BlockContentIterator<'a> {
let genesis_hash = client
.block_hash(Zero::zero())
.expect("Database error?")
.expect("Genesis block always exists; qed")
.into();
.expect("Genesis block always exists; qed");

BlockContentIterator { iteration: 0, content, keyring, runtime_version, genesis_hash }
}
Expand Down Expand Up @@ -569,15 +568,13 @@ impl BenchKeyring {
genesis_hash,
);
let key = self.accounts.get(&signed).expect("Account id not found in keyring");
let signature = payload
.using_encoded(|b| {
if b.len() > 256 {
key.sign(&sp_io::hashing::blake2_256(b))
} else {
key.sign(b)
}
})
.into();
let signature = payload.using_encoded(|b| {
if b.len() > 256 {
key.sign(&sp_io::hashing::blake2_256(b))
} else {
key.sign(b)
}
});
UncheckedExtrinsic {
signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)),
function: payload.0,
Expand Down
2 changes: 1 addition & 1 deletion bin/utils/chain-spec-builder/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ fn generate_chain_spec(
}

fn generate_authority_keys_and_store(seeds: &[String], keystore_path: &Path) -> Result<(), String> {
for (n, seed) in seeds.into_iter().enumerate() {
for (n, seed) in seeds.iter().enumerate() {
let keystore: SyncCryptoStorePtr = Arc::new(
LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None)
.map_err(|err| err.to_string())?,
Expand Down
2 changes: 1 addition & 1 deletion client/allocator/src/freeing_bump.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ fn error(msg: &'static str) -> Error {
Error::Other(msg)
}

const LOG_TARGET: &'static str = "wasm-heap";
const LOG_TARGET: &str = "wasm-heap";

// The minimum possible allocation size is chosen to be 8 bytes because in that case we would have
// easier time to provide the guaranteed alignment of 8.
Expand Down
2 changes: 1 addition & 1 deletion client/api/src/backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -499,7 +499,7 @@ pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {

/// Returns true if state for given block is available.
fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor<Block>) -> bool {
self.state_at(BlockId::Hash(hash.clone())).is_ok()
self.state_at(BlockId::Hash(*hash)).is_ok()
}

/// Returns state backend with post-state of given block.
Expand Down
2 changes: 1 addition & 1 deletion client/api/src/execution_extensions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ impl<Block: traits::Block> ExecutionExtensions<Block> {
where
T: OffchainSubmitTransaction<Block> + 'static,
{
*self.transaction_pool.write() = Some(Arc::downgrade(&pool) as _);
*self.transaction_pool.write() = Some(Arc::downgrade(pool) as _);
}

/// Based on the execution context and capabilities it produces
Expand Down
34 changes: 13 additions & 21 deletions client/api/src/in_mem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -166,23 +166,19 @@ impl<Block: BlockT> Blockchain<Block> {
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
new_state: NewBlockState,
) -> sp_blockchain::Result<()> {
let number = header.number().clone();
let number = *header.number();
if new_state.is_best() {
self.apply_head(&header)?;
}

{
let mut storage = self.storage.write();
storage
.leaves
.import(hash.clone(), number.clone(), header.parent_hash().clone());
storage
.blocks
.insert(hash.clone(), StoredBlock::new(header, body, justifications));
storage.leaves.import(hash, number, header.parent_hash().clone());
storage.blocks.insert(hash, StoredBlock::new(header, body, justifications));

if let NewBlockState::Final = new_state {
storage.finalized_hash = hash;
storage.finalized_number = number.clone();
storage.finalized_number = number;
}

if number == Zero::zero() {
Expand Down Expand Up @@ -266,9 +262,9 @@ impl<Block: BlockT> Blockchain<Block> {
}
}

storage.best_hash = hash.clone();
storage.best_number = number.clone();
storage.hashes.insert(number.clone(), hash.clone());
storage.best_hash = hash;
storage.best_number = *number;
storage.hashes.insert(*number, hash);

Ok(())
}
Expand Down Expand Up @@ -362,7 +358,7 @@ impl<Block: BlockT> HeaderBackend<Block> for Blockchain<Block> {
finalized_hash: storage.finalized_hash,
finalized_number: storage.finalized_number,
finalized_state: if storage.finalized_hash != Default::default() {
Some((storage.finalized_hash.clone(), storage.finalized_number))
Some((storage.finalized_hash, storage.finalized_number))
} else {
None
},
Expand Down Expand Up @@ -428,16 +424,12 @@ impl<Block: BlockT> blockchain::Backend<Block> for Blockchain<Block> {

fn justifications(&self, id: BlockId<Block>) -> sp_blockchain::Result<Option<Justifications>> {
Ok(self.id(id).and_then(|hash| {
self.storage
.read()
.blocks
.get(&hash)
.and_then(|b| b.justifications().map(|x| x.clone()))
self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned())
}))
}

fn last_finalized(&self) -> sp_blockchain::Result<Block::Hash> {
Ok(self.storage.read().finalized_hash.clone())
Ok(self.storage.read().finalized_hash)
}

fn leaves(&self) -> sp_blockchain::Result<Vec<Block::Hash>> {
Expand Down Expand Up @@ -810,15 +802,15 @@ impl<Block: BlockT> backend::LocalBackend<Block> for Backend<Block> where Block:
/// Check that genesis storage is valid.
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> {
if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
return Err(sp_blockchain::Error::InvalidState.into())
return Err(sp_blockchain::Error::InvalidState)
}

if storage
.children_default
.keys()
.any(|child_key| !well_known_keys::is_child_storage_key(&child_key))
.any(|child_key| !well_known_keys::is_child_storage_key(child_key))
{
return Err(sp_blockchain::Error::InvalidState.into())
return Err(sp_blockchain::Error::InvalidState)
}

Ok(())
Expand Down
6 changes: 3 additions & 3 deletions client/api/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,10 +57,10 @@ pub mod utils {
/// represent the current block `hash` and its `parent hash`, if given the
/// function that's returned will assume that `hash` isn't part of the local DB
/// yet, and all searches in the DB will instead reference the parent.
pub fn is_descendent_of<'a, Block: BlockT, T>(
client: &'a T,
pub fn is_descendent_of<Block: BlockT, T>(
client: &T,
current: Option<(Block::Hash, Block::Hash)>,
) -> impl Fn(&Block::Hash, &Block::Hash) -> Result<bool, Error> + 'a
) -> impl Fn(&Block::Hash, &Block::Hash) -> Result<bool, Error> + '_
where
T: HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>,
{
Expand Down
7 changes: 3 additions & 4 deletions client/api/src/notifications.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,10 +71,9 @@ type ChildKeys = Option<HashMap<StorageKey, Option<HashSet<StorageKey>>>>;

impl StorageChangeSet {
/// Convert the change set into iterator over storage items.
pub fn iter<'a>(
&'a self,
) -> impl Iterator<Item = (Option<&'a StorageKey>, &'a StorageKey, Option<&'a StorageData>)> + 'a
{
pub fn iter(
&self,
) -> impl Iterator<Item = (Option<&StorageKey>, &StorageKey, Option<&StorageData>)> + '_ {
let top = self
.changes
.iter()
Expand Down
Loading

0 comments on commit fcdc7da

Please sign in to comment.