From d8a979256a07e94c0b33b373dcaa2c66888fd227 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 19:11:36 +0400 Subject: [PATCH 01/19] migrate primitives to v6 stabilize async backing and enable API on Rococo --- .../chain-bridge-hub-cumulus/src/lib.rs | 2 +- .../src/blockchain_rpc_client.rs | 10 +- .../src/rpc_client.rs | 25 +- cumulus/test/relay-sproof-builder/src/lib.rs | 2 +- polkadot/node/collation-generation/src/lib.rs | 7 +- .../node/collation-generation/src/tests.rs | 8 +- polkadot/node/core/backing/src/tests/mod.rs | 2 +- .../src/tests/prospective_parachains.rs | 8 +- .../dispute-coordinator/src/initialized.rs | 12 +- .../src/fragment_tree.rs | 10 +- .../core/prospective-parachains/src/lib.rs | 13 +- .../core/prospective-parachains/src/tests.rs | 12 +- polkadot/node/core/runtime-api/src/cache.rs | 74 +- polkadot/node/core/runtime-api/src/lib.rs | 33 +- polkadot/node/core/runtime-api/src/tests.rs | 22 +- .../src/collator_side/tests/mod.rs | 4 +- .../tests/prospective_parachains.rs | 8 +- .../src/validator_side/tests/mod.rs | 2 +- .../tests/prospective_parachains.rs | 10 +- polkadot/node/network/protocol/src/lib.rs | 2 +- .../protocol/src/request_response/vstaging.rs | 2 +- .../src/legacy_v1/tests.rs | 12 +- .../src/vstaging/candidates.rs | 2 +- .../src/vstaging/cluster.rs | 4 +- .../src/vstaging/grid.rs | 4 +- .../src/vstaging/groups.rs | 3 +- .../src/vstaging/mod.rs | 2 +- .../src/vstaging/requests.rs | 4 +- .../src/vstaging/statement_store.rs | 2 +- .../src/vstaging/tests/mod.rs | 4 +- polkadot/node/subsystem-types/src/messages.rs | 24 +- .../subsystem-types/src/runtime_client.rs | 46 +- .../src/backing_implicit_view.rs | 2 +- .../src/inclusion_emulator/mod.rs | 1436 +++++++++++++++- .../src/inclusion_emulator/staging.rs | 1450 ----------------- polkadot/node/subsystem-util/src/lib.rs | 4 +- .../node/subsystem-util/src/runtime/mod.rs | 27 +- polkadot/primitives/src/lib.rs | 31 +- polkadot/primitives/src/runtime_api.rs | 30 +- polkadot/primitives/src/v6/async_backing.rs | 132 ++ .../src/{v5 => v6}/executor_params.rs | 0 polkadot/primitives/src/{v5 => v6}/metrics.rs | 0 polkadot/primitives/src/{v5 => v6}/mod.rs | 12 +- polkadot/primitives/src/{v5 => v6}/signed.rs | 0 .../primitives/src/{v5 => v6}/slashing.rs | 0 polkadot/primitives/src/vstaging/mod.rs | 118 -- .../node/backing/prospective-parachains.md | 2 +- .../runtime/parachains/src/configuration.rs | 2 +- .../src/configuration/migration/v6.rs | 2 +- .../src/configuration/migration/v7.rs | 2 +- .../src/configuration/migration/v8.rs | 3 +- .../parachains/src/disputes/slashing.rs | 2 +- .../parachains/src/runtime_api_impl/mod.rs | 3 + .../parachains/src/runtime_api_impl/v6.rs | 22 + .../parachains/src/runtime_api_impl/v7.rs | 120 ++ .../src/runtime_api_impl/vstaging.rs | 108 -- polkadot/runtime/rococo/src/lib.rs | 64 +- polkadot/runtime/westend/src/lib.rs | 55 +- 58 files changed, 2009 insertions(+), 1993 deletions(-) delete mode 100644 polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs create mode 100644 polkadot/primitives/src/v6/async_backing.rs rename polkadot/primitives/src/{v5 => v6}/executor_params.rs (100%) rename polkadot/primitives/src/{v5 => v6}/metrics.rs (100%) rename polkadot/primitives/src/{v5 => v6}/mod.rs (99%) rename polkadot/primitives/src/{v5 => v6}/signed.rs (100%) rename polkadot/primitives/src/{v5 => v6}/slashing.rs (100%) create mode 100644 polkadot/runtime/parachains/src/runtime_api_impl/v6.rs create mode 100644 polkadot/runtime/parachains/src/runtime_api_impl/v7.rs diff --git 
a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs index c1dbc6db36f6..cd281324ee55 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs @@ -52,7 +52,7 @@ pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// This is a copy-paste from the cumulus repo's `parachains-common` crate. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(constants::WEIGHT_REF_TIME_PER_SECOND, 0) .saturating_div(2) - .set_proof_size(polkadot_primitives::v5::MAX_POV_SIZE as u64); + .set_proof_size(polkadot_primitives::MAX_POV_SIZE as u64); /// All cumulus bridge hubs assume that about 5 percent of the block weight is consumed by /// `on_initialize` handlers. This is used to limit the maximal weight of a single extrinsic. diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 57e16bc4283c..3f4c08ecbb83 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -22,8 +22,8 @@ use futures::{Stream, StreamExt}; use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_overseer::RuntimeApiSubsystemClient; use polkadot_primitives::{ + async_backing::{AsyncBackingParams, BackingState}, slashing, - vstaging::{AsyncBackingParams, BackingState}, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sp_api::{ApiError, RuntimeApiInfo}; @@ -346,16 +346,16 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { Ok(self.rpc_client.parachain_host_minimum_backing_votes(at, session_index).await?) } - async fn staging_async_backing_params(&self, at: Hash) -> Result { - Ok(self.rpc_client.parachain_host_staging_async_backing_params(at).await?) + async fn async_backing_params(&self, at: Hash) -> Result { + Ok(self.rpc_client.parachain_host_async_backing_params(at).await?) } - async fn staging_para_backing_state( + async fn para_backing_state( &self, at: Hash, para_id: cumulus_primitives_core::ParaId, ) -> Result, ApiError> { - Ok(self.rpc_client.parachain_host_staging_para_backing_state(at, para_id).await?) + Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) 
} } diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index c1e92b249d77..b1fd7d1ab7d9 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -30,9 +30,8 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ - slashing, - vstaging::{AsyncBackingParams, BackingState}, - BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + async_backing::{AsyncBackingParams, BackingState}, + slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, @@ -599,30 +598,22 @@ impl RelayChainRpcClient { } #[allow(missing_docs)] - pub async fn parachain_host_staging_async_backing_params( + pub async fn parachain_host_async_backing_params( &self, at: RelayHash, ) -> Result { - self.call_remote_runtime_function( - "ParachainHost_staging_async_backing_params", - at, - None::<()>, - ) - .await + self.call_remote_runtime_function("ParachainHost_async_backing_params", at, None::<()>) + .await } #[allow(missing_docs)] - pub async fn parachain_host_staging_para_backing_state( + pub async fn parachain_host_para_backing_state( &self, at: RelayHash, para_id: ParaId, ) -> Result, RelayChainError> { - self.call_remote_runtime_function( - "ParachainHost_staging_para_backing_state", - at, - Some(para_id), - ) - .await + self.call_remote_runtime_function("ParachainHost_para_backing_state", at, Some(para_id)) + .await } fn send_register_message_to_worker( diff --git a/cumulus/test/relay-sproof-builder/src/lib.rs b/cumulus/test/relay-sproof-builder/src/lib.rs index 69a82d05d816..fbd2692a36b4 100644 --- a/cumulus/test/relay-sproof-builder/src/lib.rs +++ b/cumulus/test/relay-sproof-builder/src/lib.rs @@ -63,7 +63,7 @@ impl Default for RelayStateSproofBuilder { hrmp_max_message_num_per_candidate: 5, validation_upgrade_cooldown: 6, validation_upgrade_delay: 6, - async_backing_params: relay_chain::vstaging::AsyncBackingParams { + async_backing_params: relay_chain::AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0, }, diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 27779f3d1acb..4e13755deedf 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -43,9 +43,8 @@ use polkadot_node_subsystem::{ SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - request_availability_cores, request_persisted_validation_data, - request_staging_async_backing_params, request_validation_code, request_validation_code_hash, - request_validators, + request_async_backing_params, request_availability_cores, request_persisted_validation_data, + request_validation_code, request_validation_code_hash, request_validators, }; use polkadot_primitives::{ collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, @@ -208,7 +207,7 @@ async fn handle_new_activations( let (availability_cores, validators, async_backing_params) = join!( request_availability_cores(relay_parent, ctx.sender()).await, request_validators(relay_parent, ctx.sender()).await, - 
request_staging_async_backing_params(relay_parent, ctx.sender()).await, + request_async_backing_params(relay_parent, ctx.sender()).await, ); let availability_cores = availability_cores??; diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index da6b343e6aee..9094f40cca84 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -153,7 +153,7 @@ fn requests_availability_per_relay_parent() { } Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams( + RuntimeApiRequest::AsyncBackingParams( tx, ), ))) => { @@ -235,7 +235,7 @@ fn requests_validation_data_for_scheduled_matches() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", @@ -332,7 +332,7 @@ fn sends_distribute_collation_message() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", @@ -494,7 +494,7 @@ fn fallback_when_no_validation_code_hash_api() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index a981487db445..2a8b98f2cb51 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -241,7 +241,7 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == test_state.relay_parent => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index d6e93fb04d34..4ba690e17e48 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -20,12 +20,12 @@ use polkadot_node_subsystem::{ messages::{ChainApiMessage, FragmentTreeMembership}, ActivatedLeaf, TimeoutExt, }; -use polkadot_primitives::{vstaging as vstaging_primitives, BlockNumber, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore}; use super::*; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; struct TestLeaf { activated: ActivatedLeaf, @@ -56,7 +56,7 @@ async fn activate_leaf( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, 
RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == leaf_hash => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); } diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 9cd544a8c536..e44530b3f1bb 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -43,7 +43,7 @@ use polkadot_node_subsystem_util::runtime::{ self, key_ownership_proof, submit_report_dispute_lost, RuntimeInfo, }; use polkadot_primitives::{ - vstaging, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, + slashing, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, }; @@ -385,7 +385,7 @@ impl Initialized { &mut self, ctx: &mut Context, relay_parent: Hash, - unapplied_slashes: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, + unapplied_slashes: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ) { for (session_index, candidate_hash, pending) in unapplied_slashes { gum::info!( @@ -422,11 +422,9 @@ impl Initialized { match res { Ok(Some(key_ownership_proof)) => { key_ownership_proofs.push(key_ownership_proof); - let time_slot = vstaging::slashing::DisputesTimeSlot::new( - session_index, - candidate_hash, - ); - let dispute_proof = vstaging::slashing::DisputeProof { + let time_slot = + slashing::DisputesTimeSlot::new(session_index, candidate_hash); + let dispute_proof = slashing::DisputeProof { time_slot, kind: pending.kind, validator_index: *validator_index, diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs index ed2988fcb39f..292e4ebe5282 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs @@ -96,10 +96,10 @@ use std::{ use super::LOG_TARGET; use bitvec::prelude::*; -use polkadot_node_subsystem_util::inclusion_emulator::staging::{ +use polkadot_node_subsystem_util::inclusion_emulator::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; @@ -981,10 +981,8 @@ impl FragmentNode { mod tests { use super::*; use assert_matches::assert_matches; - use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; - use polkadot_primitives::vstaging::{ - BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, - }; + use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; + use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; fn make_constraints( diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 6e5844a62a16..fcca0dd0b536 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -22,7 +22,7 @@ //! backing phases of parachain consensus. //! //! This is primarily an implementation of "Fragment Trees", as described in -//! 
[`polkadot_node_subsystem_util::inclusion_emulator::staging`]. +//! [`polkadot_node_subsystem_util::inclusion_emulator`]. //! //! This subsystem also handles concerns such as the relay-chain being forkful and session changes. @@ -42,13 +42,14 @@ use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ - inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}, + inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; -use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CandidatePendingAvailability, CommittedCandidateReceipt, CoreState, - Hash, HeadData, Header, Id as ParaId, PersistedValidationData, +use polkadot_primitives::{ + async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash, + CommittedCandidateReceipt, CoreState, Hash, HeadData, Header, Id as ParaId, + PersistedValidationData, }; use crate::{ @@ -792,7 +793,7 @@ async fn fetch_backing_state( let (tx, rx) = oneshot::channel(); ctx.send_message(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingParaBackingState(para_id, tx), + RuntimeApiRequest::ParaBackingState(para_id, tx), )) .await; diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index eb12ea4537f7..d2cd23fe95fc 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -25,7 +25,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, + async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; @@ -219,7 +219,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(async_backing_params)).unwrap(); } @@ -284,7 +284,7 @@ async fn handle_leaf_activation( let para_id = match message { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, - RuntimeApiRequest::StagingParaBackingState(p_id, _), + RuntimeApiRequest::ParaBackingState(p_id, _), )) => p_id, _ => panic!("received unexpected message {:?}", message), }; @@ -303,7 +303,7 @@ async fn handle_leaf_activation( assert_matches!( message, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingParaBackingState(p_id, tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) ) if parent == *hash && p_id == para_id => { tx.send(Ok(Some(backing_state))).unwrap(); } @@ -499,7 +499,7 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == hash => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } @@ -1569,7 +1569,7 @@ fn uses_ancestry_only_within_session() { 
assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == hash => { tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); } diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 7f41d74e616c..e05e5823a282 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,12 +20,12 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that @@ -61,14 +61,11 @@ pub(crate) struct RequestResultCache { LruMap<(Hash, ParaId, OccupiedCoreAssumption), Option>, version: LruMap, disputes: LruMap)>>, - unapplied_slashes: - LruMap>, - key_ownership_proof: - LruMap<(Hash, ValidatorId), Option>, + unapplied_slashes: LruMap>, + key_ownership_proof: LruMap<(Hash, ValidatorId), Option>, minimum_backing_votes: LruMap, - - staging_para_backing_state: LruMap<(Hash, ParaId), Option>, - staging_async_backing_params: LruMap, + para_backing_state: LruMap<(Hash, ParaId), Option>, + async_backing_params: LruMap, } impl Default for RequestResultCache { @@ -100,8 +97,8 @@ impl Default for RequestResultCache { key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), - staging_para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), - staging_async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -401,14 +398,14 @@ impl RequestResultCache { pub(crate) fn unapplied_slashes( &mut self, relay_parent: &Hash, - ) -> Option<&Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>> { + ) -> Option<&Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>> { self.unapplied_slashes.get(relay_parent).map(|v| &*v) } pub(crate) fn cache_unapplied_slashes( &mut self, relay_parent: Hash, - value: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, + value: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ) { self.unapplied_slashes.insert(relay_parent, value); } @@ -416,14 +413,14 @@ impl RequestResultCache { pub(crate) fn 
key_ownership_proof( &mut self, key: (Hash, ValidatorId), - ) -> Option<&Option> { + ) -> Option<&Option> { self.key_ownership_proof.get(&key).map(|v| &*v) } pub(crate) fn cache_key_ownership_proof( &mut self, key: (Hash, ValidatorId), - value: Option, + value: Option, ) { self.key_ownership_proof.insert(key, value); } @@ -431,7 +428,7 @@ impl RequestResultCache { // This request is never cached, hence always returns `None`. pub(crate) fn submit_report_dispute_lost( &mut self, - _key: (Hash, vstaging::slashing::DisputeProof, vstaging::slashing::OpaqueKeyOwnershipProof), + _key: (Hash, slashing::DisputeProof, slashing::OpaqueKeyOwnershipProof), ) -> Option<&Option<()>> { None } @@ -448,34 +445,34 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } - pub(crate) fn staging_para_backing_state( + pub(crate) fn para_backing_state( &mut self, key: (Hash, ParaId), - ) -> Option<&Option> { - self.staging_para_backing_state.get(&key).map(|v| &*v) + ) -> Option<&Option> { + self.para_backing_state.get(&key).map(|v| &*v) } - pub(crate) fn cache_staging_para_backing_state( + pub(crate) fn cache_para_backing_state( &mut self, key: (Hash, ParaId), - value: Option, + value: Option, ) { - self.staging_para_backing_state.insert(key, value); + self.para_backing_state.insert(key, value); } - pub(crate) fn staging_async_backing_params( + pub(crate) fn async_backing_params( &mut self, key: &Hash, - ) -> Option<&vstaging::AsyncBackingParams> { - self.staging_async_backing_params.get(key).map(|v| &*v) + ) -> Option<&async_backing::AsyncBackingParams> { + self.async_backing_params.get(key).map(|v| &*v) } - pub(crate) fn cache_staging_async_backing_params( + pub(crate) fn cache_async_backing_params( &mut self, key: Hash, - value: vstaging::AsyncBackingParams, + value: async_backing::AsyncBackingParams, ) { - self.staging_async_backing_params.insert(key, value); + self.async_backing_params.insert(key, value); } } @@ -515,16 +512,15 @@ pub(crate) enum RequestResult { ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), - UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>), - KeyOwnershipProof(Hash, ValidatorId, Option), + UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>), + KeyOwnershipProof(Hash, ValidatorId, Option), // This is a request with side-effects. 
SubmitReportDisputeLost( Hash, - vstaging::slashing::DisputeProof, - vstaging::slashing::OpaqueKeyOwnershipProof, + slashing::DisputeProof, + slashing::OpaqueKeyOwnershipProof, Option<()>, ), - - StagingParaBackingState(Hash, ParaId, Option), - StagingAsyncBackingParams(Hash, vstaging::AsyncBackingParams), + ParaBackingState(Hash, ParaId, Option), + AsyncBackingParams(Hash, async_backing::AsyncBackingParams), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index ec9bf10fa6e3..19b2f5565a22 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -166,12 +166,11 @@ where .requests_cache .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), SubmitReportDisputeLost(_, _, _, _) => {}, - - StagingParaBackingState(relay_parent, para_id, constraints) => self + ParaBackingState(relay_parent, para_id, constraints) => self .requests_cache - .cache_staging_para_backing_state((relay_parent, para_id), constraints), - StagingAsyncBackingParams(relay_parent, params) => - self.requests_cache.cache_staging_async_backing_params(relay_parent, params), + .cache_para_backing_state((relay_parent, para_id), constraints), + AsyncBackingParams(relay_parent, params) => + self.requests_cache.cache_async_backing_params(relay_parent, params), } } @@ -297,13 +296,10 @@ where Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) }, ), - - Request::StagingParaBackingState(para, sender) => - query!(staging_para_backing_state(para), sender) - .map(|sender| Request::StagingParaBackingState(para, sender)), - Request::StagingAsyncBackingParams(sender) => - query!(staging_async_backing_params(), sender) - .map(|sender| Request::StagingAsyncBackingParams(sender)), + Request::ParaBackingState(para, sender) => query!(para_backing_state(para), sender) + .map(|sender| Request::ParaBackingState(para, sender)), + Request::AsyncBackingParams(sender) => query!(async_backing_params(), sender) + .map(|sender| Request::AsyncBackingParams(sender)), Request::MinimumBackingVotes(index, sender) => { if let Some(value) = self.requests_cache.minimum_backing_votes(index) { self.metrics.on_cached_request(); @@ -569,19 +565,18 @@ where ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT, sender ), - - Request::StagingParaBackingState(para, sender) => { + Request::ParaBackingState(para, sender) => { query!( - StagingParaBackingState, - staging_para_backing_state(para), + ParaBackingState, + para_backing_state(para), ver = Request::STAGING_BACKING_STATE, sender ) }, - Request::StagingAsyncBackingParams(sender) => { + Request::AsyncBackingParams(sender) => { query!( - StagingAsyncBackingParams, - staging_async_backing_params(), + AsyncBackingParams, + async_backing_params(), ver = Request::STAGING_BACKING_STATE, sender ) diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index bb7c29689611..fb97139a8028 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,9 +20,9 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Id as ParaId, 
InboundDownwardMessage, InboundHrmpMessage, + async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, @@ -213,7 +213,7 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { async fn unapplied_slashes( &self, _: Hash, - ) -> Result, ApiError> { + ) -> Result, ApiError> { todo!("Not required for tests") } @@ -221,15 +221,15 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { &self, _: Hash, _: ValidatorId, - ) -> Result, ApiError> { + ) -> Result, ApiError> { todo!("Not required for tests") } async fn submit_report_dispute_lost( &self, _: Hash, - _: vstaging::slashing::DisputeProof, - _: vstaging::slashing::OpaqueKeyOwnershipProof, + _: slashing::DisputeProof, + _: slashing::OpaqueKeyOwnershipProof, ) -> Result, ApiError> { todo!("Not required for tests") } @@ -250,18 +250,18 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { Ok(self.authorities.clone()) } - async fn staging_async_backing_params( + async fn async_backing_params( &self, _: Hash, - ) -> Result { + ) -> Result { todo!("Not required for tests") } - async fn staging_para_backing_state( + async fn para_backing_state( &self, _: Hash, _: ParaId, - ) -> Result, ApiError> { + ) -> Result, ApiError> { todo!("Not required for tests") } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index b452c84c2cd8..b30f8215941c 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -198,7 +198,7 @@ impl TestState { overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, self.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -330,7 +330,7 @@ async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestS overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, test_state.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index bd55c35852fa..ea8786ca1898 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -19,10 +19,10 @@ use super::*; use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; -use polkadot_primitives::{vstaging as vstaging_primitives, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - 
vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -52,7 +52,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 1cb656e325d3..06e40a12130e 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -444,7 +444,7 @@ async fn assert_async_backing_params_request(virtual_overseer: &mut VirtualOvers overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, hash); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index e2a007b308e5..863d7bf3bb3a 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -20,12 +20,12 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{ - vstaging as vstaging_primitives, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, - Header, SigningContext, ValidatorId, + AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, + SigningContext, ValidatorId, }; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -97,7 +97,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 1bed2c12fe20..ca3601297fe6 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -594,7 +594,7 @@ pub mod vstaging { use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; - use polkadot_primitives::vstaging::{ + use polkadot_primitives::{ CandidateHash, CandidateIndex, CollatorId, CollatorSignature, GroupIndex, Hash, Id as ParaId, 
UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, }; diff --git a/polkadot/node/network/protocol/src/request_response/vstaging.rs b/polkadot/node/network/protocol/src/request_response/vstaging.rs index 34a17b4baaa6..c79663abb8a5 100644 --- a/polkadot/node/network/protocol/src/request_response/vstaging.rs +++ b/polkadot/node/network/protocol/src/request_response/vstaging.rs @@ -18,7 +18,7 @@ use parity_scale_codec::{Decode, Encode}; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, UncheckedSignedStatement, }; diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 17a66a9ff792..ca3038f9b3f3 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -793,7 +793,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -1033,7 +1033,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -1563,7 +1563,7 @@ fn delay_reputation_changes() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2043,7 +2043,7 @@ fn share_prioritizes_backing_group() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2365,7 +2365,7 @@ fn peer_cant_flood_with_large_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2590,7 +2590,7 @@ fn handle_multiple_seconded_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == relay_parent_hash => { diff --git a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs b/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs index d6b68510f1c1..e660df5da173 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs @@ -27,7 +27,7 @@ use polkadot_node_network_protocol::PeerId; use polkadot_node_subsystem::messages::HypotheticalCandidate; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId, PersistedValidationData, }; diff --git 
a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs b/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs index 55d847f83157..8adb8353ca92 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs @@ -55,7 +55,7 @@ //! and to keep track of what we have sent to other validators in the group and what we may //! continue to send them. -use polkadot_primitives::vstaging::{CandidateHash, CompactStatement, ValidatorIndex}; +use polkadot_primitives::{CandidateHash, CompactStatement, ValidatorIndex}; use std::collections::{HashMap, HashSet}; @@ -459,7 +459,7 @@ pub enum RejectOutgoing { #[cfg(test)] mod tests { use super::*; - use polkadot_primitives::vstaging::Hash; + use polkadot_primitives::Hash; #[test] fn rejects_incoming_outside_of_group() { diff --git a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs b/polkadot/node/network/statement-distribution/src/vstaging/grid.rs index 4fd77d0ced1c..b26c74682778 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/grid.rs @@ -63,9 +63,7 @@ use polkadot_node_network_protocol::{ grid_topology::SessionGridTopology, vstaging::StatementFilter, }; -use polkadot_primitives::vstaging::{ - CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex, -}; +use polkadot_primitives::{CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex}; use std::collections::{ hash_map::{Entry, HashMap}, diff --git a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs b/polkadot/node/network/statement-distribution/src/vstaging/groups.rs index b2daa1c0ac7c..d917b2090529 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/groups.rs @@ -17,8 +17,7 @@ //! A utility for tracking groups and their members within a session. 
use polkadot_primitives::{ - effective_minimum_backing_votes, - vstaging::{GroupIndex, IndexedVec, ValidatorIndex}, + effective_minimum_backing_votes, GroupIndex, IndexedVec, ValidatorIndex, }; use std::collections::HashMap; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs b/polkadot/node/network/statement-distribution/src/vstaging/mod.rs index 4639720b3221..8768d3504e0a 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/mod.rs @@ -45,7 +45,7 @@ use polkadot_node_subsystem_util::{ reputation::ReputationAggregator, runtime::{request_min_backing_votes, ProspectiveParachainsMode}, }; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, diff --git a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs b/polkadot/node/network/statement-distribution/src/vstaging/requests.rs index 79925f2115d4..e96b891f82bb 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/requests.rs @@ -45,8 +45,8 @@ use polkadot_node_network_protocol::{ vstaging::StatementFilter, PeerId, UnifiedReputationChange as Rep, }; -use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, ParaId, +use polkadot_primitives::{ + CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SignedStatement, SigningContext, ValidatorId, ValidatorIndex, }; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs b/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs index 9ea926f24aa8..c20e7fe45f7c 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -25,7 +25,7 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use polkadot_node_network_protocol::vstaging::StatementFilter; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, }; use std::collections::hash_map::{Entry as HEntry, HashMap}; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs b/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs index 48ceebb1949b..818c91c53565 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -31,7 +31,7 @@ use polkadot_node_subsystem::messages::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ AssignmentPair, AsyncBackingParams, BlockNumber, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo, ValidatorPair, @@ -380,7 +380,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, 
RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(test_state.config.async_backing_params.unwrap_or(DEFAULT_ASYNC_BACKING_PARAMETERS))).unwrap(); } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index a53908d3c2cb..eb94f1696c9d 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -39,12 +39,12 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - slashing, vstaging as vstaging_primitives, AuthorityDiscoveryId, BackedCandidate, BlockNumber, - CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, + async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, + CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, + CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -695,14 +695,12 @@ pub enum RuntimeApiRequest { ), /// Get the minimum required backing votes. MinimumBackingVotes(SessionIndex, RuntimeApiSender), - /// Get the backing state of the given para. - /// This is a staging API that will not be available on production runtimes. - StagingParaBackingState(ParaId, RuntimeApiSender>), + ParaBackingState(ParaId, RuntimeApiSender>), /// Get candidate's acceptance limitations for asynchronous backing for a relay parent. /// /// If it's not supported by the Runtime, the async backing is said to be disabled. - StagingAsyncBackingParams(RuntimeApiSender), + AsyncBackingParams(RuntimeApiSender), } impl RuntimeApiRequest { @@ -726,10 +724,8 @@ impl RuntimeApiRequest { /// `MinimumBackingVotes` pub const MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT: u32 = 6; - /// Minimum version for backing state, required for async backing. - /// - /// 99 for now, should be adjusted to VSTAGING/actual runtime version once released. - pub const STAGING_BACKING_STATE: u32 = 99; + /// Minimum version to enable asynchronous backing: `AsyncBackingParams` and `ParaBackingState`. + pub const STAGING_BACKING_STATE: u32 = 7; } /// A message to the Runtime API subsystem. 
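For reference, the renamed request is gated on runtime API v7 (`STAGING_BACKING_STATE` above): on older runtimes the call fails with `NotSupported`, which the subsystems in this patch interpret as "async backing disabled" (the `ASYNC_BACKING_DISABLED_ERROR` answered in the tests). A rough sketch of that fallback, not part of the patch, assuming the `request_async_backing_params` helper from `polkadot-node-subsystem-util` and with error plumbing simplified:

    // Sketch only: ask the runtime for the async backing parameters and fall back
    // to synchronous backing when the v7 API is not exposed by the runtime.
    let params = request_async_backing_params(relay_parent, ctx.sender()).await.await?;
    let mode = match params {
        Ok(params) => ProspectiveParachainsMode::Enabled {
            max_candidate_depth: params.max_candidate_depth as usize,
            allowed_ancestry_len: params.allowed_ancestry_len as usize,
        },
        // Runtime predates the async backing API: behave as before.
        Err(RuntimeApiError::NotSupported { .. }) => ProspectiveParachainsMode::Disabled,
        Err(other) => return Err(other.into()),
    };

This mirrors the `prospective_parachains_mode` helper in `subsystem-util/src/runtime/mod.rs`, which this patch also updates.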
diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 06aa351efb4b..3007e985b4f7 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,9 +16,9 @@ use async_trait::async_trait; use polkadot_primitives::{ - runtime_api::ParachainHost, vstaging, Block, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, + async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, @@ -190,7 +190,7 @@ pub trait RuntimeApiSubsystemClient { async fn unapplied_slashes( &self, at: Hash, - ) -> Result, ApiError>; + ) -> Result, ApiError>; /// Returns a merkle proof of a validator session key in a past session. /// @@ -199,7 +199,7 @@ pub trait RuntimeApiSubsystemClient { &self, at: Hash, validator_id: ValidatorId, - ) -> Result, ApiError>; + ) -> Result, ApiError>; /// Submits an unsigned extrinsic to slash validators who lost a dispute about /// a candidate of a past session. @@ -208,8 +208,8 @@ pub trait RuntimeApiSubsystemClient { async fn submit_report_dispute_lost( &self, at: Hash, - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Result, ApiError>; // === BABE API === @@ -232,7 +232,7 @@ pub trait RuntimeApiSubsystemClient { session_index: SessionIndex, ) -> Result, ApiError>; - // === STAGING v6 === + // === v6 === /// Get the minimum number of backing votes. async fn minimum_backing_votes( &self, @@ -240,21 +240,21 @@ pub trait RuntimeApiSubsystemClient { session_index: SessionIndex, ) -> Result; - // === Asynchronous backing API === + // === v7: Asynchronous backing API === /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. - async fn staging_async_backing_params( + async fn async_backing_params( &self, at: Hash, - ) -> Result; + ) -> Result; /// Returns the state of parachain backing for a given para. /// This is a staging method! Do not use on production runtimes! - async fn staging_para_backing_state( + async fn para_backing_state( &self, at: Hash, para_id: Id, - ) -> Result, ApiError>; + ) -> Result, ApiError>; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. 
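The `Option<BackingState>` returned by `para_backing_state` is what prospective-parachains feeds into the inclusion emulator: `BackingState::constraints` converts into the node-side `Constraints` via the `From` impl added in `inclusion_emulator/mod.rs` further down, while `BackingState::pending_availability` seeds the fragment-tree scope. A minimal illustration of that hand-off, not part of the patch and with the actual fetch/scope plumbing omitted:

    // Sketch only: turn the runtime's view of a para's backing state into the
    // constraints the fragment-tree code operates on.
    // `backing_state: Option<async_backing::BackingState>` is assumed to be the
    // answer to a `ParaBackingState(para_id, tx)` request at some relay parent.
    if let Some(state) = backing_state {
        let constraints = Constraints::from(state.constraints);
        // `state.pending_availability` lists candidates still occupying cores for
        // this para; both pieces are used when building the para's fragment tree.
        let _ = (constraints, state.pending_availability);
    }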
@@ -454,7 +454,7 @@ where async fn unapplied_slashes( &self, at: Hash, - ) -> Result, ApiError> { + ) -> Result, ApiError> { self.client.runtime_api().unapplied_slashes(at) } @@ -462,15 +462,15 @@ where &self, at: Hash, validator_id: ValidatorId, - ) -> Result, ApiError> { + ) -> Result, ApiError> { self.client.runtime_api().key_ownership_proof(at, validator_id) } async fn submit_report_dispute_lost( &self, at: Hash, - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Result, ApiError> { let mut runtime_api = self.client.runtime_api(); @@ -489,19 +489,19 @@ where self.client.runtime_api().minimum_backing_votes(at) } - async fn staging_para_backing_state( + async fn para_backing_state( &self, at: Hash, para_id: Id, - ) -> Result, ApiError> { - self.client.runtime_api().staging_para_backing_state(at, para_id) + ) -> Result, ApiError> { + self.client.runtime_api().para_backing_state(at, para_id) } /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. - async fn staging_async_backing_params( + async fn async_backing_params( &self, at: Hash, - ) -> Result { - self.client.runtime_api().staging_async_backing_params(at) + ) -> Result { + self.client.runtime_api().async_backing_params(at) } } diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index 83c15fdef959..a14536a17666 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -20,7 +20,7 @@ use polkadot_node_subsystem::{ messages::{ChainApiMessage, ProspectiveParachainsMessage}, SubsystemSender, }; -use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId}; +use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; use std::collections::HashMap; diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 1487077d9eda..77360ba0afe1 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -11,4 +11,1438 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -pub mod staging; +//! # Overview +//! +//! A set of utilities for node-side code to emulate the logic the runtime uses for checking +//! parachain blocks in order to build prospective parachains that are produced ahead of the +//! relay chain. These utilities allow the node-side to predict, with high accuracy, what +//! the relay-chain will accept in the near future. +//! +//! This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] +//! exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`] +//! indicates a parachain block, anchored to the relay-chain at a particular relay-chain block, +//! known as the relay-parent. +//! +//! ## Fragment Validity +//! +//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe +//! the properties that must be true for a block to be included in a direct child of that block, +//! assuming there is no intermediate parachain block pending availability. +//! +//! However, the key factor that makes asynchronously-grown prospective chains +//! 
possible is the fact that the relay-chain accepts candidate blocks based on whether they +//! are valid under the constraints of the present moment, not based on whether they were +//! valid at the time of construction. +//! +//! As such, [`Fragment`]s are often, but not always constructed in such a way that they are +//! invalid at first and become valid later on, as the relay chain grows. +//! +//! # Usage +//! +//! It's expected that the users of this module will be building up trees of +//! [`Fragment`]s and consistently pruning and adding to the tree. +//! +//! ## Operating Constraints +//! +//! The *operating constraints* of a `Fragment` are the constraints with which that fragment +//! was intended to comply. The operating constraints are defined as the base constraints +//! of the relay-parent of the fragment modified by the cumulative modifications of all +//! fragments between the relay-parent and the current fragment. +//! +//! What the operating constraints are, in practice, is a prediction about the state of the +//! relay-chain in the future. The relay-chain is aware of some current state, and we want to +//! make an intelligent prediction about what might be accepted in the future based on +//! prior fragments that also exist off-chain. +//! +//! ## Fragment Trees +//! +//! As the relay-chain grows, some predictions come true and others come false. +//! And new predictions get made. These three changes correspond distinctly to the +//! 3 primary operations on fragment trees. +//! +//! A fragment tree is a mental model for thinking about a forking series of predictions +//! about a single parachain. There may be one or more fragment trees per parachain. +//! +//! In expectation, most parachains will have a plausibly-unique authorship method which means that +//! they should really be much closer to fragment-chains, maybe with an occasional fork. +//! +//! Avoiding fragment-tree blowup is beyond the scope of this module. +//! +//! ### Pruning Fragment Trees +//! +//! When the relay-chain advances, we want to compare the new constraints of that relay-parent to +//! the roots of the fragment trees we have. There are 3 cases: +//! +//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This +//! is the "prediction still uncertain" case. +//! +//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the +//! relay-chain. In this case, we can discard the root and split & re-root the fragment tree under +//! its descendents and compare to the new constraints again. This is the "prediction came true" +//! case. +//! +//! 3. The root fragment is invalid under the new constraints because a competing parachain block +//! has been included or it would never be accepted for some other reason. In this case we can +//! discard the entire fragment tree. This is the "prediction came false" case. +//! +//! This is all a bit of a simplification because it assumes that the relay-chain advances without +//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable +//! from the perspective of a few different possible forks of the relay-chain and not pruned +//! too eagerly. +//! +//! Note that the fragments themselves don't need to change and the only thing we care about +//! is whether the predictions they represent are still valid. +//! +//! ### Extending Fragment Trees +//! +//! As predictions fade into the past, new ones should be stacked on top. +//! +//! 
Every new relay-chain block is an opportunity to make a new prediction about the future.
+//! Higher-level logic should select which leaves of the fragment-trees to build upon, or whether
+//! to create a new fragment-tree.
+//!
+//! ### Code Upgrades
+//!
+//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling
+//! logic is very path-dependent and intricate, so we just assume that code upgrades
+//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep
+//! in practice, and code upgrades are fairly rare. So what's likely to happen around code
+//! upgrades is that the entire fragment-tree has to get discarded at some point.
+//!
+//! That means a few blocks of execution time lost, which is not a big deal for code upgrades
+//! that in practice happen at most once every few weeks.
+
+use polkadot_primitives::{
+	async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments,
+	CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData,
+	UpgradeRestriction, ValidationCodeHash,
+};
+use std::{
+	borrow::{Borrow, Cow},
+	collections::HashMap,
+};
+
+/// Constraints on inbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct InboundHrmpLimitations {
+	/// An exhaustive set of all valid watermarks, sorted ascending.
+	pub valid_watermarks: Vec<BlockNumber>,
+}
+
+/// Constraints on outbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct OutboundHrmpChannelLimitations {
+	/// The maximum bytes that can be written to the channel.
+	pub bytes_remaining: usize,
+	/// The maximum messages that can be written to the channel.
+	pub messages_remaining: usize,
+}
+
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Constraints {
+	/// The minimum relay-parent number accepted under these constraints.
+	pub min_relay_parent_number: BlockNumber,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: usize,
+	/// The maximum new validation code size allowed, in bytes.
+	pub max_code_size: usize,
+	/// The amount of UMP messages remaining.
+	pub ump_remaining: usize,
+	/// The amount of UMP bytes remaining.
+	pub ump_remaining_bytes: usize,
+	/// The maximum number of UMP messages allowed per candidate.
+	pub max_ump_num_per_candidate: usize,
+	/// Remaining DMP queue. Only includes sent-at block numbers.
+	pub dmp_remaining_messages: Vec<BlockNumber>,
+	/// The limitations of all registered inbound HRMP channels.
+	pub hrmp_inbound: InboundHrmpLimitations,
+	/// The limitations of all registered outbound HRMP channels.
+	pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>,
+	/// The maximum number of HRMP messages allowed per candidate.
+	pub max_hrmp_num_per_candidate: usize,
+	/// The required parent head-data of the parachain.
+	pub required_parent: HeadData,
+	/// The expected validation-code-hash of this parachain.
+	pub validation_code_hash: ValidationCodeHash,
+	/// The code upgrade restriction signal as-of this parachain.
+	pub upgrade_restriction: Option<UpgradeRestriction>,
+	/// The future validation code hash, if any, and at what relay-parent
+	/// number the upgrade would be minimally applied.
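+	/// When a fragment's modifications mark the pending upgrade as applied, this hash
+	/// replaces `validation_code_hash`.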
+ pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, +} + +impl From for Constraints { + fn from(c: PrimitiveConstraints) -> Self { + Constraints { + min_relay_parent_number: c.min_relay_parent_number, + max_pov_size: c.max_pov_size as _, + max_code_size: c.max_code_size as _, + ump_remaining: c.ump_remaining as _, + ump_remaining_bytes: c.ump_remaining_bytes as _, + max_ump_num_per_candidate: c.max_ump_num_per_candidate as _, + dmp_remaining_messages: c.dmp_remaining_messages, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: c.hrmp_inbound.valid_watermarks, + }, + hrmp_channels_out: c + .hrmp_channels_out + .into_iter() + .map(|(para_id, limits)| { + ( + para_id, + OutboundHrmpChannelLimitations { + bytes_remaining: limits.bytes_remaining as _, + messages_remaining: limits.messages_remaining as _, + }, + ) + }) + .collect(), + max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _, + required_parent: c.required_parent, + validation_code_hash: c.validation_code_hash, + upgrade_restriction: c.upgrade_restriction, + future_validation_code: c.future_validation_code, + } + } +} + +/// Kinds of errors that can occur when modifying constraints. +#[derive(Debug, Clone, PartialEq)] +pub enum ModificationError { + /// The HRMP watermark is not allowed. + DisallowedHrmpWatermark(BlockNumber), + /// No such HRMP outbound channel. + NoSuchHrmpChannel(ParaId), + /// Too many messages submitted to HRMP channel. + HrmpMessagesOverflow { + /// The ID of the recipient. + para_id: ParaId, + /// The amount of remaining messages in the capacity of the channel. + messages_remaining: usize, + /// The amount of messages submitted to the channel. + messages_submitted: usize, + }, + /// Too many bytes submitted to HRMP channel. + HrmpBytesOverflow { + /// The ID of the recipient. + para_id: ParaId, + /// The amount of remaining bytes in the capacity of the channel. + bytes_remaining: usize, + /// The amount of bytes submitted to the channel. + bytes_submitted: usize, + }, + /// Too many messages submitted to UMP. + UmpMessagesOverflow { + /// The amount of remaining messages in the capacity of UMP. + messages_remaining: usize, + /// The amount of messages submitted to UMP. + messages_submitted: usize, + }, + /// Too many bytes submitted to UMP. + UmpBytesOverflow { + /// The amount of remaining bytes in the capacity of UMP. + bytes_remaining: usize, + /// The amount of bytes submitted to UMP. + bytes_submitted: usize, + }, + /// Too many messages processed from DMP. + DmpMessagesUnderflow { + /// The amount of messages waiting to be processed from DMP. + messages_remaining: usize, + /// The amount of messages processed. + messages_processed: usize, + }, + /// No validation code upgrade to apply. + AppliedNonexistentCodeUpgrade, +} + +impl Constraints { + /// Check modifications against constraints. + pub fn check_modifications( + &self, + modifications: &ConstraintModifications, + ) -> Result<(), ModificationError> { + if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { + // head updates are always valid. 
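+			// A trunk watermark is only acceptable if it lands exactly on one of the
+			// still-valid, message-bearing block numbers.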
+ if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) { + return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) + } + } + + for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { + if let Some(outbound) = self.hrmp_channels_out.get(&id) { + outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or( + ModificationError::HrmpBytesOverflow { + para_id: *id, + bytes_remaining: outbound.bytes_remaining, + bytes_submitted: outbound_hrmp_mod.bytes_submitted, + }, + )?; + + outbound + .messages_remaining + .checked_sub(outbound_hrmp_mod.messages_submitted) + .ok_or(ModificationError::HrmpMessagesOverflow { + para_id: *id, + messages_remaining: outbound.messages_remaining, + messages_submitted: outbound_hrmp_mod.messages_submitted, + })?; + } else { + return Err(ModificationError::NoSuchHrmpChannel(*id)) + } + } + + self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( + ModificationError::UmpMessagesOverflow { + messages_remaining: self.ump_remaining, + messages_submitted: modifications.ump_messages_sent, + }, + )?; + + self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or( + ModificationError::UmpBytesOverflow { + bytes_remaining: self.ump_remaining_bytes, + bytes_submitted: modifications.ump_bytes_sent, + }, + )?; + + self.dmp_remaining_messages + .len() + .checked_sub(modifications.dmp_messages_processed) + .ok_or(ModificationError::DmpMessagesUnderflow { + messages_remaining: self.dmp_remaining_messages.len(), + messages_processed: modifications.dmp_messages_processed, + })?; + + if self.future_validation_code.is_none() && modifications.code_upgrade_applied { + return Err(ModificationError::AppliedNonexistentCodeUpgrade) + } + + Ok(()) + } + + /// Apply modifications to these constraints. If this succeeds, it passes + /// all sanity-checks. + pub fn apply_modifications( + &self, + modifications: &ConstraintModifications, + ) -> Result { + let mut new = self.clone(); + + if let Some(required_parent) = modifications.required_parent.as_ref() { + new.required_parent = required_parent.clone(); + } + + if let Some(ref hrmp_watermark) = modifications.hrmp_watermark { + match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) { + Ok(pos) => { + // Exact match, so this is OK in all cases. + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); + }, + Err(pos) => match hrmp_watermark { + HrmpWatermarkUpdate::Head(_) => { + // Updates to Head are always OK. + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); + }, + HrmpWatermarkUpdate::Trunk(n) => { + // Trunk update landing on disallowed watermark is not OK. 
+ return Err(ModificationError::DisallowedHrmpWatermark(*n)) + }, + }, + } + } + + for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { + if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) { + outbound.bytes_remaining = outbound + .bytes_remaining + .checked_sub(outbound_hrmp_mod.bytes_submitted) + .ok_or(ModificationError::HrmpBytesOverflow { + para_id: *id, + bytes_remaining: outbound.bytes_remaining, + bytes_submitted: outbound_hrmp_mod.bytes_submitted, + })?; + + outbound.messages_remaining = outbound + .messages_remaining + .checked_sub(outbound_hrmp_mod.messages_submitted) + .ok_or(ModificationError::HrmpMessagesOverflow { + para_id: *id, + messages_remaining: outbound.messages_remaining, + messages_submitted: outbound_hrmp_mod.messages_submitted, + })?; + } else { + return Err(ModificationError::NoSuchHrmpChannel(*id)) + } + } + + new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( + ModificationError::UmpMessagesOverflow { + messages_remaining: new.ump_remaining, + messages_submitted: modifications.ump_messages_sent, + }, + )?; + + new.ump_remaining_bytes = new + .ump_remaining_bytes + .checked_sub(modifications.ump_bytes_sent) + .ok_or(ModificationError::UmpBytesOverflow { + bytes_remaining: new.ump_remaining_bytes, + bytes_submitted: modifications.ump_bytes_sent, + })?; + + if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() { + return Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: new.dmp_remaining_messages.len(), + messages_processed: modifications.dmp_messages_processed, + }) + } else { + new.dmp_remaining_messages = + new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec(); + } + + if modifications.code_upgrade_applied { + new.validation_code_hash = new + .future_validation_code + .take() + .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? + .1; + } + + Ok(new) + } +} + +/// Information about a relay-chain block. +#[derive(Debug, Clone, PartialEq)] +pub struct RelayChainBlockInfo { + /// The hash of the relay-chain block. + pub hash: Hash, + /// The number of the relay-chain block. + pub number: BlockNumber, + /// The storage-root of the relay-chain block. + pub storage_root: Hash, +} + +/// An update to outbound HRMP channels. +#[derive(Debug, Clone, PartialEq, Default)] +pub struct OutboundHrmpChannelModification { + /// The number of bytes submitted to the channel. + pub bytes_submitted: usize, + /// The number of messages submitted to the channel. + pub messages_submitted: usize, +} + +/// An update to the HRMP Watermark. +#[derive(Debug, Clone, PartialEq)] +pub enum HrmpWatermarkUpdate { + /// This is an update placing the watermark at the head of the chain, + /// which is always legal. + Head(BlockNumber), + /// This is an update placing the watermark behind the head of the + /// chain, which is only legal if it lands on a block where messages + /// were queued. + Trunk(BlockNumber), +} + +impl HrmpWatermarkUpdate { + fn watermark(&self) -> BlockNumber { + match *self { + HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n, + } + } +} + +/// Modifications to constraints as a result of prospective candidates. +#[derive(Debug, Clone, PartialEq)] +pub struct ConstraintModifications { + /// The required parent head to build upon. + pub required_parent: Option, + /// The new HRMP watermark + pub hrmp_watermark: Option, + /// Outbound HRMP channel modifications. + pub outbound_hrmp: HashMap, + /// The amount of UMP messages sent. 
+ pub ump_messages_sent: usize, + /// The amount of UMP bytes sent. + pub ump_bytes_sent: usize, + /// The amount of DMP messages processed. + pub dmp_messages_processed: usize, + /// Whether a pending code upgrade has been applied. + pub code_upgrade_applied: bool, +} + +impl ConstraintModifications { + /// The 'identity' modifications: these can be applied to + /// any constraints and yield the exact same result. + pub fn identity() -> Self { + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: HashMap::new(), + ump_messages_sent: 0, + ump_bytes_sent: 0, + dmp_messages_processed: 0, + code_upgrade_applied: false, + } + } + + /// Stack other modifications on top of these. + /// + /// This does no sanity-checking, so if `other` is garbage relative + /// to `self`, then the new value will be garbage as well. + /// + /// This is an addition which is not commutative. + pub fn stack(&mut self, other: &Self) { + if let Some(ref new_parent) = other.required_parent { + self.required_parent = Some(new_parent.clone()); + } + if let Some(ref new_hrmp_watermark) = other.hrmp_watermark { + self.hrmp_watermark = Some(new_hrmp_watermark.clone()); + } + + for (id, mods) in &other.outbound_hrmp { + let record = self.outbound_hrmp.entry(*id).or_default(); + record.messages_submitted += mods.messages_submitted; + record.bytes_submitted += mods.bytes_submitted; + } + + self.ump_messages_sent += other.ump_messages_sent; + self.ump_bytes_sent += other.ump_bytes_sent; + self.dmp_messages_processed += other.dmp_messages_processed; + self.code_upgrade_applied |= other.code_upgrade_applied; + } +} + +/// The prospective candidate. +/// +/// This comprises the key information that represent a candidate +/// without pinning it to a particular session. For example, everything +/// to do with the collator's signature and commitments are represented +/// here. But the erasure-root is not. This means that prospective candidates +/// are not correlated to any session in particular. +#[derive(Debug, Clone, PartialEq)] +pub struct ProspectiveCandidate<'a> { + /// The commitments to the output of the execution. + pub commitments: Cow<'a, CandidateCommitments>, + /// The collator that created the candidate. + pub collator: CollatorId, + /// The signature of the collator on the payload. + pub collator_signature: CollatorSignature, + /// The persisted validation data used to create the candidate. + pub persisted_validation_data: PersistedValidationData, + /// The hash of the PoV. + pub pov_hash: Hash, + /// The validation code hash used by the candidate. + pub validation_code_hash: ValidationCodeHash, +} + +impl<'a> ProspectiveCandidate<'a> { + fn into_owned(self) -> ProspectiveCandidate<'static> { + ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } + } + + /// Partially clone the prospective candidate, but borrow the + /// parts which are potentially heavy. + pub fn partial_clone(&self) -> ProspectiveCandidate { + ProspectiveCandidate { + commitments: Cow::Borrowed(self.commitments.borrow()), + collator: self.collator.clone(), + collator_signature: self.collator_signature.clone(), + persisted_validation_data: self.persisted_validation_data.clone(), + pov_hash: self.pov_hash, + validation_code_hash: self.validation_code_hash, + } + } +} + +#[cfg(test)] +impl ProspectiveCandidate<'static> { + fn commitments_mut(&mut self) -> &mut CandidateCommitments { + self.commitments.to_mut() + } +} + +/// Kinds of errors with the validity of a fragment. 
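+///
+/// Returned by [`Fragment::new`] and [`Fragment::validate_against_constraints`].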
+#[derive(Debug, Clone, PartialEq)]
+pub enum FragmentValidityError {
+	/// The validation code of the candidate doesn't match the
+	/// operating constraints.
+	///
+	/// Expected, Got
+	ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash),
+	/// The persisted-validation-data doesn't match.
+	///
+	/// Expected, Got
+	PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData),
+	/// The outputs of the candidate are invalid under the operating
+	/// constraints.
+	OutputsInvalid(ModificationError),
+	/// New validation code size too big.
+	///
+	/// Max allowed, new.
+	CodeSizeTooLarge(usize, usize),
+	/// Relay parent too old.
+	///
+	/// Min allowed, current.
+	RelayParentTooOld(BlockNumber, BlockNumber),
+	/// Para is required to process at least one DMP message from the queue.
+	DmpAdvancementRule,
+	/// Too many upward messages submitted.
+	UmpMessagesPerCandidateOverflow {
+		/// The amount of messages a single candidate can submit.
+		messages_allowed: usize,
+		/// The amount of upward messages submitted.
+		messages_submitted: usize,
+	},
+	/// Too many messages submitted to all HRMP channels.
+	HrmpMessagesPerCandidateOverflow {
+		/// The amount of messages a single candidate can submit.
+		messages_allowed: usize,
+		/// The amount of messages sent to all HRMP channels.
+		messages_submitted: usize,
+	},
+	/// Code upgrade not allowed.
+	CodeUpgradeRestricted,
+	/// HRMP messages are not ascending or are duplicates.
+	///
+	/// The `usize` is the index into the outbound HRMP messages of
+	/// the candidate.
+	HrmpMessagesDescendingOrDuplicate(usize),
+}
+
+/// A parachain fragment, representing another prospective parachain block.
+///
+/// This is a type which guarantees that the candidate is valid under the
+/// operating constraints.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Fragment<'a> {
+	/// The new relay-parent.
+	relay_parent: RelayChainBlockInfo,
+	/// The constraints this fragment is operating under.
+	operating_constraints: Constraints,
+	/// The core information about the prospective candidate.
+	candidate: ProspectiveCandidate<'a>,
+	/// Modifications to the constraints based on the outputs of
+	/// the candidate.
+	modifications: ConstraintModifications,
+}
+
+impl<'a> Fragment<'a> {
+	/// Create a new fragment.
+	///
+	/// This fails if the fragment isn't in line with the operating
+	/// constraints. That is, either its inputs or its outputs fail
+	/// checks against the constraints.
+	///
+	/// This doesn't check that the collator signature is valid or
+	/// whether the PoV is small enough.
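+	///
+	/// # Example
+	///
+	/// A minimal sketch; `relay_parent`, `operating_constraints`, `candidate` and
+	/// `cumulative_mods` are assumed to be built elsewhere by the caller:
+	///
+	/// ```ignore
+	/// let fragment = Fragment::new(relay_parent, operating_constraints, candidate)?;
+	///
+	/// // Stack the fragment's outputs onto the cumulative modifications, so that
+	/// // operating constraints can be derived for prospective children of this fragment.
+	/// cumulative_mods.stack(fragment.constraint_modifications());
+	/// ```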
+ pub fn new( + relay_parent: RelayChainBlockInfo, + operating_constraints: Constraints, + candidate: ProspectiveCandidate<'a>, + ) -> Result { + let modifications = { + let commitments = &candidate.commitments; + ConstraintModifications { + required_parent: Some(commitments.head_data.clone()), + hrmp_watermark: Some({ + if commitments.hrmp_watermark == relay_parent.number { + HrmpWatermarkUpdate::Head(commitments.hrmp_watermark) + } else { + HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark) + } + }), + outbound_hrmp: { + let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); + + let mut last_recipient = None::; + for (i, message) in commitments.horizontal_messages.iter().enumerate() { + if let Some(last) = last_recipient { + if last >= message.recipient { + return Err( + FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i), + ) + } + } + + last_recipient = Some(message.recipient); + let record = outbound_hrmp.entry(message.recipient).or_default(); + + record.bytes_submitted += message.data.len(); + record.messages_submitted += 1; + } + + outbound_hrmp + }, + ump_messages_sent: commitments.upward_messages.len(), + ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), + dmp_messages_processed: commitments.processed_downward_messages as _, + code_upgrade_applied: operating_constraints + .future_validation_code + .map_or(false, |(at, _)| relay_parent.number >= at), + } + }; + + validate_against_constraints( + &operating_constraints, + &relay_parent, + &candidate, + &modifications, + )?; + + Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) + } + + /// Access the relay parent information. + pub fn relay_parent(&self) -> &RelayChainBlockInfo { + &self.relay_parent + } + + /// Access the operating constraints + pub fn operating_constraints(&self) -> &Constraints { + &self.operating_constraints + } + + /// Access the underlying prospective candidate. + pub fn candidate(&self) -> &ProspectiveCandidate<'a> { + &self.candidate + } + + /// Modifications to constraints based on the outputs of the candidate. + pub fn constraint_modifications(&self) -> &ConstraintModifications { + &self.modifications + } + + /// Convert the fragment into an owned variant. + pub fn into_owned(self) -> Fragment<'static> { + Fragment { candidate: self.candidate.into_owned(), ..self } + } + + /// Validate this fragment against some set of constraints + /// instead of the operating constraints. 
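+	///
+	/// This is useful when the relay-chain has advanced and the fragment must be
+	/// re-checked against the fresh constraints of a new relay-parent.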
+ pub fn validate_against_constraints( + &self, + constraints: &Constraints, + ) -> Result<(), FragmentValidityError> { + validate_against_constraints( + constraints, + &self.relay_parent, + &self.candidate, + &self.modifications, + ) + } +} + +fn validate_against_constraints( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + candidate: &ProspectiveCandidate, + modifications: &ConstraintModifications, +) -> Result<(), FragmentValidityError> { + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + if expected_pvd != candidate.persisted_validation_data { + return Err(FragmentValidityError::PersistedValidationDataMismatch( + expected_pvd, + candidate.persisted_validation_data.clone(), + )) + } + + if constraints.validation_code_hash != candidate.validation_code_hash { + return Err(FragmentValidityError::ValidationCodeMismatch( + constraints.validation_code_hash, + candidate.validation_code_hash, + )) + } + + if relay_parent.number < constraints.min_relay_parent_number { + return Err(FragmentValidityError::RelayParentTooOld( + constraints.min_relay_parent_number, + relay_parent.number, + )) + } + + if candidate.commitments.new_validation_code.is_some() { + match constraints.upgrade_restriction { + None => {}, + Some(UpgradeRestriction::Present) => + return Err(FragmentValidityError::CodeUpgradeRestricted), + } + } + + let announced_code_size = candidate + .commitments + .new_validation_code + .as_ref() + .map_or(0, |code| code.0.len()); + + if announced_code_size > constraints.max_code_size { + return Err(FragmentValidityError::CodeSizeTooLarge( + constraints.max_code_size, + announced_code_size, + )) + } + + if modifications.dmp_messages_processed == 0 { + if constraints + .dmp_remaining_messages + .get(0) + .map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number) + { + return Err(FragmentValidityError::DmpAdvancementRule) + } + } + + if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { + return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_hrmp_num_per_candidate, + messages_submitted: candidate.commitments.horizontal_messages.len(), + }) + } + + if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { + return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_ump_num_per_candidate, + messages_submitted: candidate.commitments.upward_messages.len(), + }) + } + + constraints + .check_modifications(&modifications) + .map_err(FragmentValidityError::OutputsInvalid) +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::{ + CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode, + }; + use sp_application_crypto::Pair; + + #[test] + fn stack_modifications() { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + let a = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + 
ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let b = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let mut c = a.clone(); + c.stack(&b); + + assert_eq!( + c, + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { + bytes_submitted: 200, + messages_submitted: 10, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map + }, + ump_messages_sent: 12, + ump_bytes_sent: 2000, + dmp_messages_processed: 10, + code_upgrade_applied: true, + }, + ); + + let mut d = ConstraintModifications::identity(); + d.stack(&a); + d.stack(&b); + + assert_eq!(c, d); + } + + fn make_constraints() -> Constraints { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + Constraints { + min_relay_parent_number: 5, + max_pov_size: 1000, + max_code_size: 1000, + ump_remaining: 10, + ump_remaining_bytes: 1024, + max_ump_num_per_candidate: 5, + dmp_remaining_messages: Vec::new(), + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, + hrmp_channels_out: { + let mut map = HashMap::new(); + + map.insert( + para_a, + OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelLimitations { + messages_remaining: 10, + bytes_remaining: 1024, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, + ); + + map + }, + max_hrmp_num_per_candidate: 5, + required_parent: HeadData::from(vec![1, 2, 3]), + validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), + upgrade_restriction: None, + future_validation_code: None, + } + } + + #[test] + fn constraints_disallowed_trunk_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + } + + #[test] + fn constraints_always_allow_head_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); + + assert!(constraints.check_modifications(&modifications).is_ok()); + + let new_constraints = constraints.apply_modifications(&modifications).unwrap(); + assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); + } + + #[test] + fn constraints_no_such_hrmp_channel() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let bad_para 
= ParaId::from(100u32); + modifications.outbound_hrmp.insert( + bad_para, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + } + + #[test] + fn constraints_hrmp_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + } + + #[test] + fn constraints_hrmp_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + } + + #[test] + fn constraints_ump_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_messages_sent = 11; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + } + + #[test] + fn constraints_ump_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_bytes_sent = 1025; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + } + + #[test] + fn constraints_dmp_messages() { + let mut constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + assert!(constraints.check_modifications(&modifications).is_ok()); + assert!(constraints.apply_modifications(&modifications).is_ok()); + + modifications.dmp_messages_processed = 6; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 0, + messages_processed: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + 
Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 0, + messages_processed: 6, + }), + ); + + constraints.dmp_remaining_messages = vec![1, 4, 8, 10]; + modifications.dmp_messages_processed = 2; + assert!(constraints.check_modifications(&modifications).is_ok()); + let constraints = constraints + .apply_modifications(&modifications) + .expect("modifications are valid"); + + assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]); + } + + #[test] + fn constraints_nonexistent_code_upgrade() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.code_upgrade_applied = true; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + } + + fn make_candidate( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + ) -> ProspectiveCandidate<'static> { + let collator_pair = CollatorPair::generate().0; + let collator = collator_pair.public(); + + let sig = collator_pair.sign(b"blabla".as_slice()); + + ProspectiveCandidate { + commitments: Cow::Owned(CandidateCommitments { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: HeadData::from(vec![1, 2, 3, 4, 5]), + processed_downward_messages: 0, + hrmp_watermark: relay_parent.number, + }), + collator, + collator_signature: sig, + persisted_validation_data: PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }, + pov_hash: Hash::repeat_byte(1), + validation_code_hash: constraints.validation_code_hash, + } + } + + #[test] + fn fragment_validation_code_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let expected_code = constraints.validation_code_hash; + let got_code = ValidationCode(vec![9, 9, 9]).hash(); + + candidate.validation_code_hash = got_code; + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), + ) + } + + #[test] + fn fragment_pvd_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let relay_parent_b = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0b), + storage_root: Hash::repeat_byte(0xee), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent_b.number, + relay_parent_storage_root: relay_parent_b.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + let got_pvd = candidate.persisted_validation_data.clone(); + + assert_eq!( + Fragment::new(relay_parent_b, constraints, candidate), + Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), + ); + } + + #[test] + fn fragment_code_size_too_large() { + let relay_parent = RelayChainBlockInfo { + number: 6, + 
hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_code_size = constraints.max_code_size; + candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), + ); + } + + #[test] + fn fragment_relay_parent_too_old() { + let relay_parent = RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::RelayParentTooOld(5, 3,)), + ); + } + + #[test] + fn fragment_hrmp_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_hrmp = constraints.max_hrmp_num_per_candidate; + + candidate + .commitments_mut() + .horizontal_messages + .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { + recipient: ParaId::from(i as u32), + data: vec![1, 2, 3], + })) + .unwrap(); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: max_hrmp, + messages_submitted: max_hrmp + 1, + }), + ); + } + + #[test] + fn fragment_dmp_advancement_rule() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + // Empty dmp queue is ok. + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + // Unprocessed message that was sent later is ok. 
+ constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + + for block_number in 0..=relay_parent.number { + constraints.dmp_remaining_messages = vec![block_number]; + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::DmpAdvancementRule), + ); + } + + candidate.commitments.to_mut().processed_downward_messages = 1; + assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); + } + + #[test] + fn fragment_ump_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_ump = constraints.max_ump_num_per_candidate; + + candidate + .commitments + .to_mut() + .upward_messages + .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) + .unwrap(); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { + messages_allowed: max_ump, + messages_submitted: max_ump + 1, + }), + ); + } + + #[test] + fn fragment_code_upgrade_restricted() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + constraints.upgrade_restriction = Some(UpgradeRestriction::Present); + candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeUpgradeRestricted), + ); + } + + #[test] + fn fragment_hrmp_messages_descending_or_duplicate() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]); + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + + candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + } +} diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs deleted file mode 100644 index eb0632297528..000000000000 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs +++ /dev/null @@ -1,1450 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! The implementation of the inclusion emulator for the 'staging' runtime version. -//! -//! # Overview -//! -//! A set of utilities for node-side code to emulate the logic the runtime uses for checking -//! parachain blocks in order to build prospective parachains that are produced ahead of the -//! relay chain. These utilities allow the node-side to predict, with high accuracy, what -//! the relay-chain will accept in the near future. -//! -//! This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] -//! exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`] -//! indicates a parachain block, anchored to the relay-chain at a particular relay-chain block, -//! known as the relay-parent. -//! -//! ## Fragment Validity -//! -//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe -//! the properties that must be true for a block to be included in a direct child of that block, -//! assuming there is no intermediate parachain block pending availability. -//! -//! However, the key factor that makes asynchronously-grown prospective chains -//! possible is the fact that the relay-chain accepts candidate blocks based on whether they -//! are valid under the constraints of the present moment, not based on whether they were -//! valid at the time of construction. -//! -//! As such, [`Fragment`]s are often, but not always constructed in such a way that they are -//! invalid at first and become valid later on, as the relay chain grows. -//! -//! # Usage -//! -//! It's expected that the users of this module will be building up trees of -//! [`Fragment`]s and consistently pruning and adding to the tree. -//! -//! ## Operating Constraints -//! -//! The *operating constraints* of a `Fragment` are the constraints with which that fragment -//! was intended to comply. The operating constraints are defined as the base constraints -//! of the relay-parent of the fragment modified by the cumulative modifications of all -//! fragments between the relay-parent and the current fragment. -//! -//! What the operating constraints are, in practice, is a prediction about the state of the -//! relay-chain in the future. The relay-chain is aware of some current state, and we want to -//! make an intelligent prediction about what might be accepted in the future based on -//! prior fragments that also exist off-chain. -//! -//! ## Fragment Trees -//! -//! As the relay-chain grows, some predictions come true and others come false. -//! And new predictions get made. These three changes correspond distinctly to the -//! 3 primary operations on fragment trees. -//! -//! A fragment tree is a mental model for thinking about a forking series of predictions -//! about a single parachain. There may be one or more fragment trees per parachain. -//! -//! In expectation, most parachains will have a plausibly-unique authorship method which means that -//! 
they should really be much closer to fragment-chains, maybe with an occasional fork. -//! -//! Avoiding fragment-tree blowup is beyond the scope of this module. -//! -//! ### Pruning Fragment Trees -//! -//! When the relay-chain advances, we want to compare the new constraints of that relay-parent to -//! the roots of the fragment trees we have. There are 3 cases: -//! -//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This -//! is the "prediction still uncertain" case. -//! -//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the -//! relay-chain. In this case, we can discard the root and split & re-root the fragment tree under -//! its descendents and compare to the new constraints again. This is the "prediction came true" -//! case. -//! -//! 3. The root fragment is invalid under the new constraints because a competing parachain block -//! has been included or it would never be accepted for some other reason. In this case we can -//! discard the entire fragment tree. This is the "prediction came false" case. -//! -//! This is all a bit of a simplification because it assumes that the relay-chain advances without -//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable -//! from the perspective of a few different possible forks of the relay-chain and not pruned -//! too eagerly. -//! -//! Note that the fragments themselves don't need to change and the only thing we care about -//! is whether the predictions they represent are still valid. -//! -//! ### Extending Fragment Trees -//! -//! As predictions fade into the past, new ones should be stacked on top. -//! -//! Every new relay-chain block is an opportunity to make a new prediction about the future. -//! Higher-level logic should select the leaves of the fragment-trees to build upon or whether -//! to create a new fragment-tree. -//! -//! ### Code Upgrades -//! -//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling -//! logic is very path-dependent and intricate so we just assume that code upgrades -//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep, -//! in practice and code upgrades are fairly rare. So what's likely to happen around code -//! upgrades is that the entire fragment-tree has to get discarded at some point. -//! -//! That means a few blocks of execution time lost, which is not a big deal for code upgrades -//! in practice at most once every few weeks. - -use polkadot_primitives::vstaging::{ - BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, - Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData, - UpgradeRestriction, ValidationCodeHash, -}; -use std::{ - borrow::{Borrow, Cow}, - collections::HashMap, -}; - -/// Constraints on inbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] -pub struct InboundHrmpLimitations { - /// An exhaustive set of all valid watermarks, sorted ascending - pub valid_watermarks: Vec, -} - -/// Constraints on outbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] -pub struct OutboundHrmpChannelLimitations { - /// The maximum bytes that can be written to the channel. - pub bytes_remaining: usize, - /// The maximum messages that can be written to the channel. - pub messages_remaining: usize, -} - -/// Constraints on the actions that can be taken by a new parachain -/// block. 
These limitations are implicitly associated with some particular -/// parachain, which should be apparent from usage. -#[derive(Debug, Clone, PartialEq)] -pub struct Constraints { - /// The minimum relay-parent number accepted under these constraints. - pub min_relay_parent_number: BlockNumber, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: usize, - /// The maximum new validation code size allowed, in bytes. - pub max_code_size: usize, - /// The amount of UMP messages remaining. - pub ump_remaining: usize, - /// The amount of UMP bytes remaining. - pub ump_remaining_bytes: usize, - /// The maximum number of UMP messages allowed per candidate. - pub max_ump_num_per_candidate: usize, - /// Remaining DMP queue. Only includes sent-at block numbers. - pub dmp_remaining_messages: Vec, - /// The limitations of all registered inbound HRMP channels. - pub hrmp_inbound: InboundHrmpLimitations, - /// The limitations of all registered outbound HRMP channels. - pub hrmp_channels_out: HashMap, - /// The maximum number of HRMP messages allowed per candidate. - pub max_hrmp_num_per_candidate: usize, - /// The required parent head-data of the parachain. - pub required_parent: HeadData, - /// The expected validation-code-hash of this parachain. - pub validation_code_hash: ValidationCodeHash, - /// The code upgrade restriction signal as-of this parachain. - pub upgrade_restriction: Option, - /// The future validation code hash, if any, and at what relay-parent - /// number the upgrade would be minimally applied. - pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, -} - -impl From for Constraints { - fn from(c: PrimitiveConstraints) -> Self { - Constraints { - min_relay_parent_number: c.min_relay_parent_number, - max_pov_size: c.max_pov_size as _, - max_code_size: c.max_code_size as _, - ump_remaining: c.ump_remaining as _, - ump_remaining_bytes: c.ump_remaining_bytes as _, - max_ump_num_per_candidate: c.max_ump_num_per_candidate as _, - dmp_remaining_messages: c.dmp_remaining_messages, - hrmp_inbound: InboundHrmpLimitations { - valid_watermarks: c.hrmp_inbound.valid_watermarks, - }, - hrmp_channels_out: c - .hrmp_channels_out - .into_iter() - .map(|(para_id, limits)| { - ( - para_id, - OutboundHrmpChannelLimitations { - bytes_remaining: limits.bytes_remaining as _, - messages_remaining: limits.messages_remaining as _, - }, - ) - }) - .collect(), - max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _, - required_parent: c.required_parent, - validation_code_hash: c.validation_code_hash, - upgrade_restriction: c.upgrade_restriction, - future_validation_code: c.future_validation_code, - } - } -} - -/// Kinds of errors that can occur when modifying constraints. -#[derive(Debug, Clone, PartialEq)] -pub enum ModificationError { - /// The HRMP watermark is not allowed. - DisallowedHrmpWatermark(BlockNumber), - /// No such HRMP outbound channel. - NoSuchHrmpChannel(ParaId), - /// Too many messages submitted to HRMP channel. - HrmpMessagesOverflow { - /// The ID of the recipient. - para_id: ParaId, - /// The amount of remaining messages in the capacity of the channel. - messages_remaining: usize, - /// The amount of messages submitted to the channel. - messages_submitted: usize, - }, - /// Too many bytes submitted to HRMP channel. - HrmpBytesOverflow { - /// The ID of the recipient. - para_id: ParaId, - /// The amount of remaining bytes in the capacity of the channel. - bytes_remaining: usize, - /// The amount of bytes submitted to the channel. 
- bytes_submitted: usize, - }, - /// Too many messages submitted to UMP. - UmpMessagesOverflow { - /// The amount of remaining messages in the capacity of UMP. - messages_remaining: usize, - /// The amount of messages submitted to UMP. - messages_submitted: usize, - }, - /// Too many bytes submitted to UMP. - UmpBytesOverflow { - /// The amount of remaining bytes in the capacity of UMP. - bytes_remaining: usize, - /// The amount of bytes submitted to UMP. - bytes_submitted: usize, - }, - /// Too many messages processed from DMP. - DmpMessagesUnderflow { - /// The amount of messages waiting to be processed from DMP. - messages_remaining: usize, - /// The amount of messages processed. - messages_processed: usize, - }, - /// No validation code upgrade to apply. - AppliedNonexistentCodeUpgrade, -} - -impl Constraints { - /// Check modifications against constraints. - pub fn check_modifications( - &self, - modifications: &ConstraintModifications, - ) -> Result<(), ModificationError> { - if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { - // head updates are always valid. - if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) { - return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) - } - } - - for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { - if let Some(outbound) = self.hrmp_channels_out.get(&id) { - outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or( - ModificationError::HrmpBytesOverflow { - para_id: *id, - bytes_remaining: outbound.bytes_remaining, - bytes_submitted: outbound_hrmp_mod.bytes_submitted, - }, - )?; - - outbound - .messages_remaining - .checked_sub(outbound_hrmp_mod.messages_submitted) - .ok_or(ModificationError::HrmpMessagesOverflow { - para_id: *id, - messages_remaining: outbound.messages_remaining, - messages_submitted: outbound_hrmp_mod.messages_submitted, - })?; - } else { - return Err(ModificationError::NoSuchHrmpChannel(*id)) - } - } - - self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( - ModificationError::UmpMessagesOverflow { - messages_remaining: self.ump_remaining, - messages_submitted: modifications.ump_messages_sent, - }, - )?; - - self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or( - ModificationError::UmpBytesOverflow { - bytes_remaining: self.ump_remaining_bytes, - bytes_submitted: modifications.ump_bytes_sent, - }, - )?; - - self.dmp_remaining_messages - .len() - .checked_sub(modifications.dmp_messages_processed) - .ok_or(ModificationError::DmpMessagesUnderflow { - messages_remaining: self.dmp_remaining_messages.len(), - messages_processed: modifications.dmp_messages_processed, - })?; - - if self.future_validation_code.is_none() && modifications.code_upgrade_applied { - return Err(ModificationError::AppliedNonexistentCodeUpgrade) - } - - Ok(()) - } - - /// Apply modifications to these constraints. If this succeeds, it passes - /// all sanity-checks. - pub fn apply_modifications( - &self, - modifications: &ConstraintModifications, - ) -> Result { - let mut new = self.clone(); - - if let Some(required_parent) = modifications.required_parent.as_ref() { - new.required_parent = required_parent.clone(); - } - - if let Some(ref hrmp_watermark) = modifications.hrmp_watermark { - match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) { - Ok(pos) => { - // Exact match, so this is OK in all cases. 
- let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); - }, - Err(pos) => match hrmp_watermark { - HrmpWatermarkUpdate::Head(_) => { - // Updates to Head are always OK. - let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); - }, - HrmpWatermarkUpdate::Trunk(n) => { - // Trunk update landing on disallowed watermark is not OK. - return Err(ModificationError::DisallowedHrmpWatermark(*n)) - }, - }, - } - } - - for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { - if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) { - outbound.bytes_remaining = outbound - .bytes_remaining - .checked_sub(outbound_hrmp_mod.bytes_submitted) - .ok_or(ModificationError::HrmpBytesOverflow { - para_id: *id, - bytes_remaining: outbound.bytes_remaining, - bytes_submitted: outbound_hrmp_mod.bytes_submitted, - })?; - - outbound.messages_remaining = outbound - .messages_remaining - .checked_sub(outbound_hrmp_mod.messages_submitted) - .ok_or(ModificationError::HrmpMessagesOverflow { - para_id: *id, - messages_remaining: outbound.messages_remaining, - messages_submitted: outbound_hrmp_mod.messages_submitted, - })?; - } else { - return Err(ModificationError::NoSuchHrmpChannel(*id)) - } - } - - new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( - ModificationError::UmpMessagesOverflow { - messages_remaining: new.ump_remaining, - messages_submitted: modifications.ump_messages_sent, - }, - )?; - - new.ump_remaining_bytes = new - .ump_remaining_bytes - .checked_sub(modifications.ump_bytes_sent) - .ok_or(ModificationError::UmpBytesOverflow { - bytes_remaining: new.ump_remaining_bytes, - bytes_submitted: modifications.ump_bytes_sent, - })?; - - if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() { - return Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: new.dmp_remaining_messages.len(), - messages_processed: modifications.dmp_messages_processed, - }) - } else { - new.dmp_remaining_messages = - new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec(); - } - - if modifications.code_upgrade_applied { - new.validation_code_hash = new - .future_validation_code - .take() - .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? - .1; - } - - Ok(new) - } -} - -/// Information about a relay-chain block. -#[derive(Debug, Clone, PartialEq)] -pub struct RelayChainBlockInfo { - /// The hash of the relay-chain block. - pub hash: Hash, - /// The number of the relay-chain block. - pub number: BlockNumber, - /// The storage-root of the relay-chain block. - pub storage_root: Hash, -} - -/// An update to outbound HRMP channels. -#[derive(Debug, Clone, PartialEq, Default)] -pub struct OutboundHrmpChannelModification { - /// The number of bytes submitted to the channel. - pub bytes_submitted: usize, - /// The number of messages submitted to the channel. - pub messages_submitted: usize, -} - -/// An update to the HRMP Watermark. -#[derive(Debug, Clone, PartialEq)] -pub enum HrmpWatermarkUpdate { - /// This is an update placing the watermark at the head of the chain, - /// which is always legal. - Head(BlockNumber), - /// This is an update placing the watermark behind the head of the - /// chain, which is only legal if it lands on a block where messages - /// were queued. 
- Trunk(BlockNumber), -} - -impl HrmpWatermarkUpdate { - fn watermark(&self) -> BlockNumber { - match *self { - HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n, - } - } -} - -/// Modifications to constraints as a result of prospective candidates. -#[derive(Debug, Clone, PartialEq)] -pub struct ConstraintModifications { - /// The required parent head to build upon. - pub required_parent: Option, - /// The new HRMP watermark - pub hrmp_watermark: Option, - /// Outbound HRMP channel modifications. - pub outbound_hrmp: HashMap, - /// The amount of UMP messages sent. - pub ump_messages_sent: usize, - /// The amount of UMP bytes sent. - pub ump_bytes_sent: usize, - /// The amount of DMP messages processed. - pub dmp_messages_processed: usize, - /// Whether a pending code upgrade has been applied. - pub code_upgrade_applied: bool, -} - -impl ConstraintModifications { - /// The 'identity' modifications: these can be applied to - /// any constraints and yield the exact same result. - pub fn identity() -> Self { - ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: HashMap::new(), - ump_messages_sent: 0, - ump_bytes_sent: 0, - dmp_messages_processed: 0, - code_upgrade_applied: false, - } - } - - /// Stack other modifications on top of these. - /// - /// This does no sanity-checking, so if `other` is garbage relative - /// to `self`, then the new value will be garbage as well. - /// - /// This is an addition which is not commutative. - pub fn stack(&mut self, other: &Self) { - if let Some(ref new_parent) = other.required_parent { - self.required_parent = Some(new_parent.clone()); - } - if let Some(ref new_hrmp_watermark) = other.hrmp_watermark { - self.hrmp_watermark = Some(new_hrmp_watermark.clone()); - } - - for (id, mods) in &other.outbound_hrmp { - let record = self.outbound_hrmp.entry(*id).or_default(); - record.messages_submitted += mods.messages_submitted; - record.bytes_submitted += mods.bytes_submitted; - } - - self.ump_messages_sent += other.ump_messages_sent; - self.ump_bytes_sent += other.ump_bytes_sent; - self.dmp_messages_processed += other.dmp_messages_processed; - self.code_upgrade_applied |= other.code_upgrade_applied; - } -} - -/// The prospective candidate. -/// -/// This comprises the key information that represent a candidate -/// without pinning it to a particular session. For example, everything -/// to do with the collator's signature and commitments are represented -/// here. But the erasure-root is not. This means that prospective candidates -/// are not correlated to any session in particular. -#[derive(Debug, Clone, PartialEq)] -pub struct ProspectiveCandidate<'a> { - /// The commitments to the output of the execution. - pub commitments: Cow<'a, CandidateCommitments>, - /// The collator that created the candidate. - pub collator: CollatorId, - /// The signature of the collator on the payload. - pub collator_signature: CollatorSignature, - /// The persisted validation data used to create the candidate. - pub persisted_validation_data: PersistedValidationData, - /// The hash of the PoV. - pub pov_hash: Hash, - /// The validation code hash used by the candidate. - pub validation_code_hash: ValidationCodeHash, -} - -impl<'a> ProspectiveCandidate<'a> { - fn into_owned(self) -> ProspectiveCandidate<'static> { - ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } - } - - /// Partially clone the prospective candidate, but borrow the - /// parts which are potentially heavy. 
- pub fn partial_clone(&self) -> ProspectiveCandidate { - ProspectiveCandidate { - commitments: Cow::Borrowed(self.commitments.borrow()), - collator: self.collator.clone(), - collator_signature: self.collator_signature.clone(), - persisted_validation_data: self.persisted_validation_data.clone(), - pov_hash: self.pov_hash, - validation_code_hash: self.validation_code_hash, - } - } -} - -#[cfg(test)] -impl ProspectiveCandidate<'static> { - fn commitments_mut(&mut self) -> &mut CandidateCommitments { - self.commitments.to_mut() - } -} - -/// Kinds of errors with the validity of a fragment. -#[derive(Debug, Clone, PartialEq)] -pub enum FragmentValidityError { - /// The validation code of the candidate doesn't match the - /// operating constraints. - /// - /// Expected, Got - ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash), - /// The persisted-validation-data doesn't match. - /// - /// Expected, Got - PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData), - /// The outputs of the candidate are invalid under the operating - /// constraints. - OutputsInvalid(ModificationError), - /// New validation code size too big. - /// - /// Max allowed, new. - CodeSizeTooLarge(usize, usize), - /// Relay parent too old. - /// - /// Min allowed, current. - RelayParentTooOld(BlockNumber, BlockNumber), - /// Para is required to process at least one DMP message from the queue. - DmpAdvancementRule, - /// Too many messages upward messages submitted. - UmpMessagesPerCandidateOverflow { - /// The amount of messages a single candidate can submit. - messages_allowed: usize, - /// The amount of messages sent to all HRMP channels. - messages_submitted: usize, - }, - /// Too many messages submitted to all HRMP channels. - HrmpMessagesPerCandidateOverflow { - /// The amount of messages a single candidate can submit. - messages_allowed: usize, - /// The amount of messages sent to all HRMP channels. - messages_submitted: usize, - }, - /// Code upgrade not allowed. - CodeUpgradeRestricted, - /// HRMP messages are not ascending or are duplicate. - /// - /// The `usize` is the index into the outbound HRMP messages of - /// the candidate. - HrmpMessagesDescendingOrDuplicate(usize), -} - -/// A parachain fragment, representing another prospective parachain block. -/// -/// This is a type which guarantees that the candidate is valid under the -/// operating constraints. -#[derive(Debug, Clone, PartialEq)] -pub struct Fragment<'a> { - /// The new relay-parent. - relay_parent: RelayChainBlockInfo, - /// The constraints this fragment is operating under. - operating_constraints: Constraints, - /// The core information about the prospective candidate. - candidate: ProspectiveCandidate<'a>, - /// Modifications to the constraints based on the outputs of - /// the candidate. - modifications: ConstraintModifications, -} - -impl<'a> Fragment<'a> { - /// Create a new fragment. - /// - /// This fails if the fragment isn't in line with the operating - /// constraints. That is, either its inputs or its outputs fail - /// checks against the constraints. - /// - /// This doesn't check that the collator signature is valid or - /// whether the PoV is small enough. 
- pub fn new( - relay_parent: RelayChainBlockInfo, - operating_constraints: Constraints, - candidate: ProspectiveCandidate<'a>, - ) -> Result { - let modifications = { - let commitments = &candidate.commitments; - ConstraintModifications { - required_parent: Some(commitments.head_data.clone()), - hrmp_watermark: Some({ - if commitments.hrmp_watermark == relay_parent.number { - HrmpWatermarkUpdate::Head(commitments.hrmp_watermark) - } else { - HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark) - } - }), - outbound_hrmp: { - let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); - - let mut last_recipient = None::; - for (i, message) in commitments.horizontal_messages.iter().enumerate() { - if let Some(last) = last_recipient { - if last >= message.recipient { - return Err( - FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i), - ) - } - } - - last_recipient = Some(message.recipient); - let record = outbound_hrmp.entry(message.recipient).or_default(); - - record.bytes_submitted += message.data.len(); - record.messages_submitted += 1; - } - - outbound_hrmp - }, - ump_messages_sent: commitments.upward_messages.len(), - ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), - dmp_messages_processed: commitments.processed_downward_messages as _, - code_upgrade_applied: operating_constraints - .future_validation_code - .map_or(false, |(at, _)| relay_parent.number >= at), - } - }; - - validate_against_constraints( - &operating_constraints, - &relay_parent, - &candidate, - &modifications, - )?; - - Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) - } - - /// Access the relay parent information. - pub fn relay_parent(&self) -> &RelayChainBlockInfo { - &self.relay_parent - } - - /// Access the operating constraints - pub fn operating_constraints(&self) -> &Constraints { - &self.operating_constraints - } - - /// Access the underlying prospective candidate. - pub fn candidate(&self) -> &ProspectiveCandidate<'a> { - &self.candidate - } - - /// Modifications to constraints based on the outputs of the candidate. - pub fn constraint_modifications(&self) -> &ConstraintModifications { - &self.modifications - } - - /// Convert the fragment into an owned variant. - pub fn into_owned(self) -> Fragment<'static> { - Fragment { candidate: self.candidate.into_owned(), ..self } - } - - /// Validate this fragment against some set of constraints - /// instead of the operating constraints. 
- pub fn validate_against_constraints( - &self, - constraints: &Constraints, - ) -> Result<(), FragmentValidityError> { - validate_against_constraints( - constraints, - &self.relay_parent, - &self.candidate, - &self.modifications, - ) - } -} - -fn validate_against_constraints( - constraints: &Constraints, - relay_parent: &RelayChainBlockInfo, - candidate: &ProspectiveCandidate, - modifications: &ConstraintModifications, -) -> Result<(), FragmentValidityError> { - let expected_pvd = PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent.number, - relay_parent_storage_root: relay_parent.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }; - - if expected_pvd != candidate.persisted_validation_data { - return Err(FragmentValidityError::PersistedValidationDataMismatch( - expected_pvd, - candidate.persisted_validation_data.clone(), - )) - } - - if constraints.validation_code_hash != candidate.validation_code_hash { - return Err(FragmentValidityError::ValidationCodeMismatch( - constraints.validation_code_hash, - candidate.validation_code_hash, - )) - } - - if relay_parent.number < constraints.min_relay_parent_number { - return Err(FragmentValidityError::RelayParentTooOld( - constraints.min_relay_parent_number, - relay_parent.number, - )) - } - - if candidate.commitments.new_validation_code.is_some() { - match constraints.upgrade_restriction { - None => {}, - Some(UpgradeRestriction::Present) => - return Err(FragmentValidityError::CodeUpgradeRestricted), - } - } - - let announced_code_size = candidate - .commitments - .new_validation_code - .as_ref() - .map_or(0, |code| code.0.len()); - - if announced_code_size > constraints.max_code_size { - return Err(FragmentValidityError::CodeSizeTooLarge( - constraints.max_code_size, - announced_code_size, - )) - } - - if modifications.dmp_messages_processed == 0 { - if constraints - .dmp_remaining_messages - .get(0) - .map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number) - { - return Err(FragmentValidityError::DmpAdvancementRule) - } - } - - if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { - return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { - messages_allowed: constraints.max_hrmp_num_per_candidate, - messages_submitted: candidate.commitments.horizontal_messages.len(), - }) - } - - if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { - return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { - messages_allowed: constraints.max_ump_num_per_candidate, - messages_submitted: candidate.commitments.upward_messages.len(), - }) - } - - constraints - .check_modifications(&modifications) - .map_err(FragmentValidityError::OutputsInvalid) -} - -#[cfg(test)] -mod tests { - use super::*; - use polkadot_primitives::vstaging::{ - CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode, - }; - use sp_application_crypto::Pair; - - #[test] - fn stack_modifications() { - let para_a = ParaId::from(1u32); - let para_b = ParaId::from(2u32); - let para_c = ParaId::from(3u32); - - let a = ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map.insert( - para_b, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map - }, - ump_messages_sent: 6, - 
ump_bytes_sent: 1000, - dmp_messages_processed: 5, - code_upgrade_applied: true, - }; - - let b = ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_b, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map.insert( - para_c, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map - }, - ump_messages_sent: 6, - ump_bytes_sent: 1000, - dmp_messages_processed: 5, - code_upgrade_applied: true, - }; - - let mut c = a.clone(); - c.stack(&b); - - assert_eq!( - c, - ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_a, - OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }, - ); - - map.insert( - para_b, - OutboundHrmpChannelModification { - bytes_submitted: 200, - messages_submitted: 10, - }, - ); - - map.insert( - para_c, - OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }, - ); - - map - }, - ump_messages_sent: 12, - ump_bytes_sent: 2000, - dmp_messages_processed: 10, - code_upgrade_applied: true, - }, - ); - - let mut d = ConstraintModifications::identity(); - d.stack(&a); - d.stack(&b); - - assert_eq!(c, d); - } - - fn make_constraints() -> Constraints { - let para_a = ParaId::from(1u32); - let para_b = ParaId::from(2u32); - let para_c = ParaId::from(3u32); - - Constraints { - min_relay_parent_number: 5, - max_pov_size: 1000, - max_code_size: 1000, - ump_remaining: 10, - ump_remaining_bytes: 1024, - max_ump_num_per_candidate: 5, - dmp_remaining_messages: Vec::new(), - hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, - hrmp_channels_out: { - let mut map = HashMap::new(); - - map.insert( - para_a, - OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, - ); - - map.insert( - para_b, - OutboundHrmpChannelLimitations { - messages_remaining: 10, - bytes_remaining: 1024, - }, - ); - - map.insert( - para_c, - OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, - ); - - map - }, - max_hrmp_num_per_candidate: 5, - required_parent: HeadData::from(vec![1, 2, 3]), - validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), - upgrade_restriction: None, - future_validation_code: None, - } - } - - #[test] - fn constraints_disallowed_trunk_watermark() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::DisallowedHrmpWatermark(7)), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::DisallowedHrmpWatermark(7)), - ); - } - - #[test] - fn constraints_always_allow_head_watermark() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); - - assert!(constraints.check_modifications(&modifications).is_ok()); - - let new_constraints = constraints.apply_modifications(&modifications).unwrap(); - assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); - } - - #[test] - fn constraints_no_such_hrmp_channel() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let bad_para 
= ParaId::from(100u32); - modifications.outbound_hrmp.insert( - bad_para, - OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::NoSuchHrmpChannel(bad_para)), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::NoSuchHrmpChannel(bad_para)), - ); - } - - #[test] - fn constraints_hrmp_messages_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let para_a = ParaId::from(1u32); - modifications.outbound_hrmp.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::HrmpMessagesOverflow { - para_id: para_a, - messages_remaining: 5, - messages_submitted: 6, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::HrmpMessagesOverflow { - para_id: para_a, - messages_remaining: 5, - messages_submitted: 6, - }), - ); - } - - #[test] - fn constraints_hrmp_bytes_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let para_a = ParaId::from(1u32); - modifications.outbound_hrmp.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::HrmpBytesOverflow { - para_id: para_a, - bytes_remaining: 512, - bytes_submitted: 513, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::HrmpBytesOverflow { - para_id: para_a, - bytes_remaining: 512, - bytes_submitted: 513, - }), - ); - } - - #[test] - fn constraints_ump_messages_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.ump_messages_sent = 11; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::UmpMessagesOverflow { - messages_remaining: 10, - messages_submitted: 11, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::UmpMessagesOverflow { - messages_remaining: 10, - messages_submitted: 11, - }), - ); - } - - #[test] - fn constraints_ump_bytes_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.ump_bytes_sent = 1025; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::UmpBytesOverflow { - bytes_remaining: 1024, - bytes_submitted: 1025, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::UmpBytesOverflow { - bytes_remaining: 1024, - bytes_submitted: 1025, - }), - ); - } - - #[test] - fn constraints_dmp_messages() { - let mut constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - assert!(constraints.check_modifications(&modifications).is_ok()); - assert!(constraints.apply_modifications(&modifications).is_ok()); - - modifications.dmp_messages_processed = 6; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 0, - messages_processed: 6, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - 
Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 0, - messages_processed: 6, - }), - ); - - constraints.dmp_remaining_messages = vec![1, 4, 8, 10]; - modifications.dmp_messages_processed = 2; - assert!(constraints.check_modifications(&modifications).is_ok()); - let constraints = constraints - .apply_modifications(&modifications) - .expect("modifications are valid"); - - assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]); - } - - #[test] - fn constraints_nonexistent_code_upgrade() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.code_upgrade_applied = true; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::AppliedNonexistentCodeUpgrade), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::AppliedNonexistentCodeUpgrade), - ); - } - - fn make_candidate( - constraints: &Constraints, - relay_parent: &RelayChainBlockInfo, - ) -> ProspectiveCandidate<'static> { - let collator_pair = CollatorPair::generate().0; - let collator = collator_pair.public(); - - let sig = collator_pair.sign(b"blabla".as_slice()); - - ProspectiveCandidate { - commitments: Cow::Owned(CandidateCommitments { - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - head_data: HeadData::from(vec![1, 2, 3, 4, 5]), - processed_downward_messages: 0, - hrmp_watermark: relay_parent.number, - }), - collator, - collator_signature: sig, - persisted_validation_data: PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent.number, - relay_parent_storage_root: relay_parent.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }, - pov_hash: Hash::repeat_byte(1), - validation_code_hash: constraints.validation_code_hash, - } - } - - #[test] - fn fragment_validation_code_mismatch() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let expected_code = constraints.validation_code_hash; - let got_code = ValidationCode(vec![9, 9, 9]).hash(); - - candidate.validation_code_hash = got_code; - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), - ) - } - - #[test] - fn fragment_pvd_mismatch() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let relay_parent_b = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0b), - storage_root: Hash::repeat_byte(0xee), - }; - - let constraints = make_constraints(); - let candidate = make_candidate(&constraints, &relay_parent); - - let expected_pvd = PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent_b.number, - relay_parent_storage_root: relay_parent_b.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }; - - let got_pvd = candidate.persisted_validation_data.clone(); - - assert_eq!( - Fragment::new(relay_parent_b, constraints, candidate), - Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), - ); - } - - #[test] - fn fragment_code_size_too_large() { - let relay_parent = RelayChainBlockInfo { - number: 6, - 
hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_code_size = constraints.max_code_size; - candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), - ); - } - - #[test] - fn fragment_relay_parent_too_old() { - let relay_parent = RelayChainBlockInfo { - number: 3, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let candidate = make_candidate(&constraints, &relay_parent); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::RelayParentTooOld(5, 3,)), - ); - } - - #[test] - fn fragment_hrmp_messages_overflow() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_hrmp = constraints.max_hrmp_num_per_candidate; - - candidate - .commitments_mut() - .horizontal_messages - .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { - recipient: ParaId::from(i as u32), - data: vec![1, 2, 3], - })) - .unwrap(); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { - messages_allowed: max_hrmp, - messages_submitted: max_hrmp + 1, - }), - ); - } - - #[test] - fn fragment_dmp_advancement_rule() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let mut constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - // Empty dmp queue is ok. - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); - // Unprocessed message that was sent later is ok. 
- constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); - - for block_number in 0..=relay_parent.number { - constraints.dmp_remaining_messages = vec![block_number]; - - assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), - Err(FragmentValidityError::DmpAdvancementRule), - ); - } - - candidate.commitments.to_mut().processed_downward_messages = 1; - assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); - } - - #[test] - fn fragment_ump_messages_overflow() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_ump = constraints.max_ump_num_per_candidate; - - candidate - .commitments - .to_mut() - .upward_messages - .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) - .unwrap(); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { - messages_allowed: max_ump, - messages_submitted: max_ump + 1, - }), - ); - } - - #[test] - fn fragment_code_upgrade_restricted() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let mut constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - constraints.upgrade_restriction = Some(UpgradeRestriction::Present); - candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::CodeUpgradeRestricted), - ); - } - - #[test] - fn fragment_hrmp_messages_descending_or_duplicate() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, - ]); - - assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), - Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), - ); - - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ - OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, - ]); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), - ); - } -} diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index daee4a8350e5..e60a9ff82eeb 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -43,7 +43,7 @@ use futures::channel::{mpsc, oneshot}; use parity_scale_codec::Encode; use polkadot_primitives::{ - vstaging as vstaging_primitives, AuthorityDiscoveryId, CandidateEvent, CandidateHash, + AsyncBackingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash, 
CommittedCandidateReceipt, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, ValidationCodeHash, @@ -227,7 +227,7 @@ specialize_requests! { fn request_key_ownership_proof(validator_id: ValidatorId) -> Option; KeyOwnershipProof; fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost; - fn request_staging_async_backing_params() -> vstaging_primitives::AsyncBackingParams; StagingAsyncBackingParams; + fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; } /// Requests executor parameters from the runtime effective at given relay-parent. First obtains diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index c078b17d2175..77bcd62f9c9c 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,16 +30,16 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - vstaging, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, + ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, + ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use crate::{ - request_availability_cores, request_candidate_events, request_from_runtime, - request_key_ownership_proof, request_on_chain_votes, request_session_executor_params, - request_session_index_for_child, request_session_info, request_staging_async_backing_params, + request_async_backing_params, request_availability_cores, request_candidate_events, + request_from_runtime, request_key_ownership_proof, request_on_chain_votes, + request_session_executor_params, request_session_index_for_child, request_session_info, request_submit_report_dispute_lost, request_unapplied_slashes, request_validation_code_by_hash, request_validator_groups, }; @@ -377,7 +377,7 @@ where pub async fn get_unapplied_slashes( sender: &mut Sender, relay_parent: Hash, -) -> Result> +) -> Result> where Sender: SubsystemSender, { @@ -392,7 +392,7 @@ pub async fn key_ownership_proof( sender: &mut Sender, relay_parent: Hash, validator_id: ValidatorId, -) -> Result> +) -> Result> where Sender: SubsystemSender, { @@ -403,8 +403,8 @@ where pub async fn submit_report_dispute_lost( sender: &mut Sender, relay_parent: Hash, - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Result> where Sender: SubsystemSender, @@ -457,8 +457,7 @@ pub async fn prospective_parachains_mode( where Sender: SubsystemSender, { - let result = - recv_runtime(request_staging_async_backing_params(relay_parent, sender).await).await; + let result = recv_runtime(request_async_backing_params(relay_parent, 
sender).await).await; if let Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { runtime_api_name })) = &result @@ -472,7 +471,7 @@ where Ok(ProspectiveParachainsMode::Disabled) } else { - let vstaging::AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?; + let AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?; Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth: max_candidate_depth as _, allowed_ancestry_len: allowed_ancestry_len as _, diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 9121b3790858..5adb6d253134 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -19,8 +19,8 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -// `v5` is currently the latest stable version of the runtime API. -pub mod v5; +// `v6` is currently the latest stable version of the runtime API. +pub mod v6; // The 'staging' version is special - it contains primitives which are // still in development. Once they are considered stable, they will be @@ -33,20 +33,21 @@ pub mod runtime_api; // Current primitives not requiring versioning are exported here. // Primitives requiring versioning must not be exported and must be referred by an exact version. -pub use v5::{ - byzantine_threshold, check_candidate_backing, collator_signature_payload, +pub use v6::{ + async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload, effective_minimum_backing_votes, metric_definitions, slashing, supermajority_threshold, well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, AccountId, AccountIndex, - AccountPublic, ApprovalVote, AssignmentId, AuthorityDiscoveryId, AvailabilityBitfield, - BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments, - CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, CollatorSignature, - CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreState, DisputeState, - DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam, - ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, - Hash, HashT, HeadData, Header, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, - IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, - OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, + AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, + AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, + CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, + CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, + ExecutorParam, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, + GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, + InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, + InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, OccupiedCore, + OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, 
ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind, RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, @@ -61,4 +62,4 @@ pub use v5::{ }; #[cfg(feature = "std")] -pub use v5::{AssignmentPair, CollatorPair, ValidatorPair}; +pub use v6::{AssignmentPair, CollatorPair, ValidatorPair}; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index e5f1aa4276ef..6cb66d40204d 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,10 +114,11 @@ //! separated from the stable primitives. use crate::{ - vstaging, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + ValidatorSignature, }; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives as pcp; @@ -224,38 +225,37 @@ sp_api::decl_runtime_apis! { /// Returns a list of validators that lost a past session dispute and need to be slashed. /// NOTE: This function is only available since parachain host version 5. - fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>; + fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>; /// Returns a merkle proof of a validator session key. /// NOTE: This function is only available since parachain host version 5. fn key_ownership_proof( validator_id: ValidatorId, - ) -> Option; + ) -> Option; /// Submit an unsigned extrinsic to slash validators who lost a dispute about /// a candidate of a past session. /// NOTE: This function is only available since parachain host version 5. fn submit_report_dispute_lost( - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Option<()>; - /***** Staging *****/ + /***** Added in v6 *****/ /// Get the minimum number of backing votes for a parachain candidate. /// This is a staging method! Do not use on production runtimes! #[api_version(6)] fn minimum_backing_votes() -> u32; - /***** Asynchronous backing *****/ + /***** Added in v7: Asynchronous backing *****/ /// Returns the state of parachain backing for a given para. - /// This is a staging method! Do not use on production runtimes! - #[api_version(99)] - fn staging_para_backing_state(_: ppp::Id) -> Option>; + #[api_version(7)] + fn para_backing_state(_: ppp::Id) -> Option>; /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. 
-		#[api_version(99)]
-		fn staging_async_backing_params() -> vstaging::AsyncBackingParams;
+		#[api_version(7)]
+		fn async_backing_params() -> AsyncBackingParams;
 	}
 }
diff --git a/polkadot/primitives/src/v6/async_backing.rs b/polkadot/primitives/src/v6/async_backing.rs
new file mode 100644
index 000000000000..1abe87b6dec4
--- /dev/null
+++ b/polkadot/primitives/src/v6/async_backing.rs
@@ -0,0 +1,132 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Asynchronous backing primitives.
+
+use super::*;
+
+use parity_scale_codec::{Decode, Encode};
+use primitives::RuntimeDebug;
+use scale_info::TypeInfo;
+
+/// Candidate's acceptance limitations for asynchronous backing per relay parent.
+#[derive(
+	RuntimeDebug,
+	Copy,
+	Clone,
+	PartialEq,
+	Encode,
+	Decode,
+	TypeInfo,
+	serde::Serialize,
+	serde::Deserialize,
+)]
+
+pub struct AsyncBackingParams {
+	/// The maximum number of para blocks between the para head in a relay parent
+	/// and a new candidate. Restricts nodes from building arbitrary long chains
+	/// and spamming other validators.
+	///
+	/// When async backing is disabled, the only valid value is 0.
+	pub max_candidate_depth: u32,
+	/// How many ancestors of a relay parent are allowed to build candidates on top
+	/// of.
+	///
+	/// When async backing is disabled, the only valid value is 0.
+	pub allowed_ancestry_len: u32,
+}
+
+/// Constraints on inbound HRMP channels.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct InboundHrmpLimitations<N = BlockNumber> {
+	/// An exhaustive set of all valid watermarks, sorted ascending.
+	///
+	/// It's only expected to contain block numbers at which messages were
+	/// previously sent to a para, excluding most recent head.
+	pub valid_watermarks: Vec<N>,
+}
+
+/// Constraints on outbound HRMP channels.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct OutboundHrmpChannelLimitations {
+	/// The maximum bytes that can be written to the channel.
+	pub bytes_remaining: u32,
+	/// The maximum messages that can be written to the channel.
+	pub messages_remaining: u32,
+}
+
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct Constraints<N = BlockNumber> {
+	/// The minimum relay-parent number accepted under these constraints.
+	pub min_relay_parent_number: N,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: u32,
+	/// The maximum new validation code size allowed, in bytes.
+	pub max_code_size: u32,
+	/// The amount of UMP messages remaining.
+	pub ump_remaining: u32,
+	/// The amount of UMP bytes remaining.
+	pub ump_remaining_bytes: u32,
+	/// The maximum number of UMP messages allowed per candidate.
+	pub max_ump_num_per_candidate: u32,
+	/// Remaining DMP queue. Only includes sent-at block numbers.
+	pub dmp_remaining_messages: Vec<N>,
+	/// The limitations of all registered inbound HRMP channels.
+	pub hrmp_inbound: InboundHrmpLimitations<N>,
+	/// The limitations of all registered outbound HRMP channels.
+	pub hrmp_channels_out: Vec<(Id, OutboundHrmpChannelLimitations)>,
+	/// The maximum number of HRMP messages allowed per candidate.
+	pub max_hrmp_num_per_candidate: u32,
+	/// The required parent head-data of the parachain.
+	pub required_parent: HeadData,
+	/// The expected validation-code-hash of this parachain.
+	pub validation_code_hash: ValidationCodeHash,
+	/// The code upgrade restriction signal as-of this parachain.
+	pub upgrade_restriction: Option<UpgradeRestriction>,
+	/// The future validation code hash, if any, and at what relay-parent
+	/// number the upgrade would be minimally applied.
+	pub future_validation_code: Option<(N, ValidationCodeHash)>,
+}
+
+/// A candidate pending availability.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct CandidatePendingAvailability<H = Hash, N = BlockNumber> {
+	/// The hash of the candidate.
+	pub candidate_hash: CandidateHash,
+	/// The candidate's descriptor.
+	pub descriptor: CandidateDescriptor<H>,
+	/// The commitments of the candidate.
+	pub commitments: CandidateCommitments,
+	/// The candidate's relay parent's number.
+	pub relay_parent_number: N,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: u32,
+}
+
+/// The per-parachain state of the backing system, including
+/// state-machine constraints and candidates pending availability.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct BackingState<H = Hash, N = BlockNumber> {
+	/// The state-machine constraints of the parachain.
+	pub constraints: Constraints<N>,
+	/// The candidates pending availability. These should be ordered, i.e. they should form
+	/// a sub-chain, where the first candidate builds on top of the required parent of the
+	/// constraints and each subsequent builds on top of the previous head-data.
+	pub pending_availability: Vec<CandidatePendingAvailability<H, N>>,
+}
diff --git a/polkadot/primitives/src/v5/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs
similarity index 100%
rename from polkadot/primitives/src/v5/executor_params.rs
rename to polkadot/primitives/src/v6/executor_params.rs
diff --git a/polkadot/primitives/src/v5/metrics.rs b/polkadot/primitives/src/v6/metrics.rs
similarity index 100%
rename from polkadot/primitives/src/v5/metrics.rs
rename to polkadot/primitives/src/v6/metrics.rs
diff --git a/polkadot/primitives/src/v5/mod.rs b/polkadot/primitives/src/v6/mod.rs
similarity index 99%
rename from polkadot/primitives/src/v5/mod.rs
rename to polkadot/primitives/src/v6/mod.rs
index 30782f95611f..c69c2a53766d 100644
--- a/polkadot/primitives/src/v5/mod.rs
+++ b/polkadot/primitives/src/v6/mod.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
 
-//! `V2` Primitives.
+//! `V6` Primitives.
use bitvec::vec::BitVec; use parity_scale_codec::{Decode, Encode}; @@ -57,8 +57,13 @@ pub use sp_staking::SessionIndex; mod signed; pub use signed::{EncodeAs, Signed, UncheckedSigned}; +pub mod async_backing; +pub mod executor_params; pub mod slashing; +pub use async_backing::AsyncBackingParams; +pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash}; + mod metrics; pub use metrics::{ metric_definitions, RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, @@ -1116,7 +1121,7 @@ pub struct AbridgedHostConfiguration { /// The delay, in blocks, before a validation upgrade is applied. pub validation_upgrade_delay: BlockNumber, /// Asynchronous backing parameters. - pub async_backing_params: super::vstaging::AsyncBackingParams, + pub async_backing_params: AsyncBackingParams, } /// Abridged version of `HrmpChannel` (from the `Hrmp` parachains host runtime module) meant to be @@ -1803,9 +1808,6 @@ pub enum PvfExecTimeoutKind { Approval, } -pub mod executor_params; -pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash}; - #[cfg(test)] mod tests { use super::*; diff --git a/polkadot/primitives/src/v5/signed.rs b/polkadot/primitives/src/v6/signed.rs similarity index 100% rename from polkadot/primitives/src/v5/signed.rs rename to polkadot/primitives/src/v6/signed.rs diff --git a/polkadot/primitives/src/v5/slashing.rs b/polkadot/primitives/src/v6/slashing.rs similarity index 100% rename from polkadot/primitives/src/v5/slashing.rs rename to polkadot/primitives/src/v6/slashing.rs diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index ea341ee5b4fc..1429b0c326ac 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,121 +17,3 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here -pub use crate::v5::*; -use sp_std::prelude::*; - -use parity_scale_codec::{Decode, Encode}; -use primitives::RuntimeDebug; -use scale_info::TypeInfo; - -/// Useful type alias for Para IDs. -pub type ParaId = Id; - -/// Candidate's acceptance limitations for asynchronous backing per relay parent. -#[derive( - RuntimeDebug, - Copy, - Clone, - PartialEq, - Encode, - Decode, - TypeInfo, - serde::Serialize, - serde::Deserialize, -)] - -pub struct AsyncBackingParams { - /// The maximum number of para blocks between the para head in a relay parent - /// and a new candidate. Restricts nodes from building arbitrary long chains - /// and spamming other validators. - /// - /// When async backing is disabled, the only valid value is 0. - pub max_candidate_depth: u32, - /// How many ancestors of a relay parent are allowed to build candidates on top - /// of. - /// - /// When async backing is disabled, the only valid value is 0. - pub allowed_ancestry_len: u32, -} - -/// Constraints on inbound HRMP channels. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct InboundHrmpLimitations { - /// An exhaustive set of all valid watermarks, sorted ascending. - /// - /// It's only expected to contain block numbers at which messages were - /// previously sent to a para, excluding most recent head. - pub valid_watermarks: Vec, -} - -/// Constraints on outbound HRMP channels. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct OutboundHrmpChannelLimitations { - /// The maximum bytes that can be written to the channel. - pub bytes_remaining: u32, - /// The maximum messages that can be written to the channel. 
- pub messages_remaining: u32, -} - -/// Constraints on the actions that can be taken by a new parachain -/// block. These limitations are implicitly associated with some particular -/// parachain, which should be apparent from usage. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct Constraints { - /// The minimum relay-parent number accepted under these constraints. - pub min_relay_parent_number: N, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: u32, - /// The maximum new validation code size allowed, in bytes. - pub max_code_size: u32, - /// The amount of UMP messages remaining. - pub ump_remaining: u32, - /// The amount of UMP bytes remaining. - pub ump_remaining_bytes: u32, - /// The maximum number of UMP messages allowed per candidate. - pub max_ump_num_per_candidate: u32, - /// Remaining DMP queue. Only includes sent-at block numbers. - pub dmp_remaining_messages: Vec, - /// The limitations of all registered inbound HRMP channels. - pub hrmp_inbound: InboundHrmpLimitations, - /// The limitations of all registered outbound HRMP channels. - pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>, - /// The maximum number of HRMP messages allowed per candidate. - pub max_hrmp_num_per_candidate: u32, - /// The required parent head-data of the parachain. - pub required_parent: HeadData, - /// The expected validation-code-hash of this parachain. - pub validation_code_hash: ValidationCodeHash, - /// The code upgrade restriction signal as-of this parachain. - pub upgrade_restriction: Option, - /// The future validation code hash, if any, and at what relay-parent - /// number the upgrade would be minimally applied. - pub future_validation_code: Option<(N, ValidationCodeHash)>, -} - -/// A candidate pending availability. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct CandidatePendingAvailability { - /// The hash of the candidate. - pub candidate_hash: CandidateHash, - /// The candidate's descriptor. - pub descriptor: CandidateDescriptor, - /// The commitments of the candidate. - pub commitments: CandidateCommitments, - /// The candidate's relay parent's number. - pub relay_parent_number: N, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: u32, -} - -/// The per-parachain state of the backing system, including -/// state-machine constraints and candidates pending availability. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct BackingState { - /// The state-machine constraints of the parachain. - pub constraints: Constraints, - /// The candidates pending availability. These should be ordered, i.e. they should form - /// a sub-chain, where the first candidate builds on top of the required parent of the - /// constraints and each subsequent builds on top of the previous head-data. - pub pending_availability: Vec>, -} diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md index a48444a46e40..286aeddb986d 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -122,7 +122,7 @@ prospective validation data. This is unlikely to change. 
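As background for the renamed requests listed below: node-side code decides per relay parent
whether to run in asynchronous-backing mode by querying `async_backing_params` and treating a
`NotSupported` error as the legacy (synchronous) mode. A simplified sketch of that decision,
using only names introduced by this change (`recv_runtime`, `request_async_backing_params`,
`AsyncBackingParams`, `ProspectiveParachainsMode`); error plumbing is abbreviated and not the
exact `subsystem-util` code:

```rust
// Sketch: deciding between legacy and asynchronous backing for a relay parent.
let mode = match recv_runtime(request_async_backing_params(relay_parent, sender).await).await {
	// Pre-v7 runtimes do not expose the API: fall back to the legacy, synchronous path.
	Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) =>
		ProspectiveParachainsMode::Disabled,
	Err(other) => return Err(other),
	Ok(AsyncBackingParams { max_candidate_depth, allowed_ancestry_len }) =>
		ProspectiveParachainsMode::Enabled {
			max_candidate_depth: max_candidate_depth as usize,
			allowed_ancestry_len: allowed_ancestry_len as usize,
		},
};
```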
### Outgoing -- `RuntimeApiRequest::StagingParaBackingState` +- `RuntimeApiRequest::ParaBackingState` - Gets the backing state of the given para (the constraints of the para and candidates pending availability). - `RuntimeApiRequest::AvailabilityCores` diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 33039cd08ca4..f53f986a553f 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,7 +26,7 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v6.rs b/polkadot/runtime/parachains/src/configuration/migration/v6.rs index beed54deaffa..19031a90bab4 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v6.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v6.rs @@ -21,7 +21,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; use sp_std::vec::Vec; -use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; #[cfg(feature = "try-runtime")] use sp_std::prelude::*; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v7.rs b/polkadot/runtime/parachains/src/configuration/migration/v7.rs index 113651381207..1754b78e0a1d 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v7.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v7.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs b/polkadot/runtime/parachains/src/configuration/migration/v8.rs index 5c5b34821835..d1bc90051125 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs @@ -24,8 +24,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; use sp_std::vec::Vec; diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index b27a7ab1ad73..9b2b7a48dc8b 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -51,7 +51,7 @@ use frame_support::{ use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, + slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, CandidateHash, 
SessionIndex, ValidatorId, ValidatorIndex, }; use scale_info::TypeInfo; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index e066ad825a33..7a35d1fe30c3 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -25,5 +25,8 @@ //! 1. Bump the version of the stable module (e.g. `v2` becomes `v3`) //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). + pub mod v5; +pub mod v6; +pub mod v7; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v6.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v6.rs new file mode 100644 index 000000000000..debf66a37844 --- /dev/null +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v6.rs @@ -0,0 +1,22 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{configuration, initializer}; + +/// Return the min backing votes threshold from the configuration. +pub fn minimum_backing_votes() -> u32 { + >::config().minimum_backing_votes +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs new file mode 100644 index 000000000000..8724a1f7b3b1 --- /dev/null +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -0,0 +1,120 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implementation of the Runtime API methods added in v7. + +use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{ + async_backing::{ + AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, + InboundHrmpLimitations, OutboundHrmpChannelLimitations, + }, + Id as ParaId, +}; +use sp_std::prelude::*; + +/// Implementation for `ParaBackingState` function from the runtime API +pub fn backing_state( + para_id: ParaId, +) -> Option>> { + let config = >::config(); + // Async backing is only expected to be enabled with a tracker capacity of 1. 
+ // Subsequent configuration update gets applied on new session, which always + // clears the buffer. + // + // Thus, minimum relay parent is ensured to have asynchronous backing enabled. + let now = >::block_number(); + let min_relay_parent_number = >::allowed_relay_parents() + .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); + + let required_parent = >::para_head(para_id)?; + let validation_code_hash = >::current_code_hash(para_id)?; + + let upgrade_restriction = >::upgrade_restriction_signal(para_id); + let future_validation_code = + >::future_code_upgrade_at(para_id).and_then(|block_num| { + // Only read the storage if there's a pending upgrade. + Some(block_num).zip(>::future_code_hash(para_id)) + }); + + let (ump_msg_count, ump_total_bytes) = + >::relay_dispatch_queue_size(para_id); + let ump_remaining = config.max_upward_queue_count - ump_msg_count; + let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; + + let dmp_remaining_messages = >::dmq_contents(para_id) + .into_iter() + .map(|msg| msg.sent_at) + .collect(); + + let valid_watermarks = >::valid_watermarks(para_id); + let hrmp_inbound = InboundHrmpLimitations { valid_watermarks }; + let hrmp_channels_out = >::outbound_remaining_capacity(para_id) + .into_iter() + .map(|(para, (messages_remaining, bytes_remaining))| { + (para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining }) + }) + .collect(); + + let constraints = Constraints { + min_relay_parent_number, + max_pov_size: config.max_pov_size, + max_code_size: config.max_code_size, + ump_remaining, + ump_remaining_bytes, + max_ump_num_per_candidate: config.max_upward_message_num_per_candidate, + dmp_remaining_messages, + hrmp_inbound, + hrmp_channels_out, + max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate, + required_parent, + validation_code_hash, + upgrade_restriction, + future_validation_code, + }; + + let pending_availability = { + // Note: the API deals with a `Vec` as it is future-proof for cases + // where there may be multiple candidates pending availability at a time. + // But at the moment only one candidate can be pending availability per + // parachain. + crate::inclusion::PendingAvailability::::get(¶_id) + .and_then(|pending| { + let commitments = + crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); + commitments.map(move |c| (pending, c)) + }) + .map(|(pending, commitments)| { + CandidatePendingAvailability { + candidate_hash: pending.candidate_hash(), + descriptor: pending.candidate_descriptor().clone(), + commitments, + relay_parent_number: pending.relay_parent_number(), + max_pov_size: constraints.max_pov_size, // assume always same in session. + } + }) + .into_iter() + .collect() + }; + + Some(BackingState { constraints, pending_availability }) +} + +/// Implementation for `AsyncBackingParams` function from the runtime API +pub fn async_backing_params() -> AsyncBackingParams { + >::config().async_backing_params +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index deef19d90710..d01b543630c3 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -15,111 +15,3 @@ // along with Polkadot. If not, see . //! Put implementations of functions from staging APIs here. 
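For orientation, below is a minimal sketch of the remaining-capacity arithmetic that feeds the `ump_remaining`/`ump_remaining_bytes` fields of the constraints built by `backing_state` above. The structs and numbers are hypothetical stand-ins, not the real `HostConfiguration` or queue-tracking types:

```rust
/// Hypothetical, pared-down stand-ins for the `HostConfiguration` fields and the
/// per-para queue state that feed into `backing_state`; not the real types.
struct UmpConfig {
    max_upward_queue_count: u32,
    max_upward_queue_size: u32,
}

struct UmpUsage {
    msg_count: u32,
    total_bytes: u32,
}

/// Mirrors the subtraction in `backing_state`: how many more upward messages
/// (and bytes) the para may still enqueue under the current configuration.
fn ump_remaining(config: &UmpConfig, usage: &UmpUsage) -> (u32, u32) {
    (
        config.max_upward_queue_count - usage.msg_count,
        config.max_upward_queue_size - usage.total_bytes,
    )
}

fn main() {
    // Made-up figures: a queue capped at 174 messages / 1 MiB, currently
    // holding 10 messages totalling 4096 bytes.
    let config = UmpConfig { max_upward_queue_count: 174, max_upward_queue_size: 1_048_576 };
    let usage = UmpUsage { msg_count: 10, total_bytes: 4_096 };

    let (msgs_left, bytes_left) = ump_remaining(&config, &usage);
    assert_eq!(msgs_left, 164);
    assert_eq!(bytes_left, 1_044_480);
}
```

Prospective candidates are later checked against these remaining figures on the node side, so a candidate whose commitments would exceed them does not fit the constraints returned here.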
- -use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; -use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ - vstaging::{ - AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, - InboundHrmpLimitations, OutboundHrmpChannelLimitations, - }, - Id as ParaId, -}; -use sp_std::prelude::*; - -/// Implementation for `StagingParaBackingState` function from the runtime API -pub fn backing_state( - para_id: ParaId, -) -> Option>> { - let config = >::config(); - // Async backing is only expected to be enabled with a tracker capacity of 1. - // Subsequent configuration update gets applied on new session, which always - // clears the buffer. - // - // Thus, minimum relay parent is ensured to have asynchronous backing enabled. - let now = >::block_number(); - let min_relay_parent_number = >::allowed_relay_parents() - .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); - - let required_parent = >::para_head(para_id)?; - let validation_code_hash = >::current_code_hash(para_id)?; - - let upgrade_restriction = >::upgrade_restriction_signal(para_id); - let future_validation_code = - >::future_code_upgrade_at(para_id).and_then(|block_num| { - // Only read the storage if there's a pending upgrade. - Some(block_num).zip(>::future_code_hash(para_id)) - }); - - let (ump_msg_count, ump_total_bytes) = - >::relay_dispatch_queue_size(para_id); - let ump_remaining = config.max_upward_queue_count - ump_msg_count; - let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; - - let dmp_remaining_messages = >::dmq_contents(para_id) - .into_iter() - .map(|msg| msg.sent_at) - .collect(); - - let valid_watermarks = >::valid_watermarks(para_id); - let hrmp_inbound = InboundHrmpLimitations { valid_watermarks }; - let hrmp_channels_out = >::outbound_remaining_capacity(para_id) - .into_iter() - .map(|(para, (messages_remaining, bytes_remaining))| { - (para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining }) - }) - .collect(); - - let constraints = Constraints { - min_relay_parent_number, - max_pov_size: config.max_pov_size, - max_code_size: config.max_code_size, - ump_remaining, - ump_remaining_bytes, - max_ump_num_per_candidate: config.max_upward_message_num_per_candidate, - dmp_remaining_messages, - hrmp_inbound, - hrmp_channels_out, - max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate, - required_parent, - validation_code_hash, - upgrade_restriction, - future_validation_code, - }; - - let pending_availability = { - // Note: the API deals with a `Vec` as it is future-proof for cases - // where there may be multiple candidates pending availability at a time. - // But at the moment only one candidate can be pending availability per - // parachain. - crate::inclusion::PendingAvailability::::get(¶_id) - .and_then(|pending| { - let commitments = - crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); - commitments.map(move |c| (pending, c)) - }) - .map(|(pending, commitments)| { - CandidatePendingAvailability { - candidate_hash: pending.candidate_hash(), - descriptor: pending.candidate_descriptor().clone(), - commitments, - relay_parent_number: pending.relay_parent_number(), - max_pov_size: constraints.max_pov_size, // assume always same in session. 
- } - }) - .into_iter() - .collect() - }; - - Some(BackingState { constraints, pending_availability }) -} - -/// Implementation for `StagingAsyncBackingParams` function from the runtime API -pub fn async_backing_params() -> AsyncBackingParams { - >::config().async_backing_params -} - -/// Return the min backing votes threshold from the configuration. -pub fn minimum_backing_votes() -> u32 { - >::config().minimum_backing_votes -} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index e043852901f1..38ef2b07862f 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -45,8 +45,7 @@ use runtime_parachains::{ dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, - runtime_api_impl::v5 as parachains_runtime_api_impl, + paras_inherent as parachains_paras_inherent, runtime_api_impl as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -197,7 +196,7 @@ impl PrivilegeCmp for OriginPrivilegeCmp { match (left, right) { // Root is greater than anything. (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), - // Check which one has more yes votes. + // Check which one has more yes. ( OriginCaller::Council(pallet_collective::RawOrigin::Members(l_yes_votes, l_count)), OriginCaller::Council(pallet_collective::RawOrigin::Members(r_yes_votes, r_count)), @@ -1713,29 +1712,30 @@ sp_api::impl_runtime_apis! { } } + #[api_version(7)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { - parachains_runtime_api_impl::validators::() + parachains_runtime_api_impl::v5::validators::() } fn validator_groups() -> (Vec>, GroupRotationInfo) { - parachains_runtime_api_impl::validator_groups::() + parachains_runtime_api_impl::v5::validator_groups::() } fn availability_cores() -> Vec> { - parachains_runtime_api_impl::availability_cores::() + parachains_runtime_api_impl::v5::availability_cores::() } fn persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option> { - parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) + parachains_runtime_api_impl::v5::persisted_validation_data::(para_id, assumption) } fn assumed_validation_data( para_id: ParaId, expected_persisted_validation_data_hash: Hash, ) -> Option<(PersistedValidationData, ValidationCodeHash)> { - parachains_runtime_api_impl::assumed_validation_data::( + parachains_runtime_api_impl::v5::assumed_validation_data::( para_id, expected_persisted_validation_data_hash, ) @@ -1745,24 +1745,24 @@ sp_api::impl_runtime_apis! 
{ para_id: ParaId, outputs: primitives::CandidateCommitments, ) -> bool { - parachains_runtime_api_impl::check_validation_outputs::(para_id, outputs) + parachains_runtime_api_impl::v5::check_validation_outputs::(para_id, outputs) } fn session_index_for_child() -> SessionIndex { - parachains_runtime_api_impl::session_index_for_child::() + parachains_runtime_api_impl::v5::session_index_for_child::() } fn validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::validation_code::(para_id, assumption) + parachains_runtime_api_impl::v5::validation_code::(para_id, assumption) } fn candidate_pending_availability(para_id: ParaId) -> Option> { - parachains_runtime_api_impl::candidate_pending_availability::(para_id) + parachains_runtime_api_impl::v5::candidate_pending_availability::(para_id) } fn candidate_events() -> Vec> { - parachains_runtime_api_impl::candidate_events::(|ev| { + parachains_runtime_api_impl::v5::candidate_events::(|ev| { match ev { RuntimeEvent::ParaInclusion(ev) => { Some(ev) @@ -1773,55 +1773,55 @@ sp_api::impl_runtime_apis! { } fn session_info(index: SessionIndex) -> Option { - parachains_runtime_api_impl::session_info::(index) + parachains_runtime_api_impl::v5::session_info::(index) } fn session_executor_params(session_index: SessionIndex) -> Option { - parachains_runtime_api_impl::session_executor_params::(session_index) + parachains_runtime_api_impl::v5::session_executor_params::(session_index) } fn dmq_contents(recipient: ParaId) -> Vec> { - parachains_runtime_api_impl::dmq_contents::(recipient) + parachains_runtime_api_impl::v5::dmq_contents::(recipient) } fn inbound_hrmp_channels_contents( recipient: ParaId ) -> BTreeMap>> { - parachains_runtime_api_impl::inbound_hrmp_channels_contents::(recipient) + parachains_runtime_api_impl::v5::inbound_hrmp_channels_contents::(recipient) } fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { - parachains_runtime_api_impl::validation_code_by_hash::(hash) + parachains_runtime_api_impl::v5::validation_code_by_hash::(hash) } fn on_chain_votes() -> Option> { - parachains_runtime_api_impl::on_chain_votes::() + parachains_runtime_api_impl::v5::on_chain_votes::() } fn submit_pvf_check_statement( stmt: primitives::PvfCheckStatement, signature: primitives::ValidatorSignature ) { - parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) + parachains_runtime_api_impl::v5::submit_pvf_check_statement::(stmt, signature) } fn pvfs_require_precheck() -> Vec { - parachains_runtime_api_impl::pvfs_require_precheck::() + parachains_runtime_api_impl::v5::pvfs_require_precheck::() } fn validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::validation_code_hash::(para_id, assumption) + parachains_runtime_api_impl::v5::validation_code_hash::(para_id, assumption) } fn disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { - parachains_runtime_api_impl::get_session_disputes::() + parachains_runtime_api_impl::v5::get_session_disputes::() } fn unapplied_slashes( ) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { - parachains_runtime_api_impl::unapplied_slashes::() + parachains_runtime_api_impl::v5::unapplied_slashes::() } fn key_ownership_proof( @@ -1838,11 +1838,23 @@ sp_api::impl_runtime_apis! 
{ dispute_proof: slashing::DisputeProof, key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Option<()> { - parachains_runtime_api_impl::submit_unsigned_slashing_report::( + parachains_runtime_api_impl::v5::submit_unsigned_slashing_report::( dispute_proof, key_ownership_proof, ) } + + fn minimum_backing_votes() -> u32 { + parachains_runtime_api_impl::v6::minimum_backing_votes::() + } + + fn para_backing_state(para_id: ParaId) -> Option { + parachains_runtime_api_impl::v7::backing_state::(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + parachains_runtime_api_impl::v7::async_backing_params::() + } } #[api_version(3)] @@ -2017,7 +2029,7 @@ sp_api::impl_runtime_apis! { impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { - parachains_runtime_api_impl::relevant_authority_ids::() + parachains_runtime_api_impl::v5::relevant_authority_ids::() } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7dfc781d2467..e698e384ad51 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -62,11 +62,8 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::{ - v5 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, - scheduler as parachains_scheduler, session_info as parachains_session_info, - shared as parachains_shared, + runtime_api_impl as parachains_runtime_api_impl, scheduler as parachains_scheduler, + session_info as parachains_session_info, shared as parachains_shared, }; use scale_info::TypeInfo; use sp_core::{OpaqueMetadata, RuntimeDebug, H256}; @@ -1565,27 +1562,27 @@ sp_api::impl_runtime_apis! { #[api_version(6)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { - parachains_runtime_api_impl::validators::() + parachains_runtime_api_impl::v5::validators::() } fn validator_groups() -> (Vec>, GroupRotationInfo) { - parachains_runtime_api_impl::validator_groups::() + parachains_runtime_api_impl::v5::validator_groups::() } fn availability_cores() -> Vec> { - parachains_runtime_api_impl::availability_cores::() + parachains_runtime_api_impl::v5::availability_cores::() } fn persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option> { - parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) + parachains_runtime_api_impl::v5::persisted_validation_data::(para_id, assumption) } fn assumed_validation_data( para_id: ParaId, expected_persisted_validation_data_hash: Hash, ) -> Option<(PersistedValidationData, ValidationCodeHash)> { - parachains_runtime_api_impl::assumed_validation_data::( + parachains_runtime_api_impl::v5::assumed_validation_data::( para_id, expected_persisted_validation_data_hash, ) @@ -1595,24 +1592,24 @@ sp_api::impl_runtime_apis! 
{ para_id: ParaId, outputs: primitives::CandidateCommitments, ) -> bool { - parachains_runtime_api_impl::check_validation_outputs::(para_id, outputs) + parachains_runtime_api_impl::v5::check_validation_outputs::(para_id, outputs) } fn session_index_for_child() -> SessionIndex { - parachains_runtime_api_impl::session_index_for_child::() + parachains_runtime_api_impl::v5::session_index_for_child::() } fn validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::validation_code::(para_id, assumption) + parachains_runtime_api_impl::v5::validation_code::(para_id, assumption) } fn candidate_pending_availability(para_id: ParaId) -> Option> { - parachains_runtime_api_impl::candidate_pending_availability::(para_id) + parachains_runtime_api_impl::v5::candidate_pending_availability::(para_id) } fn candidate_events() -> Vec> { - parachains_runtime_api_impl::candidate_events::(|ev| { + parachains_runtime_api_impl::v5::candidate_events::(|ev| { match ev { RuntimeEvent::ParaInclusion(ev) => { Some(ev) @@ -1623,55 +1620,55 @@ sp_api::impl_runtime_apis! { } fn session_info(index: SessionIndex) -> Option { - parachains_runtime_api_impl::session_info::(index) + parachains_runtime_api_impl::v5::session_info::(index) } fn session_executor_params(session_index: SessionIndex) -> Option { - parachains_runtime_api_impl::session_executor_params::(session_index) + parachains_runtime_api_impl::v5::session_executor_params::(session_index) } fn dmq_contents(recipient: ParaId) -> Vec> { - parachains_runtime_api_impl::dmq_contents::(recipient) + parachains_runtime_api_impl::v5::dmq_contents::(recipient) } fn inbound_hrmp_channels_contents( recipient: ParaId ) -> BTreeMap>> { - parachains_runtime_api_impl::inbound_hrmp_channels_contents::(recipient) + parachains_runtime_api_impl::v5::inbound_hrmp_channels_contents::(recipient) } fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { - parachains_runtime_api_impl::validation_code_by_hash::(hash) + parachains_runtime_api_impl::v5::validation_code_by_hash::(hash) } fn on_chain_votes() -> Option> { - parachains_runtime_api_impl::on_chain_votes::() + parachains_runtime_api_impl::v5::on_chain_votes::() } fn submit_pvf_check_statement( stmt: PvfCheckStatement, signature: ValidatorSignature, ) { - parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) + parachains_runtime_api_impl::v5::submit_pvf_check_statement::(stmt, signature) } fn pvfs_require_precheck() -> Vec { - parachains_runtime_api_impl::pvfs_require_precheck::() + parachains_runtime_api_impl::v5::pvfs_require_precheck::() } fn validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::validation_code_hash::(para_id, assumption) + parachains_runtime_api_impl::v5::validation_code_hash::(para_id, assumption) } fn disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { - parachains_runtime_api_impl::get_session_disputes::() + parachains_runtime_api_impl::v5::get_session_disputes::() } fn unapplied_slashes( ) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { - parachains_runtime_api_impl::unapplied_slashes::() + parachains_runtime_api_impl::v5::unapplied_slashes::() } fn key_ownership_proof( @@ -1688,14 +1685,14 @@ sp_api::impl_runtime_apis! 
{ dispute_proof: slashing::DisputeProof, key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Option<()> { - parachains_runtime_api_impl::submit_unsigned_slashing_report::( + parachains_runtime_api_impl::v5::submit_unsigned_slashing_report::( dispute_proof, key_ownership_proof, ) } fn minimum_backing_votes() -> u32 { - parachains_staging_runtime_api_impl::minimum_backing_votes::() + parachains_runtime_api_impl::v6::minimum_backing_votes::() } } @@ -1879,7 +1876,7 @@ sp_api::impl_runtime_apis! { impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { - parachains_runtime_api_impl::relevant_authority_ids::() + parachains_runtime_api_impl::v5::relevant_authority_ids::() } } From 1b191d859e63fb90e4bba03df7a0de74b1f1d02f Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 19:22:40 +0400 Subject: [PATCH 02/19] fix import --- polkadot/runtime/parachains/src/assigner_on_demand/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index fe9a4e52bd07..d07964b69165 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -28,7 +28,7 @@ use crate::{ }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use pallet_balances::Error as BalancesError; -use primitives::{v5::ValidationCode, BlockNumber, SessionIndex}; +use primitives::{BlockNumber, SessionIndex, ValidationCode}; use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { From 22d0c74bbb64229273246a680d3cb73506b80c18 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 20:20:41 +0400 Subject: [PATCH 03/19] bump rococo version --- cumulus/parachains/integration-tests/emulated/common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 7461165f2a19..db0c522e4b36 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -79,7 +79,7 @@ decl_test_relay_chains! { Balances: westend_runtime::Balances, } }, - #[api_version(5)] + #[api_version(7)] pub struct Rococo { genesis = rococo::genesis(), on_init = (), From 1b5679dd105440c3fd9432fc8cd656d0cdd0f77b Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 20:52:42 +0400 Subject: [PATCH 04/19] bump wococo too --- cumulus/parachains/integration-tests/emulated/common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index db0c522e4b36..c920a4b67ad5 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -94,7 +94,7 @@ decl_test_relay_chains! 
{ Balances: rococo_runtime::Balances, } }, - #[api_version(5)] + #[api_version(7)] pub struct Wococo { genesis = rococo::genesis(), on_init = (), From 27d45f7d1e0cff869a5992e905ef1cd995bdd524 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 21:29:11 +0400 Subject: [PATCH 05/19] fix pallet --- cumulus/pallets/parachain-system/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index a8e9a0bf9ae4..67c54e113c0a 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1447,7 +1447,7 @@ impl Pallet { hrmp_max_message_num_per_candidate: 2, validation_upgrade_cooldown: 2, validation_upgrade_delay: 2, - async_backing_params: relay_chain::vstaging::AsyncBackingParams { + async_backing_params: relay_chain::AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0, }, From c264250c53a2e4ab62307d47e179810812968a71 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 21:44:02 +0400 Subject: [PATCH 06/19] fix doc job --- .../src/inclusion_emulator/mod.rs | 200 +++++++++--------- 1 file changed, 100 insertions(+), 100 deletions(-) diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 77360ba0afe1..8d57e25d0e28 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -11,106 +11,106 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -//! # Overview -//! -//! A set of utilities for node-side code to emulate the logic the runtime uses for checking -//! parachain blocks in order to build prospective parachains that are produced ahead of the -//! relay chain. These utilities allow the node-side to predict, with high accuracy, what -//! the relay-chain will accept in the near future. -//! -//! This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] -//! exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`] -//! indicates a parachain block, anchored to the relay-chain at a particular relay-chain block, -//! known as the relay-parent. -//! -//! ## Fragment Validity -//! -//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe -//! the properties that must be true for a block to be included in a direct child of that block, -//! assuming there is no intermediate parachain block pending availability. -//! -//! However, the key factor that makes asynchronously-grown prospective chains -//! possible is the fact that the relay-chain accepts candidate blocks based on whether they -//! are valid under the constraints of the present moment, not based on whether they were -//! valid at the time of construction. -//! -//! As such, [`Fragment`]s are often, but not always constructed in such a way that they are -//! invalid at first and become valid later on, as the relay chain grows. -//! -//! # Usage -//! -//! It's expected that the users of this module will be building up trees of -//! [`Fragment`]s and consistently pruning and adding to the tree. -//! -//! ## Operating Constraints -//! -//! The *operating constraints* of a `Fragment` are the constraints with which that fragment -//! was intended to comply. The operating constraints are defined as the base constraints -//! 
of the relay-parent of the fragment modified by the cumulative modifications of all -//! fragments between the relay-parent and the current fragment. -//! -//! What the operating constraints are, in practice, is a prediction about the state of the -//! relay-chain in the future. The relay-chain is aware of some current state, and we want to -//! make an intelligent prediction about what might be accepted in the future based on -//! prior fragments that also exist off-chain. -//! -//! ## Fragment Trees -//! -//! As the relay-chain grows, some predictions come true and others come false. -//! And new predictions get made. These three changes correspond distinctly to the -//! 3 primary operations on fragment trees. -//! -//! A fragment tree is a mental model for thinking about a forking series of predictions -//! about a single parachain. There may be one or more fragment trees per parachain. -//! -//! In expectation, most parachains will have a plausibly-unique authorship method which means that -//! they should really be much closer to fragment-chains, maybe with an occasional fork. -//! -//! Avoiding fragment-tree blowup is beyond the scope of this module. -//! -//! ### Pruning Fragment Trees -//! -//! When the relay-chain advances, we want to compare the new constraints of that relay-parent to -//! the roots of the fragment trees we have. There are 3 cases: -//! -//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This -//! is the "prediction still uncertain" case. -//! -//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the -//! relay-chain. In this case, we can discard the root and split & re-root the fragment tree under -//! its descendents and compare to the new constraints again. This is the "prediction came true" -//! case. -//! -//! 3. The root fragment is invalid under the new constraints because a competing parachain block -//! has been included or it would never be accepted for some other reason. In this case we can -//! discard the entire fragment tree. This is the "prediction came false" case. -//! -//! This is all a bit of a simplification because it assumes that the relay-chain advances without -//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable -//! from the perspective of a few different possible forks of the relay-chain and not pruned -//! too eagerly. -//! -//! Note that the fragments themselves don't need to change and the only thing we care about -//! is whether the predictions they represent are still valid. -//! -//! ### Extending Fragment Trees -//! -//! As predictions fade into the past, new ones should be stacked on top. -//! -//! Every new relay-chain block is an opportunity to make a new prediction about the future. -//! Higher-level logic should select the leaves of the fragment-trees to build upon or whether -//! to create a new fragment-tree. -//! -//! ### Code Upgrades -//! -//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling -//! logic is very path-dependent and intricate so we just assume that code upgrades -//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep, -//! in practice and code upgrades are fairly rare. So what's likely to happen around code -//! upgrades is that the entire fragment-tree has to get discarded at some point. -//! -//! That means a few blocks of execution time lost, which is not a big deal for code upgrades -//! 
in practice at most once every few weeks. +/// # Overview +/// +/// A set of utilities for node-side code to emulate the logic the runtime uses for checking +/// parachain blocks in order to build prospective parachains that are produced ahead of the +/// relay chain. These utilities allow the node-side to predict, with high accuracy, what +/// the relay-chain will accept in the near future. +/// +/// This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] +/// exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`] +/// indicates a parachain block, anchored to the relay-chain at a particular relay-chain block, +/// known as the relay-parent. +/// +/// ## Fragment Validity +/// +/// Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe +/// the properties that must be true for a block to be included in a direct child of that block, +/// assuming there is no intermediate parachain block pending availability. +/// +/// However, the key factor that makes asynchronously-grown prospective chains +/// possible is the fact that the relay-chain accepts candidate blocks based on whether they +/// are valid under the constraints of the present moment, not based on whether they were +/// valid at the time of construction. +/// +/// As such, [`Fragment`]s are often, but not always constructed in such a way that they are +/// invalid at first and become valid later on, as the relay chain grows. +/// +/// # Usage +/// +/// It's expected that the users of this module will be building up trees of +/// [`Fragment`]s and consistently pruning and adding to the tree. +/// +/// ## Operating Constraints +/// +/// The *operating constraints* of a `Fragment` are the constraints with which that fragment +/// was intended to comply. The operating constraints are defined as the base constraints +/// of the relay-parent of the fragment modified by the cumulative modifications of all +/// fragments between the relay-parent and the current fragment. +/// +/// What the operating constraints are, in practice, is a prediction about the state of the +/// relay-chain in the future. The relay-chain is aware of some current state, and we want to +/// make an intelligent prediction about what might be accepted in the future based on +/// prior fragments that also exist off-chain. +/// +/// ## Fragment Trees +/// +/// As the relay-chain grows, some predictions come true and others come false. +/// And new predictions get made. These three changes correspond distinctly to the +/// 3 primary operations on fragment trees. +/// +/// A fragment tree is a mental model for thinking about a forking series of predictions +/// about a single parachain. There may be one or more fragment trees per parachain. +/// +/// In expectation, most parachains will have a plausibly-unique authorship method which means that +/// they should really be much closer to fragment-chains, maybe with an occasional fork. +/// +/// Avoiding fragment-tree blowup is beyond the scope of this module. +/// +/// ### Pruning Fragment Trees +/// +/// When the relay-chain advances, we want to compare the new constraints of that relay-parent to +/// the roots of the fragment trees we have. There are 3 cases: +/// +/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This +/// is the "prediction still uncertain" case. +/// +/// 2. 
The root fragment is invalid under the new constraints because it has been subsumed by the +/// relay-chain. In this case, we can discard the root and split & re-root the fragment tree under +/// its descendents and compare to the new constraints again. This is the "prediction came true" +/// case. +/// +/// 3. The root fragment is invalid under the new constraints because a competing parachain block +/// has been included or it would never be accepted for some other reason. In this case we can +/// discard the entire fragment tree. This is the "prediction came false" case. +/// +/// This is all a bit of a simplification because it assumes that the relay-chain advances without +/// forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable +/// from the perspective of a few different possible forks of the relay-chain and not pruned +/// too eagerly. +/// +/// Note that the fragments themselves don't need to change and the only thing we care about +/// is whether the predictions they represent are still valid. +/// +/// ### Extending Fragment Trees +/// +/// As predictions fade into the past, new ones should be stacked on top. +/// +/// Every new relay-chain block is an opportunity to make a new prediction about the future. +/// Higher-level logic should select the leaves of the fragment-trees to build upon or whether +/// to create a new fragment-tree. +/// +/// ### Code Upgrades +/// +/// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling +/// logic is very path-dependent and intricate so we just assume that code upgrades +/// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep, +/// in practice and code upgrades are fairly rare. So what's likely to happen around code +/// upgrades is that the entire fragment-tree has to get discarded at some point. +/// +/// That means a few blocks of execution time lost, which is not a big deal for code upgrades +/// in practice at most once every few weeks. use polkadot_primitives::{ async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, From b23f36cb3bb37d4fd7cac993d8805eedde9c2c0d Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 21:49:08 +0400 Subject: [PATCH 07/19] fmt --- .../src/inclusion_emulator/mod.rs | 51 +++++++++---------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 8d57e25d0e28..c7b91bffb3d7 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -19,15 +19,15 @@ /// the relay-chain will accept in the near future. /// /// This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] -/// exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`] -/// indicates a parachain block, anchored to the relay-chain at a particular relay-chain block, -/// known as the relay-parent. +/// exhaustively define the set of valid inputs and outputs to parachain execution. A +/// [`Fragment`] indicates a parachain block, anchored to the relay-chain at a particular +/// relay-chain block, known as the relay-parent. 
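To make the operating-constraints idea from these module docs concrete, here is a small, self-contained sketch of folding each fragment's cumulative modifications into the base constraints of its relay-parent. The types and fields are hypothetical simplifications; the real `Constraints` and modification types carry many more fields and failure modes:

```rust
/// Hypothetical simplification of the constraints a fragment's child must satisfy.
#[derive(Clone, Debug, PartialEq)]
struct Constraints {
    ump_remaining: u32,
    hrmp_messages_remaining: u32,
}

/// Hypothetical simplification of the cumulative outputs of one fragment.
#[derive(Clone, Copy, Debug)]
struct Modifications {
    ump_sent: u32,
    hrmp_sent: u32,
}

impl Constraints {
    /// Derive the constraints the next fragment operates under, or `None`
    /// if this fragment would overrun a limit.
    fn apply(&self, m: Modifications) -> Option<Self> {
        Some(Self {
            ump_remaining: self.ump_remaining.checked_sub(m.ump_sent)?,
            hrmp_messages_remaining: self.hrmp_messages_remaining.checked_sub(m.hrmp_sent)?,
        })
    }
}

fn main() {
    // Base constraints taken from a relay-parent (made-up numbers).
    let base = Constraints { ump_remaining: 10, hrmp_messages_remaining: 4 };

    // Two fragments already built on top of that relay-parent.
    let chain = [
        Modifications { ump_sent: 3, hrmp_sent: 1 },
        Modifications { ump_sent: 2, hrmp_sent: 1 },
    ];

    // Operating constraints for a third fragment: the base constraints modified
    // by the cumulative modifications of everything between it and the relay-parent.
    let operating = chain
        .iter()
        .try_fold(base, |acc, m| acc.apply(*m))
        .expect("made-up numbers stay within the limits");

    assert_eq!(operating, Constraints { ump_remaining: 5, hrmp_messages_remaining: 2 });
}
```

Using checked subtraction in the sketch makes a fragment that would overrun a limit show up as `None`, which corresponds to the moment a prediction is discarded rather than extended.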
/// /// ## Fragment Validity /// -/// Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe -/// the properties that must be true for a block to be included in a direct child of that block, -/// assuming there is no intermediate parachain block pending availability. +/// Every relay-parent is implicitly associated with a unique set of [`Constraints`] that +/// describe the properties that must be true for a block to be included in a direct child of +/// that block, assuming there is no intermediate parachain block pending availability. /// /// However, the key factor that makes asynchronously-grown prospective chains /// possible is the fact that the relay-chain accepts candidate blocks based on whether they @@ -63,32 +63,32 @@ /// A fragment tree is a mental model for thinking about a forking series of predictions /// about a single parachain. There may be one or more fragment trees per parachain. /// -/// In expectation, most parachains will have a plausibly-unique authorship method which means that -/// they should really be much closer to fragment-chains, maybe with an occasional fork. +/// In expectation, most parachains will have a plausibly-unique authorship method which means +/// that they should really be much closer to fragment-chains, maybe with an occasional fork. /// /// Avoiding fragment-tree blowup is beyond the scope of this module. /// /// ### Pruning Fragment Trees /// -/// When the relay-chain advances, we want to compare the new constraints of that relay-parent to -/// the roots of the fragment trees we have. There are 3 cases: +/// When the relay-chain advances, we want to compare the new constraints of that relay-parent +/// to the roots of the fragment trees we have. There are 3 cases: /// -/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This -/// is the "prediction still uncertain" case. +/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing. +/// This is the "prediction still uncertain" case. /// -/// 2. The root fragment is invalid under the new constraints because it has been subsumed by the -/// relay-chain. In this case, we can discard the root and split & re-root the fragment tree under -/// its descendents and compare to the new constraints again. This is the "prediction came true" -/// case. +/// 2. The root fragment is invalid under the new constraints because it has been subsumed by +/// the relay-chain. In this case, we can discard the root and split & re-root the fragment +/// tree under its descendents and compare to the new constraints again. This is the +/// "prediction came true" case. /// -/// 3. The root fragment is invalid under the new constraints because a competing parachain block -/// has been included or it would never be accepted for some other reason. In this case we can -/// discard the entire fragment tree. This is the "prediction came false" case. +/// 3. The root fragment is invalid under the new constraints because a competing parachain +/// block has been included or it would never be accepted for some other reason. In this +/// case we can discard the entire fragment tree. This is the "prediction came false" case. /// -/// This is all a bit of a simplification because it assumes that the relay-chain advances without -/// forks and is finalized instantly. 
In practice, the set of fragment-trees needs to be observable -/// from the perspective of a few different possible forks of the relay-chain and not pruned -/// too eagerly. +/// This is all a bit of a simplification because it assumes that the relay-chain advances +/// without forks and is finalized instantly. In practice, the set of fragment-trees needs to +/// be observable from the perspective of a few different possible forks of the relay-chain and +/// not pruned too eagerly. /// /// Note that the fragments themselves don't need to change and the only thing we care about /// is whether the predictions they represent are still valid. @@ -103,15 +103,14 @@ /// /// ### Code Upgrades /// -/// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling -/// logic is very path-dependent and intricate so we just assume that code upgrades +/// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade +/// scheduling logic is very path-dependent and intricate so we just assume that code upgrades /// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep, /// in practice and code upgrades are fairly rare. So what's likely to happen around code /// upgrades is that the entire fragment-tree has to get discarded at some point. /// /// That means a few blocks of execution time lost, which is not a big deal for code upgrades /// in practice at most once every few weeks. - use polkadot_primitives::{ async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData, From 21feed2e34ded5f65b7947c61da9efd1a9535ef9 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 23:48:52 +0400 Subject: [PATCH 08/19] drop LOWES_USER_ID reexport --- polkadot/primitives/src/v6/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index c69c2a53766d..cf9008355178 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -44,7 +44,7 @@ pub use polkadot_core_primitives::v2::{ // Export some polkadot-parachain primitives pub use polkadot_parachain_primitives::primitives::{ HeadData, HorizontalMessages, HrmpChannelId, Id, UpwardMessage, UpwardMessages, ValidationCode, - ValidationCodeHash, LOWEST_PUBLIC_ID, LOWEST_USER_ID, + ValidationCodeHash, LOWEST_PUBLIC_ID, }; use serde::{Deserialize, Serialize}; From 4ffdc0178228726e54d90610ab648dd7dc4d6aa0 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 23:49:52 +0400 Subject: [PATCH 09/19] revert comment --- polkadot/runtime/rococo/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 38ef2b07862f..dc05a34672b5 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -196,7 +196,7 @@ impl PrivilegeCmp for OriginPrivilegeCmp { match (left, right) { // Root is greater than anything. (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), - // Check which one has more yes. + // Check which one has more yes votes. 
( OriginCaller::Council(pallet_collective::RawOrigin::Members(l_yes_votes, l_count)), OriginCaller::Council(pallet_collective::RawOrigin::Members(r_yes_votes, r_count)), From 4049260cb604cbea5153ab7c1dd5b77721b2f239 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 13 Sep 2023 23:59:20 +0400 Subject: [PATCH 10/19] merge runtime api into v7 --- polkadot/runtime/kusama/src/lib.rs | 2 +- .../parachains/src/runtime_api_impl/mod.rs | 2 - .../parachains/src/runtime_api_impl/v5.rs | 397 ------------------ .../parachains/src/runtime_api_impl/v6.rs | 22 - .../parachains/src/runtime_api_impl/v7.rs | 394 ++++++++++++++++- polkadot/runtime/polkadot/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 55 +-- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 53 +-- 9 files changed, 444 insertions(+), 485 deletions(-) delete mode 100644 polkadot/runtime/parachains/src/runtime_api_impl/v5.rs delete mode 100644 polkadot/runtime/parachains/src/runtime_api_impl/v6.rs diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 659a7052d2b7..9d598eeb313f 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -46,7 +46,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index 7a35d1fe30c3..ba74e488cd3b 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -26,7 +26,5 @@ //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). -pub mod v5; -pub mod v6; pub mod v7; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs deleted file mode 100644 index 46a609e0368d..000000000000 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! A module exporting runtime API implementation functions for all runtime APIs using `v5` -//! primitives. -//! -//! Runtimes implementing the v2 runtime API are recommended to forward directly to these -//! functions. 
- -use crate::{ - disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, - scheduler::{self, CoreOccupied}, - session_info, shared, -}; -use frame_system::pallet_prelude::*; -use primitives::{ - slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, - CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, -}; -use sp_runtime::traits::One; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; - -/// Implementation for the `validators` function of the runtime API. -pub fn validators() -> Vec { - >::active_validator_keys() -} - -/// Implementation for the `validator_groups` function of the runtime API. -pub fn validator_groups( -) -> (Vec>, GroupRotationInfo>) { - let now = >::block_number() + One::one(); - - let groups = >::validator_groups(); - let rotation_info = >::group_rotation_info(now); - - (groups, rotation_info) -} - -/// Implementation for the `availability_cores` function of the runtime API. -pub fn availability_cores() -> Vec>> { - let cores = >::availability_cores(); - let now = >::block_number() + One::one(); - - // This explicit update is only strictly required for session boundaries: - // - // At the end of a session we clear the claim queues: Without this update call, nothing would be - // scheduled to the client. - >::update_claimqueue(Vec::new(), now); - - let time_out_for = >::availability_timeout_predicate(); - - let group_responsible_for = - |backed_in_number, core_index| match >::group_assigned_to_core( - core_index, - backed_in_number, - ) { - Some(g) => g, - None => { - log::warn!( - target: "runtime::polkadot-api::v2", - "Could not determine the group responsible for core extracted \ - from list of cores for some prior block in same session", - ); - - GroupIndex(0) - }, - }; - - let scheduled: BTreeMap<_, _> = >::scheduled_paras().collect(); - - cores - .into_iter() - .enumerate() - .map(|(i, core)| match core { - CoreOccupied::Paras(entry) => { - let pending_availability = - >::pending_availability(entry.para_id()) - .expect("Occupied core always has pending availability; qed"); - - let backed_in_number = *pending_availability.backed_in_number(); - CoreState::Occupied(OccupiedCore { - next_up_on_available: >::next_up_on_available(CoreIndex( - i as u32, - )), - occupied_since: backed_in_number, - time_out_at: time_out_for(backed_in_number).live_until, - next_up_on_time_out: >::next_up_on_time_out(CoreIndex( - i as u32, - )), - availability: pending_availability.availability_votes().clone(), - group_responsible: group_responsible_for( - backed_in_number, - pending_availability.core_occupied(), - ), - candidate_hash: pending_availability.candidate_hash(), - candidate_descriptor: pending_availability.candidate_descriptor().clone(), - }) - }, - CoreOccupied::Free => { - if let Some(para_id) = scheduled.get(&CoreIndex(i as _)).cloned() { - CoreState::Scheduled(primitives::ScheduledCore { para_id, collator: None }) - } else { - CoreState::Free - } - }, - }) - .collect() -} - -/// Returns current block number being processed and the corresponding root hash. 
-fn current_relay_parent( -) -> (BlockNumberFor, ::Hash) { - use parity_scale_codec::Decode as _; - let state_version = >::runtime_version().state_version(); - let relay_parent_number = >::block_number(); - let relay_parent_storage_root = T::Hash::decode(&mut &sp_io::storage::root(state_version)[..]) - .expect("storage root must decode to the Hash type; qed"); - (relay_parent_number, relay_parent_storage_root) -} - -fn with_assumption( - para_id: ParaId, - assumption: OccupiedCoreAssumption, - build: F, -) -> Option -where - Config: inclusion::Config, - F: FnOnce() -> Option, -{ - match assumption { - OccupiedCoreAssumption::Included => { - >::force_enact(para_id); - build() - }, - OccupiedCoreAssumption::TimedOut => build(), - OccupiedCoreAssumption::Free => { - if >::pending_availability(para_id).is_some() { - None - } else { - build() - } - }, - } -} - -/// Implementation for the `persisted_validation_data` function of the runtime API. -pub fn persisted_validation_data( - para_id: ParaId, - assumption: OccupiedCoreAssumption, -) -> Option>> { - let (relay_parent_number, relay_parent_storage_root) = current_relay_parent::(); - with_assumption::(para_id, assumption, || { - crate::util::make_persisted_validation_data::( - para_id, - relay_parent_number, - relay_parent_storage_root, - ) - }) -} - -/// Implementation for the `assumed_validation_data` function of the runtime API. -pub fn assumed_validation_data( - para_id: ParaId, - expected_persisted_validation_data_hash: Hash, -) -> Option<(PersistedValidationData>, ValidationCodeHash)> { - let (relay_parent_number, relay_parent_storage_root) = current_relay_parent::(); - // This closure obtains the `persisted_validation_data` for the given `para_id` and matches - // its hash against an expected one. - let make_validation_data = || { - crate::util::make_persisted_validation_data::( - para_id, - relay_parent_number, - relay_parent_storage_root, - ) - .filter(|validation_data| validation_data.hash() == expected_persisted_validation_data_hash) - }; - - let persisted_validation_data = make_validation_data().or_else(|| { - // Try again with force enacting the core. This check only makes sense if - // the core is occupied. - >::pending_availability(para_id).and_then(|_| { - >::force_enact(para_id); - make_validation_data() - }) - }); - // If we were successful, also query current validation code hash. - persisted_validation_data.zip(>::current_code_hash(¶_id)) -} - -/// Implementation for the `check_validation_outputs` function of the runtime API. -pub fn check_validation_outputs( - para_id: ParaId, - outputs: primitives::CandidateCommitments, -) -> bool { - let relay_parent_number = >::block_number(); - >::check_validation_outputs_for_runtime_api( - para_id, - relay_parent_number, - outputs, - ) -} - -/// Implementation for the `session_index_for_child` function of the runtime API. -pub fn session_index_for_child() -> SessionIndex { - // Just returns the session index from `inclusion`. Runtime APIs follow - // initialization so the initializer will have applied any pending session change - // which is expected at the child of the block whose context the runtime API was invoked - // in. - // - // Incidentally, this is also the rationale for why it is OK to query validators or - // occupied cores or etc. and expect the correct response "for child". - >::session_index() -} - -/// Implementation for the `AuthorityDiscoveryApi::authorities()` function of the runtime API. 
-/// It is a heavy call, but currently only used for authority discovery, so it is fine. -/// Gets next, current and some historical authority ids using `session_info` module. -pub fn relevant_authority_ids( -) -> Vec { - let current_session_index = session_index_for_child::(); - let earliest_stored_session = >::earliest_stored_session(); - - // Due to `max_validators`, the `SessionInfo` stores only the validators who are actively - // selected to participate in parachain consensus. We'd like all authorities for the current - // and next sessions to be used in authority-discovery. The two sets likely have large overlap. - let mut authority_ids = >::current_authorities().to_vec(); - authority_ids.extend(>::next_authorities().to_vec()); - - // Due to disputes, we'd like to remain connected to authorities of the previous few sessions. - // For this, we don't need anyone other than the validators actively participating in consensus. - for session_index in earliest_stored_session..current_session_index { - let info = >::session_info(session_index); - if let Some(mut info) = info { - authority_ids.append(&mut info.discovery_keys); - } - } - - authority_ids.sort(); - authority_ids.dedup(); - - authority_ids -} - -/// Implementation for the `validation_code` function of the runtime API. -pub fn validation_code( - para_id: ParaId, - assumption: OccupiedCoreAssumption, -) -> Option { - with_assumption::(para_id, assumption, || >::current_code(¶_id)) -} - -/// Implementation for the `candidate_pending_availability` function of the runtime API. -pub fn candidate_pending_availability( - para_id: ParaId, -) -> Option> { - >::candidate_pending_availability(para_id) -} - -/// Implementation for the `candidate_events` function of the runtime API. -// NOTE: this runs without block initialization, as it accesses events. -// this means it can run in a different session than other runtime APIs at the same block. -pub fn candidate_events(extract_event: F) -> Vec> -where - T: initializer::Config, - F: Fn(::RuntimeEvent) -> Option>, -{ - use inclusion::Event as RawEvent; - - >::read_events_no_consensus() - .into_iter() - .filter_map(|record| extract_event(record.event)) - .filter_map(|event| { - Some(match event { - RawEvent::::CandidateBacked(c, h, core, group) => - CandidateEvent::CandidateBacked(c, h, core, group), - RawEvent::::CandidateIncluded(c, h, core, group) => - CandidateEvent::CandidateIncluded(c, h, core, group), - RawEvent::::CandidateTimedOut(c, h, core) => - CandidateEvent::CandidateTimedOut(c, h, core), - // Not needed for candidate events. - RawEvent::::UpwardMessagesReceived { .. } => return None, - RawEvent::::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), - }) - }) - .collect() -} - -/// Get the session info for the given session, if stored. -pub fn session_info(index: SessionIndex) -> Option { - >::session_info(index) -} - -/// Implementation for the `dmq_contents` function of the runtime API. -pub fn dmq_contents( - recipient: ParaId, -) -> Vec>> { - >::dmq_contents(recipient) -} - -/// Implementation for the `inbound_hrmp_channels_contents` function of the runtime API. -pub fn inbound_hrmp_channels_contents( - recipient: ParaId, -) -> BTreeMap>>> { - >::inbound_hrmp_channels_contents(recipient) -} - -/// Implementation for the `validation_code_by_hash` function of the runtime API. -pub fn validation_code_by_hash( - hash: ValidationCodeHash, -) -> Option { - >::code_by_hash(hash) -} - -/// Disputes imported via means of on-chain imports. 
-pub fn on_chain_votes() -> Option> { - >::on_chain_votes() -} - -/// Submits an PVF pre-checking vote. -pub fn submit_pvf_check_statement( - stmt: PvfCheckStatement, - signature: ValidatorSignature, -) { - >::submit_pvf_check_statement(stmt, signature) -} - -/// Returns the list of all PVF code hashes that require pre-checking. -pub fn pvfs_require_precheck() -> Vec { - >::pvfs_require_precheck() -} - -/// Returns the validation code hash for the given parachain making the given -/// `OccupiedCoreAssumption`. -pub fn validation_code_hash( - para_id: ParaId, - assumption: OccupiedCoreAssumption, -) -> Option -where - T: inclusion::Config, -{ - with_assumption::(para_id, assumption, || { - >::current_code_hash(¶_id) - }) -} - -/// Implementation for `get_session_disputes` function from the runtime API -pub fn get_session_disputes( -) -> Vec<(SessionIndex, CandidateHash, DisputeState>)> { - >::disputes() -} - -/// Get session executor parameter set -pub fn session_executor_params( - session_index: SessionIndex, -) -> Option { - // This is to bootstrap the storage working around the runtime migration issue: - // https://github.com/paritytech/substrate/issues/9997 - // After the bootstrap is complete (no less than 7 session passed with the runtime) - // this code should be replaced with a pure - // >::session_executor_params(session_index) call. - match >::session_executor_params(session_index) { - Some(ep) => Some(ep), - None => Some(ExecutorParams::default()), - } -} - -/// Implementation of `unapplied_slashes` runtime API -pub fn unapplied_slashes( -) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { - >::unapplied_slashes() -} - -/// Implementation of `submit_report_dispute_lost` runtime API -pub fn submit_unsigned_slashing_report( - dispute_proof: slashing::DisputeProof, - key_ownership_proof: slashing::OpaqueKeyOwnershipProof, -) -> Option<()> { - let key_ownership_proof = key_ownership_proof.decode()?; - - >::submit_unsigned_slashing_report( - dispute_proof, - key_ownership_proof, - ) -} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v6.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v6.rs deleted file mode 100644 index debf66a37844..000000000000 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v6.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use crate::{configuration, initializer}; - -/// Return the min backing votes threshold from the configuration. 
-pub fn minimum_backing_votes() -> u32 { - >::config().minimum_backing_votes -} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 8724a1f7b3b1..35d92f71084f 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -11,21 +11,399 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . +//! A module exporting runtime API implementation functions for all runtime APIs using `v5` +//! primitives. +//! +//! Runtimes implementing the v2 runtime API are recommended to forward directly to these +//! functions. -//! Implementation of the Runtime API methods added in v7. - -use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; -use frame_system::pallet_prelude::BlockNumberFor; +use crate::{ + configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, + scheduler::{self, CoreOccupied}, + session_info, shared, +}; +use frame_system::pallet_prelude::*; use primitives::{ async_backing::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, }, - Id as ParaId, + slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, + CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use sp_std::prelude::*; +use sp_runtime::traits::One; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; + +/// Implementation for the `validators` function of the runtime API. +pub fn validators() -> Vec { + >::active_validator_keys() +} + +/// Implementation for the `validator_groups` function of the runtime API. +pub fn validator_groups( +) -> (Vec>, GroupRotationInfo>) { + let now = >::block_number() + One::one(); + + let groups = >::validator_groups(); + let rotation_info = >::group_rotation_info(now); + + (groups, rotation_info) +} + +/// Implementation for the `availability_cores` function of the runtime API. +pub fn availability_cores() -> Vec>> { + let cores = >::availability_cores(); + let now = >::block_number() + One::one(); + + // This explicit update is only strictly required for session boundaries: + // + // At the end of a session we clear the claim queues: Without this update call, nothing would be + // scheduled to the client. 
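// (Illustrative clarification only, not an added line of the patch: in the
// call below the first argument is the set of cores just freed by this call,
// empty here since nothing is being freed, and `now` is the child block
// number computed above, so the `scheduled_paras()` read further down
// reflects the assignments expected for the child block.)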
+ >::update_claimqueue(Vec::new(), now); + + let time_out_for = >::availability_timeout_predicate(); + + let group_responsible_for = + |backed_in_number, core_index| match >::group_assigned_to_core( + core_index, + backed_in_number, + ) { + Some(g) => g, + None => { + log::warn!( + target: "runtime::polkadot-api::v2", + "Could not determine the group responsible for core extracted \ + from list of cores for some prior block in same session", + ); + + GroupIndex(0) + }, + }; + + let scheduled: BTreeMap<_, _> = >::scheduled_paras().collect(); + + cores + .into_iter() + .enumerate() + .map(|(i, core)| match core { + CoreOccupied::Paras(entry) => { + let pending_availability = + >::pending_availability(entry.para_id()) + .expect("Occupied core always has pending availability; qed"); + + let backed_in_number = *pending_availability.backed_in_number(); + CoreState::Occupied(OccupiedCore { + next_up_on_available: >::next_up_on_available(CoreIndex( + i as u32, + )), + occupied_since: backed_in_number, + time_out_at: time_out_for(backed_in_number).live_until, + next_up_on_time_out: >::next_up_on_time_out(CoreIndex( + i as u32, + )), + availability: pending_availability.availability_votes().clone(), + group_responsible: group_responsible_for( + backed_in_number, + pending_availability.core_occupied(), + ), + candidate_hash: pending_availability.candidate_hash(), + candidate_descriptor: pending_availability.candidate_descriptor().clone(), + }) + }, + CoreOccupied::Free => { + if let Some(para_id) = scheduled.get(&CoreIndex(i as _)).cloned() { + CoreState::Scheduled(primitives::ScheduledCore { para_id, collator: None }) + } else { + CoreState::Free + } + }, + }) + .collect() +} + +/// Returns current block number being processed and the corresponding root hash. +fn current_relay_parent( +) -> (BlockNumberFor, ::Hash) { + use parity_scale_codec::Decode as _; + let state_version = >::runtime_version().state_version(); + let relay_parent_number = >::block_number(); + let relay_parent_storage_root = T::Hash::decode(&mut &sp_io::storage::root(state_version)[..]) + .expect("storage root must decode to the Hash type; qed"); + (relay_parent_number, relay_parent_storage_root) +} + +fn with_assumption( + para_id: ParaId, + assumption: OccupiedCoreAssumption, + build: F, +) -> Option +where + Config: inclusion::Config, + F: FnOnce() -> Option, +{ + match assumption { + OccupiedCoreAssumption::Included => { + >::force_enact(para_id); + build() + }, + OccupiedCoreAssumption::TimedOut => build(), + OccupiedCoreAssumption::Free => { + if >::pending_availability(para_id).is_some() { + None + } else { + build() + } + }, + } +} + +/// Implementation for the `persisted_validation_data` function of the runtime API. +pub fn persisted_validation_data( + para_id: ParaId, + assumption: OccupiedCoreAssumption, +) -> Option>> { + let (relay_parent_number, relay_parent_storage_root) = current_relay_parent::(); + with_assumption::(para_id, assumption, || { + crate::util::make_persisted_validation_data::( + para_id, + relay_parent_number, + relay_parent_storage_root, + ) + }) +} + +/// Implementation for the `assumed_validation_data` function of the runtime API. 
+pub fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, +) -> Option<(PersistedValidationData>, ValidationCodeHash)> { + let (relay_parent_number, relay_parent_storage_root) = current_relay_parent::(); + // This closure obtains the `persisted_validation_data` for the given `para_id` and matches + // its hash against an expected one. + let make_validation_data = || { + crate::util::make_persisted_validation_data::( + para_id, + relay_parent_number, + relay_parent_storage_root, + ) + .filter(|validation_data| validation_data.hash() == expected_persisted_validation_data_hash) + }; + + let persisted_validation_data = make_validation_data().or_else(|| { + // Try again with force enacting the core. This check only makes sense if + // the core is occupied. + >::pending_availability(para_id).and_then(|_| { + >::force_enact(para_id); + make_validation_data() + }) + }); + // If we were successful, also query current validation code hash. + persisted_validation_data.zip(>::current_code_hash(¶_id)) +} + +/// Implementation for the `check_validation_outputs` function of the runtime API. +pub fn check_validation_outputs( + para_id: ParaId, + outputs: primitives::CandidateCommitments, +) -> bool { + let relay_parent_number = >::block_number(); + >::check_validation_outputs_for_runtime_api( + para_id, + relay_parent_number, + outputs, + ) +} + +/// Implementation for the `session_index_for_child` function of the runtime API. +pub fn session_index_for_child() -> SessionIndex { + // Just returns the session index from `inclusion`. Runtime APIs follow + // initialization so the initializer will have applied any pending session change + // which is expected at the child of the block whose context the runtime API was invoked + // in. + // + // Incidentally, this is also the rationale for why it is OK to query validators or + // occupied cores or etc. and expect the correct response "for child". + >::session_index() +} + +/// Implementation for the `AuthorityDiscoveryApi::authorities()` function of the runtime API. +/// It is a heavy call, but currently only used for authority discovery, so it is fine. +/// Gets next, current and some historical authority ids using `session_info` module. +pub fn relevant_authority_ids( +) -> Vec { + let current_session_index = session_index_for_child::(); + let earliest_stored_session = >::earliest_stored_session(); + + // Due to `max_validators`, the `SessionInfo` stores only the validators who are actively + // selected to participate in parachain consensus. We'd like all authorities for the current + // and next sessions to be used in authority-discovery. The two sets likely have large overlap. + let mut authority_ids = >::current_authorities().to_vec(); + authority_ids.extend(>::next_authorities().to_vec()); + + // Due to disputes, we'd like to remain connected to authorities of the previous few sessions. + // For this, we don't need anyone other than the validators actively participating in consensus. + for session_index in earliest_stored_session..current_session_index { + let info = >::session_info(session_index); + if let Some(mut info) = info { + authority_ids.append(&mut info.discovery_keys); + } + } + + authority_ids.sort(); + authority_ids.dedup(); + + authority_ids +} + +/// Implementation for the `validation_code` function of the runtime API. 
+pub fn validation_code( + para_id: ParaId, + assumption: OccupiedCoreAssumption, +) -> Option { + with_assumption::(para_id, assumption, || >::current_code(¶_id)) +} + +/// Implementation for the `candidate_pending_availability` function of the runtime API. +pub fn candidate_pending_availability( + para_id: ParaId, +) -> Option> { + >::candidate_pending_availability(para_id) +} + +/// Implementation for the `candidate_events` function of the runtime API. +// NOTE: this runs without block initialization, as it accesses events. +// this means it can run in a different session than other runtime APIs at the same block. +pub fn candidate_events(extract_event: F) -> Vec> +where + T: initializer::Config, + F: Fn(::RuntimeEvent) -> Option>, +{ + use inclusion::Event as RawEvent; + + >::read_events_no_consensus() + .into_iter() + .filter_map(|record| extract_event(record.event)) + .filter_map(|event| { + Some(match event { + RawEvent::::CandidateBacked(c, h, core, group) => + CandidateEvent::CandidateBacked(c, h, core, group), + RawEvent::::CandidateIncluded(c, h, core, group) => + CandidateEvent::CandidateIncluded(c, h, core, group), + RawEvent::::CandidateTimedOut(c, h, core) => + CandidateEvent::CandidateTimedOut(c, h, core), + // Not needed for candidate events. + RawEvent::::UpwardMessagesReceived { .. } => return None, + RawEvent::::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), + }) + }) + .collect() +} + +/// Get the session info for the given session, if stored. +pub fn session_info(index: SessionIndex) -> Option { + >::session_info(index) +} + +/// Implementation for the `dmq_contents` function of the runtime API. +pub fn dmq_contents( + recipient: ParaId, +) -> Vec>> { + >::dmq_contents(recipient) +} + +/// Implementation for the `inbound_hrmp_channels_contents` function of the runtime API. +pub fn inbound_hrmp_channels_contents( + recipient: ParaId, +) -> BTreeMap>>> { + >::inbound_hrmp_channels_contents(recipient) +} + +/// Implementation for the `validation_code_by_hash` function of the runtime API. +pub fn validation_code_by_hash( + hash: ValidationCodeHash, +) -> Option { + >::code_by_hash(hash) +} + +/// Disputes imported via means of on-chain imports. +pub fn on_chain_votes() -> Option> { + >::on_chain_votes() +} + +/// Submits an PVF pre-checking vote. +pub fn submit_pvf_check_statement( + stmt: PvfCheckStatement, + signature: ValidatorSignature, +) { + >::submit_pvf_check_statement(stmt, signature) +} + +/// Returns the list of all PVF code hashes that require pre-checking. +pub fn pvfs_require_precheck() -> Vec { + >::pvfs_require_precheck() +} + +/// Returns the validation code hash for the given parachain making the given +/// `OccupiedCoreAssumption`. 
+pub fn validation_code_hash( + para_id: ParaId, + assumption: OccupiedCoreAssumption, +) -> Option +where + T: inclusion::Config, +{ + with_assumption::(para_id, assumption, || { + >::current_code_hash(¶_id) + }) +} + +/// Implementation for `get_session_disputes` function from the runtime API +pub fn get_session_disputes( +) -> Vec<(SessionIndex, CandidateHash, DisputeState>)> { + >::disputes() +} + +/// Get session executor parameter set +pub fn session_executor_params( + session_index: SessionIndex, +) -> Option { + // This is to bootstrap the storage working around the runtime migration issue: + // https://github.com/paritytech/substrate/issues/9997 + // After the bootstrap is complete (no less than 7 session passed with the runtime) + // this code should be replaced with a pure + // >::session_executor_params(session_index) call. + match >::session_executor_params(session_index) { + Some(ep) => Some(ep), + None => Some(ExecutorParams::default()), + } +} + +/// Implementation of `unapplied_slashes` runtime API +pub fn unapplied_slashes( +) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { + >::unapplied_slashes() +} + +/// Implementation of `submit_report_dispute_lost` runtime API +pub fn submit_unsigned_slashing_report( + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, +) -> Option<()> { + let key_ownership_proof = key_ownership_proof.decode()?; + + >::submit_unsigned_slashing_report( + dispute_proof, + key_ownership_proof, + ) +} + +/// Return the min backing votes threshold from the configuration. +pub fn minimum_backing_votes() -> u32 { + >::config().minimum_backing_votes +} /// Implementation for `ParaBackingState` function from the runtime API pub fn backing_state( diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 45ea561b33fa..b81170c2377c 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -34,7 +34,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index dc05a34672b5..f1fdf1bb47cf 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -45,7 +45,8 @@ use runtime_parachains::{ dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl as parachains_runtime_api_impl, + paras_inherent as parachains_paras_inherent, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -1715,27 +1716,27 @@ sp_api::impl_runtime_apis! 
{ #[api_version(7)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { - parachains_runtime_api_impl::v5::validators::() + parachains_runtime_api_impl::validators::() } fn validator_groups() -> (Vec>, GroupRotationInfo) { - parachains_runtime_api_impl::v5::validator_groups::() + parachains_runtime_api_impl::validator_groups::() } fn availability_cores() -> Vec> { - parachains_runtime_api_impl::v5::availability_cores::() + parachains_runtime_api_impl::availability_cores::() } fn persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option> { - parachains_runtime_api_impl::v5::persisted_validation_data::(para_id, assumption) + parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) } fn assumed_validation_data( para_id: ParaId, expected_persisted_validation_data_hash: Hash, ) -> Option<(PersistedValidationData, ValidationCodeHash)> { - parachains_runtime_api_impl::v5::assumed_validation_data::( + parachains_runtime_api_impl::assumed_validation_data::( para_id, expected_persisted_validation_data_hash, ) @@ -1745,24 +1746,24 @@ sp_api::impl_runtime_apis! { para_id: ParaId, outputs: primitives::CandidateCommitments, ) -> bool { - parachains_runtime_api_impl::v5::check_validation_outputs::(para_id, outputs) + parachains_runtime_api_impl::check_validation_outputs::(para_id, outputs) } fn session_index_for_child() -> SessionIndex { - parachains_runtime_api_impl::v5::session_index_for_child::() + parachains_runtime_api_impl::session_index_for_child::() } fn validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::v5::validation_code::(para_id, assumption) + parachains_runtime_api_impl::validation_code::(para_id, assumption) } fn candidate_pending_availability(para_id: ParaId) -> Option> { - parachains_runtime_api_impl::v5::candidate_pending_availability::(para_id) + parachains_runtime_api_impl::candidate_pending_availability::(para_id) } fn candidate_events() -> Vec> { - parachains_runtime_api_impl::v5::candidate_events::(|ev| { + parachains_runtime_api_impl::candidate_events::(|ev| { match ev { RuntimeEvent::ParaInclusion(ev) => { Some(ev) @@ -1773,55 +1774,55 @@ sp_api::impl_runtime_apis! 
{ } fn session_info(index: SessionIndex) -> Option { - parachains_runtime_api_impl::v5::session_info::(index) + parachains_runtime_api_impl::session_info::(index) } fn session_executor_params(session_index: SessionIndex) -> Option { - parachains_runtime_api_impl::v5::session_executor_params::(session_index) + parachains_runtime_api_impl::session_executor_params::(session_index) } fn dmq_contents(recipient: ParaId) -> Vec> { - parachains_runtime_api_impl::v5::dmq_contents::(recipient) + parachains_runtime_api_impl::dmq_contents::(recipient) } fn inbound_hrmp_channels_contents( recipient: ParaId ) -> BTreeMap>> { - parachains_runtime_api_impl::v5::inbound_hrmp_channels_contents::(recipient) + parachains_runtime_api_impl::inbound_hrmp_channels_contents::(recipient) } fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { - parachains_runtime_api_impl::v5::validation_code_by_hash::(hash) + parachains_runtime_api_impl::validation_code_by_hash::(hash) } fn on_chain_votes() -> Option> { - parachains_runtime_api_impl::v5::on_chain_votes::() + parachains_runtime_api_impl::on_chain_votes::() } fn submit_pvf_check_statement( stmt: primitives::PvfCheckStatement, signature: primitives::ValidatorSignature ) { - parachains_runtime_api_impl::v5::submit_pvf_check_statement::(stmt, signature) + parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) } fn pvfs_require_precheck() -> Vec { - parachains_runtime_api_impl::v5::pvfs_require_precheck::() + parachains_runtime_api_impl::pvfs_require_precheck::() } fn validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::v5::validation_code_hash::(para_id, assumption) + parachains_runtime_api_impl::validation_code_hash::(para_id, assumption) } fn disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { - parachains_runtime_api_impl::v5::get_session_disputes::() + parachains_runtime_api_impl::get_session_disputes::() } fn unapplied_slashes( ) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { - parachains_runtime_api_impl::v5::unapplied_slashes::() + parachains_runtime_api_impl::unapplied_slashes::() } fn key_ownership_proof( @@ -1838,22 +1839,22 @@ sp_api::impl_runtime_apis! { dispute_proof: slashing::DisputeProof, key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Option<()> { - parachains_runtime_api_impl::v5::submit_unsigned_slashing_report::( + parachains_runtime_api_impl::submit_unsigned_slashing_report::( dispute_proof, key_ownership_proof, ) } fn minimum_backing_votes() -> u32 { - parachains_runtime_api_impl::v6::minimum_backing_votes::() + parachains_runtime_api_impl::minimum_backing_votes::() } fn para_backing_state(para_id: ParaId) -> Option { - parachains_runtime_api_impl::v7::backing_state::(para_id) + parachains_runtime_api_impl::backing_state::(para_id) } fn async_backing_params() -> primitives::AsyncBackingParams { - parachains_runtime_api_impl::v7::async_backing_params::() + parachains_runtime_api_impl::async_backing_params::() } } @@ -2029,7 +2030,7 @@ sp_api::impl_runtime_apis! 
{ impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { - parachains_runtime_api_impl::v5::relevant_authority_ids::() + parachains_runtime_api_impl::relevant_authority_ids::() } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 94852ad39f5a..f2202f9411b9 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_runtime_parachains::{ disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl::v5 as runtime_impl, + paras_inherent as parachains_paras_inherent, runtime_api_impl::v7 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index e698e384ad51..8bc3900db042 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -62,8 +62,9 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl as parachains_runtime_api_impl, scheduler as parachains_scheduler, - session_info as parachains_session_info, shared as parachains_shared, + runtime_api_impl::v7 as parachains_runtime_api_impl, + scheduler as parachains_scheduler, session_info as parachains_session_info, + shared as parachains_shared, }; use scale_info::TypeInfo; use sp_core::{OpaqueMetadata, RuntimeDebug, H256}; @@ -1562,27 +1563,27 @@ sp_api::impl_runtime_apis! { #[api_version(6)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { - parachains_runtime_api_impl::v5::validators::() + parachains_runtime_api_impl::validators::() } fn validator_groups() -> (Vec>, GroupRotationInfo) { - parachains_runtime_api_impl::v5::validator_groups::() + parachains_runtime_api_impl::validator_groups::() } fn availability_cores() -> Vec> { - parachains_runtime_api_impl::v5::availability_cores::() + parachains_runtime_api_impl::availability_cores::() } fn persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option> { - parachains_runtime_api_impl::v5::persisted_validation_data::(para_id, assumption) + parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) } fn assumed_validation_data( para_id: ParaId, expected_persisted_validation_data_hash: Hash, ) -> Option<(PersistedValidationData, ValidationCodeHash)> { - parachains_runtime_api_impl::v5::assumed_validation_data::( + parachains_runtime_api_impl::assumed_validation_data::( para_id, expected_persisted_validation_data_hash, ) @@ -1592,24 +1593,24 @@ sp_api::impl_runtime_apis! 
{ para_id: ParaId, outputs: primitives::CandidateCommitments, ) -> bool { - parachains_runtime_api_impl::v5::check_validation_outputs::(para_id, outputs) + parachains_runtime_api_impl::check_validation_outputs::(para_id, outputs) } fn session_index_for_child() -> SessionIndex { - parachains_runtime_api_impl::v5::session_index_for_child::() + parachains_runtime_api_impl::session_index_for_child::() } fn validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::v5::validation_code::(para_id, assumption) + parachains_runtime_api_impl::validation_code::(para_id, assumption) } fn candidate_pending_availability(para_id: ParaId) -> Option> { - parachains_runtime_api_impl::v5::candidate_pending_availability::(para_id) + parachains_runtime_api_impl::candidate_pending_availability::(para_id) } fn candidate_events() -> Vec> { - parachains_runtime_api_impl::v5::candidate_events::(|ev| { + parachains_runtime_api_impl::candidate_events::(|ev| { match ev { RuntimeEvent::ParaInclusion(ev) => { Some(ev) @@ -1620,55 +1621,55 @@ sp_api::impl_runtime_apis! { } fn session_info(index: SessionIndex) -> Option { - parachains_runtime_api_impl::v5::session_info::(index) + parachains_runtime_api_impl::session_info::(index) } fn session_executor_params(session_index: SessionIndex) -> Option { - parachains_runtime_api_impl::v5::session_executor_params::(session_index) + parachains_runtime_api_impl::session_executor_params::(session_index) } fn dmq_contents(recipient: ParaId) -> Vec> { - parachains_runtime_api_impl::v5::dmq_contents::(recipient) + parachains_runtime_api_impl::dmq_contents::(recipient) } fn inbound_hrmp_channels_contents( recipient: ParaId ) -> BTreeMap>> { - parachains_runtime_api_impl::v5::inbound_hrmp_channels_contents::(recipient) + parachains_runtime_api_impl::inbound_hrmp_channels_contents::(recipient) } fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { - parachains_runtime_api_impl::v5::validation_code_by_hash::(hash) + parachains_runtime_api_impl::validation_code_by_hash::(hash) } fn on_chain_votes() -> Option> { - parachains_runtime_api_impl::v5::on_chain_votes::() + parachains_runtime_api_impl::on_chain_votes::() } fn submit_pvf_check_statement( stmt: PvfCheckStatement, signature: ValidatorSignature, ) { - parachains_runtime_api_impl::v5::submit_pvf_check_statement::(stmt, signature) + parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) } fn pvfs_require_precheck() -> Vec { - parachains_runtime_api_impl::v5::pvfs_require_precheck::() + parachains_runtime_api_impl::pvfs_require_precheck::() } fn validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option { - parachains_runtime_api_impl::v5::validation_code_hash::(para_id, assumption) + parachains_runtime_api_impl::validation_code_hash::(para_id, assumption) } fn disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { - parachains_runtime_api_impl::v5::get_session_disputes::() + parachains_runtime_api_impl::get_session_disputes::() } fn unapplied_slashes( ) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { - parachains_runtime_api_impl::v5::unapplied_slashes::() + parachains_runtime_api_impl::unapplied_slashes::() } fn key_ownership_proof( @@ -1685,14 +1686,14 @@ sp_api::impl_runtime_apis! 
{ dispute_proof: slashing::DisputeProof, key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Option<()> { - parachains_runtime_api_impl::v5::submit_unsigned_slashing_report::( + parachains_runtime_api_impl::submit_unsigned_slashing_report::( dispute_proof, key_ownership_proof, ) } fn minimum_backing_votes() -> u32 { - parachains_runtime_api_impl::v6::minimum_backing_votes::() + parachains_runtime_api_impl::minimum_backing_votes::() } } @@ -1876,7 +1877,7 @@ sp_api::impl_runtime_apis! { impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { - parachains_runtime_api_impl::v5::relevant_authority_ids::() + parachains_runtime_api_impl::relevant_authority_ids::() } } From 3953c5d0d543d372bc8abbbb4b55ab1eabf8c960 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 14 Sep 2023 00:02:04 +0400 Subject: [PATCH 11/19] enable async backing API on Westend --- polkadot/runtime/westend/src/lib.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 8bc3900db042..5e6a120ba8b4 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1560,7 +1560,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(6)] + #[api_version(7)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1695,6 +1695,14 @@ sp_api::impl_runtime_apis! { fn minimum_backing_votes() -> u32 { parachains_runtime_api_impl::minimum_backing_votes::() } + + fn para_backing_state(para_id: ParaId) -> Option { + parachains_runtime_api_impl::backing_state::(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + parachains_runtime_api_impl::async_backing_params::() + } } impl beefy_primitives::BeefyApi for Runtime { From 9cb2847a0f08e5147e49074806e8d36e27a9acae Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 14 Sep 2023 10:17:33 +0400 Subject: [PATCH 12/19] bump westend --- cumulus/parachains/integration-tests/emulated/common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index c920a4b67ad5..2804128ec014 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -64,7 +64,7 @@ decl_test_relay_chains! { Hrmp: kusama_runtime::Hrmp, } }, - #[api_version(6)] + #[api_version(7)] pub struct Westend { genesis = westend::genesis(), on_init = (), From 4cb5e3598fb72c2ff8dd485cda6515ee2105980a Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Fri, 15 Sep 2023 15:09:44 +0300 Subject: [PATCH 13/19] Enable validation/collation v2 protocols (#1542) Removes the `network-protocol-staging` feature. This is needed for the first release of async backing capable nodes. 
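Across the subsystems touched below, the change reduces to dispatching on the
peer's negotiated protocol version wherever a message is sent. A condensed,
illustrative sketch of that pattern (identifiers are placeholders taken from
the bridge code further down, not a literal excerpt):

    if peer_version == ValidationVersion::V1.into() {
        send_validation_message_v1(net, peers, protocol_names, v1_msg, metrics);
    } else if peer_version == ValidationVersion::V2.into() {
        send_validation_message_v2(net, peers, protocol_names, v2_msg, metrics);
    } else {
        // Unknown or unsupported protocol version: nothing is sent.
    }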
Renames `VStaging` to `V2` on top of https://github.com/paritytech/polkadot-sdk/pull/1543 --------- Signed-off-by: Andrei Sandu --- .../Cargo.toml | 3 - .../relay-chain-minimal-node/Cargo.toml | 4 - .../src/collator_overseer.rs | 11 ++- .../relay-chain-minimal-node/src/lib.rs | 12 +-- cumulus/client/service/Cargo.toml | 5 -- cumulus/parachain-template/node/Cargo.toml | 5 +- polkadot/Cargo.toml | 1 - polkadot/cli/Cargo.toml | 1 - .../network/approval-distribution/src/lib.rs | 48 +++++------ .../approval-distribution/src/tests.rs | 16 ++-- .../network/bitfield-distribution/src/lib.rs | 19 ++-- .../bitfield-distribution/src/tests.rs | 10 +-- polkadot/node/network/bridge/src/rx/mod.rs | 44 +++++----- polkadot/node/network/bridge/src/rx/tests.rs | 29 +++---- polkadot/node/network/bridge/src/tx/mod.rs | 28 +++--- polkadot/node/network/bridge/src/tx/tests.rs | 19 ++-- .../src/collator_side/collation.rs | 21 ++--- .../src/collator_side/mod.rs | 52 +++++------ .../src/collator_side/tests/mod.rs | 45 +++++----- .../tests/prospective_parachains.rs | 31 ++++--- .../node/network/collator-protocol/src/lib.rs | 11 ++- .../src/validator_side/collation.rs | 2 +- .../src/validator_side/mod.rs | 44 ++++------ .../src/validator_side/tests/mod.rs | 23 +++-- .../tests/prospective_parachains.rs | 26 +++--- .../node/network/gossip-support/src/lib.rs | 2 +- polkadot/node/network/protocol/Cargo.toml | 3 - polkadot/node/network/protocol/src/lib.rs | 60 +++++++------ .../node/network/protocol/src/peer_set.rs | 23 ++--- .../protocol/src/request_response/mod.rs | 24 +++--- .../protocol/src/request_response/outgoing.rs | 14 +-- .../request_response/{vstaging.rs => v2.rs} | 6 +- .../src/legacy_v1/mod.rs | 30 +++---- .../network/statement-distribution/src/lib.rs | 55 ++++++------ .../src/{vstaging => v2}/candidates.rs | 0 .../src/{vstaging => v2}/cluster.rs | 0 .../src/{vstaging => v2}/grid.rs | 4 +- .../src/{vstaging => v2}/groups.rs | 0 .../src/{vstaging => v2}/mod.rs | 67 +++++++-------- .../src/{vstaging => v2}/requests.rs | 4 +- .../src/{vstaging => v2}/statement_store.rs | 2 +- .../src/{vstaging => v2}/tests/cluster.rs | 46 +++++----- .../src/{vstaging => v2}/tests/grid.rs | 86 +++++++++---------- .../src/{vstaging => v2}/tests/mod.rs | 8 +- .../src/{vstaging => v2}/tests/requests.rs | 86 +++++++------------ polkadot/node/service/Cargo.toml | 4 - polkadot/node/service/src/lib.rs | 8 +- polkadot/node/service/src/overseer.rs | 18 ++-- .../node/subsystem-util/src/runtime/mod.rs | 2 +- .../test-parachains/adder/collator/Cargo.toml | 3 - 50 files changed, 469 insertions(+), 596 deletions(-) rename polkadot/node/network/protocol/src/request_response/{vstaging.rs => v2.rs} (94%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/candidates.rs (100%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/cluster.rs (100%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/grid.rs (99%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/groups.rs (100%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/mod.rs (97%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/requests.rs (99%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/statement_store.rs (99%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/cluster.rs (95%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/grid.rs (95%) rename 
polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/mod.rs (98%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/requests.rs (95%) diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 39eda5075e29..bc8d0d430c77 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -41,6 +41,3 @@ metered = { package = "prioritized-metered-channel", version = "0.5.1", default- # Cumulus cumulus-test-service = { path = "../../test/service" } - -[features] -network-protocol-staging = [ "polkadot-service/network-protocol-staging" ] diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 39056d6b6511..226474d3d38c 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -41,7 +41,3 @@ tracing = "0.1.37" async-trait = "0.1.73" futures = "0.3.28" -[features] -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index bea2fc330a24..a83a18f7cd96 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -30,7 +30,7 @@ use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, request_response::{ v1::{self, AvailableDataFetchingRequest}, - vstaging, IncomingRequestReceiver, ReqProtocolNames, + v2, IncomingRequestReceiver, ReqProtocolNames, }, }; use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; @@ -63,9 +63,8 @@ pub(crate) struct CollatorOverseerGenArgs<'a> { pub authority_discovery_service: AuthorityDiscoveryService, /// Receiver for collation request protocol v1. pub collation_req_receiver_v1: IncomingRequestReceiver, - /// Receiver for collation request protocol vstaging. - pub collation_req_receiver_vstaging: - IncomingRequestReceiver, + /// Receiver for collation request protocol v2. + pub collation_req_receiver_v2: IncomingRequestReceiver, /// Receiver for availability request protocol pub available_data_req_receiver: IncomingRequestReceiver, /// Prometheus registry, commonly used for production systems, less so for test. 
@@ -88,7 +87,7 @@ fn build_overseer( sync_oracle, authority_discovery_service, collation_req_receiver_v1, - collation_req_receiver_vstaging, + collation_req_receiver_v2, available_data_req_receiver, registry, spawner, @@ -121,7 +120,7 @@ fn build_overseer( peer_id: network_service.local_peer_id(), collator_pair, request_receiver_v1: collation_req_receiver_v1, - request_receiver_vstaging: collation_req_receiver_vstaging, + request_receiver_v2: collation_req_receiver_v2, metrics: Metrics::register(registry)?, }; CollatorProtocolSubsystem::new(side) diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 366d428eda70..08e4e8e34aba 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -23,7 +23,7 @@ use polkadot_network_bridge::{peer_sets_info, IsAuthority}; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, request_response::{ - v1, vstaging, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, + v1, v2, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, }, }; @@ -182,7 +182,7 @@ async fn new_minimal_relay_chain( } let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - let (collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver) = + let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = build_request_response_protocol_receivers(&request_protocol_names, &mut net_config); let best_header = relay_chain_rpc_client @@ -212,7 +212,7 @@ async fn new_minimal_relay_chain( sync_oracle, authority_discovery_service, collation_req_receiver_v1, - collation_req_receiver_vstaging, + collation_req_receiver_v2, available_data_req_receiver, registry: prometheus_registry.as_ref(), spawner: task_manager.spawn_handle(), @@ -234,13 +234,13 @@ fn build_request_response_protocol_receivers( config: &mut FullNetworkConfiguration, ) -> ( IncomingRequestReceiver, - IncomingRequestReceiver, + IncomingRequestReceiver, IncomingRequestReceiver, ) { let (collation_req_receiver_v1, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); - let (collation_req_receiver_vstaging, cfg) = + let (collation_req_receiver_v2, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -248,5 +248,5 @@ fn build_request_response_protocol_receivers( config.add_request_response_protocol(cfg); let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names); config.add_request_response_protocol(cfg); - (collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver) + (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) } diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index b53bdbdfc815..b7c274ceecdc 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -40,8 +40,3 @@ cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } -[features] -network-protocol-staging = [ - "cumulus-relay-chain-inprocess-interface/network-protocol-staging", - 
"cumulus-relay-chain-minimal-node/network-protocol-staging", -] diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 6de57b185e4a..0c0ad1af43a4 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -89,7 +89,4 @@ try-runtime = [ "polkadot-cli/try-runtime", "sp-runtime/try-runtime", ] -network-protocol-staging = [ - "cumulus-client-service/network-protocol-staging", - "polkadot-cli/network-protocol-staging", -] + diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index b9c132bbff92..0d016978078b 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -72,7 +72,6 @@ jemalloc-allocator = [ # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load ci-only-tests = [ "polkadot-node-core-pvf/ci-only-tests" ] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 794d8c4714af..09f89a36f195 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -80,4 +80,3 @@ runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", "service/runtime-metrics", ] -network-protocol-staging = [ "service/network-protocol-staging" ] diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 70c20437d125..f76826d7fdf4 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -26,8 +26,8 @@ use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, peer_set::{ValidationVersion, MAX_NOTIFICATION_SIZE}, - v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, - Versioned, VersionedValidationProtocol, View, + v1 as protocol_v1, v2 as protocol_v2, PeerId, UnifiedReputationChange as Rep, Versioned, + VersionedValidationProtocol, View, }; use polkadot_node_primitives::approval::{ AssignmentCert, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, @@ -602,9 +602,7 @@ impl State { { match msg { Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) | - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( - assignments, - )) => { + Versioned::V2(protocol_v2::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -644,9 +642,7 @@ impl State { } }, Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( - approvals, - )) => { + Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -1060,7 +1056,7 @@ impl State { route_random }; - let (v1_peers, vstaging_peers) = { + let (v1_peers, v2_peers) = { let peer_data = &self.peer_data; let peers = entry .known_by @@ -1090,9 +1086,9 @@ impl State { } let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); - let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - (v1_peers, vstaging_peers) + (v1_peers, 
v2_peers) }; if !v1_peers.is_empty() { @@ -1103,10 +1099,10 @@ impl State { .await; } - if !vstaging_peers.is_empty() { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers, - versioned_assignments_packet(ValidationVersion::VStaging, assignments.clone()), + v2_peers, + versioned_assignments_packet(ValidationVersion::V2, assignments.clone()), )) .await; } @@ -1395,7 +1391,7 @@ impl State { in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) }; - let (v1_peers, vstaging_peers) = { + let (v1_peers, v2_peers) = { let peer_data = &self.peer_data; let peers = entry .known_by @@ -1425,9 +1421,9 @@ impl State { } let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); - let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - (v1_peers, vstaging_peers) + (v1_peers, v2_peers) }; let approvals = vec![vote]; @@ -1440,10 +1436,10 @@ impl State { .await; } - if !vstaging_peers.is_empty() { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers, - versioned_approvals_packet(ValidationVersion::VStaging, approvals), + v2_peers, + versioned_approvals_packet(ValidationVersion::V2, approvals), )) .await; } @@ -2017,9 +2013,9 @@ fn versioned_approvals_packet( Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Approvals(approvals), )), - ValidationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Approvals(approvals), )), } } @@ -2033,9 +2029,9 @@ fn versioned_assignments_packet( Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Assignments(assignments), )), - ValidationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Assignments(assignments), )), } } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 1e9ae7b62007..29c7d8aa45da 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -2388,9 +2388,9 @@ fn import_versioned_approval() { let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // All peers are aware of relay parent. 
- setup_peer_with_view(overseer, &peer_a, ValidationVersion::VStaging, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, ValidationVersion::V2, view![hash]).await; setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, ValidationVersion::VStaging, view![hash]).await; + setup_peer_with_view(overseer, &peer_c, ValidationVersion::V2, view![hash]).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -2431,8 +2431,8 @@ fn import_versioned_approval() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Assignments(assignments) )) )) => { assert_eq!(peers.len(), 2); @@ -2450,8 +2450,8 @@ fn import_versioned_approval() { validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_vstaging::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_a, Versioned::VStaging(msg)).await; + let msg = protocol_v2::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer(overseer, &peer_a, Versioned::V2(msg)).await; assert_matches!( overseer_recv(overseer).await, @@ -2483,8 +2483,8 @@ fn import_versioned_approval() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals) + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Approvals(approvals) )) )) => { assert_eq!(peers, vec![peer_c]); diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs index c85d874bc4db..68e381ab6be5 100644 --- a/polkadot/node/network/bitfield-distribution/src/lib.rs +++ b/polkadot/node/network/bitfield-distribution/src/lib.rs @@ -31,8 +31,8 @@ use polkadot_node_network_protocol::{ GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, peer_set::{ProtocolVersion, ValidationVersion}, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_subsystem::{ jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, @@ -96,8 +96,8 @@ impl BitfieldGossipMessage { self.relay_parent, self.signed_availability.into(), )), - Some(ValidationVersion::VStaging) => - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Some(ValidationVersion::V2) => + Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield( self.relay_parent, self.signed_availability.into(), )), @@ -502,8 +502,7 @@ async fn relay_message( }; let v1_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V1); - let vstaging_interested_peers = - filter_by_version(&interested_peers, ValidationVersion::VStaging); + let v2_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V2); if 
!v1_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -513,10 +512,10 @@ async fn relay_message( .await; } - if !vstaging_interested_peers.is_empty() { + if !v2_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_interested_peers, - message.into_validation_protocol(ValidationVersion::VStaging.into()), + v2_interested_peers, + message.into_validation_protocol(ValidationVersion::V2.into()), )) .await } @@ -538,7 +537,7 @@ async fn process_incoming_peer_message( relay_parent, bitfield, )) => (relay_parent, bitfield), - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield( relay_parent, bitfield, )) => (relay_parent, bitfield), diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index d6795247e786..ba2434ea47d6 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -1111,9 +1111,9 @@ fn network_protocol_versioning() { let peer_c = PeerId::random(); let peers = [ - (peer_a, ValidationVersion::VStaging), + (peer_a, ValidationVersion::V2), (peer_b, ValidationVersion::V1), - (peer_c, ValidationVersion::VStaging), + (peer_c, ValidationVersion::V2), ]; // validator 0 key pair @@ -1173,7 +1173,7 @@ fn network_protocol_versioning() { &Default::default(), NetworkBridgeEvent::PeerMessage( peer_a, - msg.clone().into_network_message(ValidationVersion::VStaging.into()), + msg.clone().into_network_message(ValidationVersion::V2.into()), ), &mut rng, )); @@ -1201,14 +1201,14 @@ fn network_protocol_versioning() { } ); - // vstaging gossip + // v2 gossip assert_matches!( handle.recv().await, AllMessages::NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg), ) => { assert_eq!(peers, vec![peer_c]); - assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::VStaging.into())); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V2.into())); } ); diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index e1125ebc904d..ed81de71d3b7 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -36,7 +36,7 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, OurView, PeerId, + v1 as protocol_v1, v2 as protocol_v2, ObservedRole, OurView, PeerId, UnifiedReputationChange as Rep, View, }; @@ -265,13 +265,13 @@ where ), &metrics, ), - ValidationVersion::VStaging => send_message( + ValidationVersion::V2 => send_message( &mut network_service, vec![peer], PeerSet::Validation, version, &peerset_protocol_names, - WireMessage::::ViewUpdate( + WireMessage::::ViewUpdate( local_view, ), &metrics, @@ -307,13 +307,13 @@ where ), &metrics, ), - CollationVersion::VStaging => send_message( + CollationVersion::V2 => send_message( &mut network_service, vec![peer], PeerSet::Collation, version, &peerset_protocol_names, - WireMessage::::ViewUpdate( + WireMessage::::ViewUpdate( local_view, ), &metrics, @@ -468,9 +468,9 @@ where &metrics, ) } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::VStaging.into()) + Some(ValidationVersion::V2.into()) { - 
handle_peer_messages::( + handle_peer_messages::( remote, PeerSet::Validation, &mut shared.0.lock().validation_peers, @@ -510,9 +510,9 @@ where &metrics, ) } else if expected_versions[PeerSet::Collation] == - Some(CollationVersion::VStaging.into()) + Some(CollationVersion::V2.into()) { - handle_peer_messages::( + handle_peer_messages::( remote, PeerSet::Collation, &mut shared.0.lock().collation_peers, @@ -816,10 +816,8 @@ fn update_our_view( let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into()); let v1_collation_peers = filter_by_version(&collation_peers, CollationVersion::V1.into()); - let vstaging_validation_peers = - filter_by_version(&validation_peers, ValidationVersion::VStaging.into()); - let vstaging_collation_peers = - filter_by_version(&collation_peers, ValidationVersion::VStaging.into()); + let v2_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V2.into()); + let v2_collation_peers = filter_by_version(&collation_peers, ValidationVersion::V2.into()); send_validation_message_v1( net, @@ -837,17 +835,17 @@ fn update_our_view( metrics, ); - send_validation_message_vstaging( + send_validation_message_v2( net, - vstaging_validation_peers, + v2_validation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, ); - send_collation_message_vstaging( + send_collation_message_v2( net, - vstaging_collation_peers, + v2_collation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view), metrics, @@ -958,36 +956,36 @@ fn send_collation_message_v1( ); } -fn send_validation_message_vstaging( +fn send_validation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), protocol_names, message, metrics, ); } -fn send_collation_message_vstaging( +fn send_collation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Collation, - CollationVersion::VStaging.into(), + CollationVersion::V2.into(), protocol_names, message, metrics, diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index 127f46e0fa37..7c69cce48391 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -1216,10 +1216,10 @@ fn network_protocol_versioning_view_update() { let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ - (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[0], PeerSet::Validation, ValidationVersion::V2), (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), - (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + (peer_ids[3], PeerSet::Collation, ValidationVersion::V2), ]; let head = Hash::repeat_byte(1); @@ -1245,8 +1245,8 @@ fn network_protocol_versioning_view_update() { ValidationVersion::V1 => WireMessage::::ViewUpdate(view.clone()) .encode(), - ValidationVersion::VStaging => - WireMessage::::ViewUpdate(view.clone()) + ValidationVersion::V2 => + WireMessage::::ViewUpdate(view.clone()) .encode(), }; assert_network_actions_contains( @@ -1268,12 +1268,7 @@ fn network_protocol_versioning_subsystem_msg() { let peer = PeerId::random(); 
network_handle - .connect_peer( - peer, - ValidationVersion::VStaging, - PeerSet::Validation, - ObservedRole::Full, - ) + .connect_peer(peer, ValidationVersion::V2, PeerSet::Validation, ObservedRole::Full) .await; // bridge will inform about all connected peers. @@ -1282,7 +1277,7 @@ fn network_protocol_versioning_subsystem_msg() { NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Full, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), None, ), &mut virtual_overseer, @@ -1297,9 +1292,9 @@ fn network_protocol_versioning_subsystem_msg() { } let approval_distribution_message = - protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -1315,7 +1310,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::ApprovalDistribution( ApprovalDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) ) ) => { assert_eq!(p, peer); @@ -1330,10 +1325,10 @@ fn network_protocol_versioning_subsystem_msg() { signature: sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]), }; let statement_distribution_message = - protocol_vstaging::StatementDistributionMessage::V1Compatibility( + protocol_v2::StatementDistributionMessage::V1Compatibility( protocol_v1::StatementDistributionMessage::LargeStatement(metadata), ); - let msg = protocol_vstaging::ValidationProtocol::StatementDistribution( + let msg = protocol_v2::ValidationProtocol::StatementDistribution( statement_distribution_message.clone(), ); @@ -1349,7 +1344,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::StatementDistribution( StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) ) ) => { assert_eq!(p, peer); diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index 7fa1149593ca..f15635f1f41c 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -20,7 +20,7 @@ use super::*; use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion}, request_response::ReqProtocolNames, - v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, Versioned, + v1 as protocol_v1, v2 as protocol_v2, PeerId, Versioned, }; use polkadot_node_subsystem::{ @@ -198,7 +198,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -223,7 +223,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -248,7 +248,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_collation_message_vstaging( + Versioned::V2(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -273,7 +273,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, 
), - Versioned::VStaging(msg) => send_collation_message_vstaging( + Versioned::V2(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -296,13 +296,11 @@ where Requests::AvailableDataFetchingV1(_) => metrics.on_message("available_data_fetching_v1"), Requests::CollationFetchingV1(_) => metrics.on_message("collation_fetching_v1"), - Requests::CollationFetchingVStaging(_) => - metrics.on_message("collation_fetching_vstaging"), + Requests::CollationFetchingV2(_) => metrics.on_message("collation_fetching_v2"), Requests::PoVFetchingV1(_) => metrics.on_message("pov_fetching_v1"), Requests::DisputeSendingV1(_) => metrics.on_message("dispute_sending_v1"), Requests::StatementFetchingV1(_) => metrics.on_message("statement_fetching_v1"), - Requests::AttestedCandidateVStaging(_) => - metrics.on_message("attested_candidate_vstaging"), + Requests::AttestedCandidateV2(_) => metrics.on_message("attested_candidate_v2"), } network_service @@ -425,36 +423,36 @@ fn send_collation_message_v1( ); } -fn send_validation_message_vstaging( +fn send_validation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), protocol_names, message, metrics, ); } -fn send_collation_message_vstaging( +fn send_collation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Collation, - CollationVersion::VStaging.into(), + CollationVersion::V2.into(), protocol_names, message, metrics, diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs index 21cd134c54f2..48287f8b74c9 100644 --- a/polkadot/node/network/bridge/src/tx/tests.rs +++ b/polkadot/node/network/bridge/src/tx/tests.rs @@ -341,10 +341,10 @@ fn network_protocol_versioning_send() { let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ - (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[0], PeerSet::Validation, ValidationVersion::V2), (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), - (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + (peer_ids[3], PeerSet::Collation, ValidationVersion::V2), ]; for &(peer_id, peer_set, version) in &peers { @@ -359,9 +359,9 @@ fn network_protocol_versioning_send() { { let approval_distribution_message = - protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -372,7 +372,7 @@ fn network_protocol_versioning_send() { .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendValidationMessage( receivers.clone(), - Versioned::VStaging(msg.clone()), + Versioned::V2(msg.clone()), ), }) .timeout(TIMEOUT) @@ -398,15 +398,14 @@ fn network_protocol_versioning_send() { // send a collation protocol message. 
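		// Illustrative trace, using only names introduced in this diff: the v2 `Declare`
		// built below is wrapped in `Versioned::V2`, the bridge's tx worker matches on that
		// wrapper and calls `send_collation_message_v2`, which emits a
		// `WireMessage::ProtocolMessage` with `CollationVersion::V2` (the "collation/2"
		// protocol name).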
{ - let collator_protocol_message = protocol_vstaging::CollatorProtocolMessage::Declare( + let collator_protocol_message = protocol_v2::CollatorProtocolMessage::Declare( Sr25519Keyring::Alice.public().into(), 0_u32.into(), dummy_collator_signature(), ); - let msg = protocol_vstaging::CollationProtocol::CollatorProtocol( - collator_protocol_message.clone(), - ); + let msg = + protocol_v2::CollationProtocol::CollatorProtocol(collator_protocol_message.clone()); let receivers = vec![peer_ids[1], peer_ids[2]]; @@ -414,7 +413,7 @@ fn network_protocol_versioning_send() { .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendCollationMessages(vec![( receivers.clone(), - Versioned::VStaging(msg.clone()), + Versioned::V2(msg.clone()), )]), }) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs index 627c38b776f7..53f947142d10 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs @@ -22,8 +22,7 @@ use futures::{future::BoxFuture, stream::FuturesUnordered}; use polkadot_node_network_protocol::{ request_response::{ - incoming::OutgoingResponse, v1 as protocol_v1, vstaging as protocol_vstaging, - IncomingRequest, + incoming::OutgoingResponse, v1 as protocol_v1, v2 as protocol_v2, IncomingRequest, }, PeerId, }; @@ -89,7 +88,7 @@ pub struct WaitingCollationFetches { /// Backwards-compatible wrapper for incoming collations requests. pub enum VersionedCollationRequest { V1(IncomingRequest), - VStaging(IncomingRequest), + V2(IncomingRequest), } impl From> for VersionedCollationRequest { @@ -98,11 +97,9 @@ impl From> for VersionedC } } -impl From> - for VersionedCollationRequest -{ - fn from(req: IncomingRequest) -> Self { - Self::VStaging(req) +impl From> for VersionedCollationRequest { + fn from(req: IncomingRequest) -> Self { + Self::V2(req) } } @@ -111,7 +108,7 @@ impl VersionedCollationRequest { pub fn para_id(&self) -> ParaId { match self { VersionedCollationRequest::V1(req) => req.payload.para_id, - VersionedCollationRequest::VStaging(req) => req.payload.para_id, + VersionedCollationRequest::V2(req) => req.payload.para_id, } } @@ -119,7 +116,7 @@ impl VersionedCollationRequest { pub fn relay_parent(&self) -> Hash { match self { VersionedCollationRequest::V1(req) => req.payload.relay_parent, - VersionedCollationRequest::VStaging(req) => req.payload.relay_parent, + VersionedCollationRequest::V2(req) => req.payload.relay_parent, } } @@ -127,7 +124,7 @@ impl VersionedCollationRequest { pub fn peer_id(&self) -> PeerId { match self { VersionedCollationRequest::V1(req) => req.peer, - VersionedCollationRequest::VStaging(req) => req.peer, + VersionedCollationRequest::V2(req) => req.peer, } } @@ -138,7 +135,7 @@ impl VersionedCollationRequest { ) -> Result<(), ()> { match self { VersionedCollationRequest::V1(req) => req.send_outgoing_response(response), - VersionedCollationRequest::VStaging(req) => req.send_outgoing_response(response), + VersionedCollationRequest::V2(req) => req.send_outgoing_response(response), } } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index ad2ab99568c8..304cabbaac80 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -31,10 +31,10 @@ use 
polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet}, request_response::{ incoming::{self, OutgoingResponse}, - v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, + v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, }, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement}; use polkadot_node_subsystem::{ @@ -577,7 +577,7 @@ async fn determine_our_validators( fn declare_message( state: &mut State, version: CollationVersion, -) -> Option> { +) -> Option> { let para_id = state.collating_on?; Some(match version { CollationVersion::V1 => { @@ -590,17 +590,15 @@ fn declare_message( ); Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)) }, - CollationVersion::VStaging => { + CollationVersion::V2 => { let declare_signature_payload = - protocol_vstaging::declare_signature_payload(&state.local_peer_id); - let wire_message = protocol_vstaging::CollatorProtocolMessage::Declare( + protocol_v2::declare_signature_payload(&state.local_peer_id); + let wire_message = protocol_v2::CollatorProtocolMessage::Declare( state.collator_pair.public(), para_id, state.collator_pair.sign(&declare_signature_payload), ); - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - wire_message, - )) + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)) }, }) } @@ -706,15 +704,13 @@ async fn advertise_collation( collation.status.advance_to_advertised(); let collation_message = match protocol_version { - CollationVersion::VStaging => { - let wire_message = protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + CollationVersion::V2 => { + let wire_message = protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash: *candidate_hash, parent_head_data_hash: collation.parent_head_data_hash, }; - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - wire_message, - )) + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)) }, CollationVersion::V1 => { let wire_message = @@ -837,7 +833,7 @@ async fn send_collation( let candidate_hash = receipt.hash(); // The response payload is the same for both versions of protocol - // and doesn't have vstaging alias for simplicity. + // and doesn't have v2 alias for simplicity. 
let response = OutgoingResponse { result: Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)), reputation_changes: Vec::new(), @@ -868,16 +864,13 @@ async fn handle_incoming_peer_message( runtime: &mut RuntimeInfo, state: &mut State, origin: PeerId, - msg: Versioned< - protocol_v1::CollatorProtocolMessage, - protocol_vstaging::CollatorProtocolMessage, - >, + msg: Versioned, ) -> Result<()> { use protocol_v1::CollatorProtocolMessage as V1; - use protocol_vstaging::CollatorProtocolMessage as VStaging; + use protocol_v2::CollatorProtocolMessage as V2; match msg { - Versioned::V1(V1::Declare(..)) | Versioned::VStaging(VStaging::Declare(..)) => { + Versioned::V1(V1::Declare(..)) | Versioned::V2(V2::Declare(..)) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -888,8 +881,7 @@ async fn handle_incoming_peer_message( ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) .await; }, - Versioned::V1(V1::AdvertiseCollation(_)) | - Versioned::VStaging(VStaging::AdvertiseCollation { .. }) => { + Versioned::V1(V1::AdvertiseCollation(_)) | Versioned::V2(V2::AdvertiseCollation { .. }) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -904,7 +896,7 @@ async fn handle_incoming_peer_message( .await; }, Versioned::V1(V1::CollationSeconded(relay_parent, statement)) | - Versioned::VStaging(VStaging::CollationSeconded(relay_parent, statement)) => { + Versioned::V2(V2::CollationSeconded(relay_parent, statement)) => { if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) { gum::warn!( target: LOG_TARGET, @@ -1006,7 +998,7 @@ async fn handle_incoming_request( let collation = match &req { VersionedCollationRequest::V1(_) if !mode.is_enabled() => per_relay_parent.collations.values_mut().next(), - VersionedCollationRequest::VStaging(req) => + VersionedCollationRequest::V2(req) => per_relay_parent.collations.get_mut(&req.payload.candidate_hash), _ => { gum::warn!( @@ -1322,7 +1314,7 @@ pub(crate) async fn run( local_peer_id: PeerId, collator_pair: CollatorPair, req_v1_receiver: IncomingRequestReceiver, - req_v2_receiver: IncomingRequestReceiver, + req_v2_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> std::result::Result<(), FatalError> { run_inner( @@ -1344,7 +1336,7 @@ async fn run_inner( local_peer_id: PeerId, collator_pair: CollatorPair, mut req_v1_receiver: IncomingRequestReceiver, - mut req_v2_receiver: IncomingRequestReceiver, + mut req_v2_receiver: IncomingRequestReceiver, metrics: Metrics, reputation: ReputationAggregator, reputation_interval: Duration, @@ -1425,7 +1417,7 @@ async fn run_inner( (ProspectiveParachainsMode::Disabled, VersionedCollationRequest::V1(_)) => { per_relay_parent.collations.values().next() }, - (ProspectiveParachainsMode::Enabled { .. }, VersionedCollationRequest::VStaging(req)) => { + (ProspectiveParachainsMode::Enabled { .. 
}, VersionedCollationRequest::V2(req)) => { per_relay_parent.collations.get(&req.payload.candidate_hash) }, _ => { @@ -1476,7 +1468,7 @@ async fn run_inner( log_error( handle_incoming_request(&mut ctx, &mut state, request).await, - "Handling incoming collation fetch request VStaging" + "Handling incoming collation fetch request V2" )?; } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index b30f8215941c..7dd2287dab68 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -212,7 +212,7 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle>( @@ -236,7 +236,7 @@ fn test_harness>( let (collation_req_receiver, req_v1_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); - let (collation_req_vstaging_receiver, req_vstaging_cfg) = + let (collation_req_v2_receiver, req_v2_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); let subsystem = async { run_inner( @@ -244,7 +244,7 @@ fn test_harness>( local_peer_id, collator_pair, collation_req_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, Default::default(), reputation, REPUTATION_CHANGE_TEST_INTERVAL, @@ -253,7 +253,7 @@ fn test_harness>( .unwrap(); }; - let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg }); + let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); @@ -545,7 +545,7 @@ async fn expect_declare_msg( /// Check that the next received message is a collation advertisement message. /// -/// Expects vstaging message if `expected_candidate_hashes` is `Some`, v1 otherwise. +/// Expects v2 message if `expected_candidate_hashes` is `Some`, v1 otherwise. async fn expect_advertise_collation_msg( virtual_overseer: &mut VirtualOverseer, peer: &PeerId, @@ -579,13 +579,13 @@ async fn expect_advertise_collation_msg( }, ( Some(candidate_hashes), - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( wire_message, )), ) => { assert_matches!( wire_message, - protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash, .. 
@@ -634,7 +634,7 @@ fn advertise_and_send_collation() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v1_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -789,7 +789,7 @@ fn advertise_and_send_collation() { None, ) .await; - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ); } @@ -807,7 +807,7 @@ fn delay_reputation_change() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v1_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -903,15 +903,15 @@ fn delay_reputation_change() { ); } - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ); } -/// Tests that collator side works with vstaging network protocol +/// Tests that collator side works with v2 network protocol /// before async backing is enabled. #[test] -fn advertise_collation_vstaging_protocol() { +fn advertise_collation_v2_protocol() { let test_state = TestState::default(); let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); @@ -941,21 +941,16 @@ fn advertise_collation_vstaging_protocol() { Some(validators[0].clone()), ) .await; - // The rest with vstaging. + // The rest with v2. for (val, peer) in validators.iter().zip(peer_ids.iter()).skip(1) { - connect_peer( - virtual_overseer, - *peer, - CollationVersion::VStaging, - Some(val.clone()), - ) - .await; + connect_peer(virtual_overseer, *peer, CollationVersion::V2, Some(val.clone())) + .await; } // Declare messages. expect_declare_msg(virtual_overseer, &test_state, &peer_ids[0]).await; for peer_id in peer_ids.iter().skip(1) { - prospective_parachains::expect_declare_msg_vstaging( + prospective_parachains::expect_declare_msg_v2( virtual_overseer, &test_state, &peer_id, @@ -981,7 +976,7 @@ fn advertise_collation_vstaging_protocol() { virtual_overseer, peer_id, test_state.relay_parent, - Some(vec![candidate.hash()]), // This is `Some`, advertisement is vstaging. + Some(vec![candidate.hash()]), // This is `Some`, advertisement is v2. ) .await; } @@ -1405,7 +1400,7 @@ fn connect_to_buffered_groups() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -1510,7 +1505,7 @@ fn connect_to_buffered_groups() { } ); - TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_v2_cfg } }, ); } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index ea8786ca1898..fd9d7a746ebe 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -124,7 +124,7 @@ async fn update_view( } /// Check that the next received message is a `Declare` message. 
-pub(super) async fn expect_declare_msg_vstaging( +pub(super) async fn expect_declare_msg_v2( virtual_overseer: &mut VirtualOverseer, test_state: &TestState, peer: &PeerId, @@ -133,20 +133,20 @@ pub(super) async fn expect_declare_msg_vstaging( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( to, - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( wire_message, )), )) => { assert_eq!(to[0], *peer); assert_matches!( wire_message, - protocol_vstaging::CollatorProtocolMessage::Declare( + protocol_v2::CollatorProtocolMessage::Declare( collator_id, para_id, signature, ) => { assert!(signature.verify( - &*protocol_vstaging::declare_signature_payload(&test_state.local_peer_id), + &*protocol_v2::declare_signature_payload(&test_state.local_peer_id), &collator_id), ); assert_eq!(collator_id, test_state.collator_pair.public()); @@ -203,13 +203,12 @@ fn distribute_collation_from_implicit_view() { .into_iter() .zip(validator_peer_ids.clone()) { - connect_peer(virtual_overseer, peer, CollationVersion::VStaging, Some(val.clone())) - .await; + connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(val.clone())).await; } // Collator declared itself to each peer. for peer_id in &validator_peer_ids { - expect_declare_msg_vstaging(virtual_overseer, &test_state, peer_id).await; + expect_declare_msg_v2(virtual_overseer, &test_state, peer_id).await; } let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; @@ -386,7 +385,7 @@ fn advertise_and_send_collation_by_hash() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let req_v1_cfg = test_harness.req_v1_cfg; - let mut req_vstaging_cfg = test_harness.req_vstaging_cfg; + let mut req_v2_cfg = test_harness.req_v2_cfg; let head_a = Hash::from_low_u64_be(128); let head_a_num: u32 = 64; @@ -435,11 +434,11 @@ fn advertise_and_send_collation_by_hash() { connect_peer( &mut virtual_overseer, peer, - CollationVersion::VStaging, + CollationVersion::V2, Some(validator_id.clone()), ) .await; - expect_declare_msg_vstaging(&mut virtual_overseer, &test_state, &peer).await; + expect_declare_msg_v2(&mut virtual_overseer, &test_state, &peer).await; // Head `b` is not a leaf, but both advertisements are still relevant. send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await; @@ -449,13 +448,13 @@ fn advertise_and_send_collation_by_hash() { for (candidate, pov_block) in candidates { let (pending_response, rx) = oneshot::channel(); - req_vstaging_cfg + req_v2_cfg .inbound_queue .as_mut() .unwrap() .send(RawIncomingRequest { peer, - payload: request_vstaging::CollationFetchingRequest { + payload: request_v2::CollationFetchingRequest { relay_parent: head_b, para_id: test_state.para_id, candidate_hash: candidate.hash(), @@ -469,7 +468,7 @@ fn advertise_and_send_collation_by_hash() { assert_matches!( rx.await, Ok(full_response) => { - // Response is the same for vstaging. + // Response is the same for v2. 
let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse = request_v1::CollationFetchingResponse::decode( &mut full_response.result @@ -482,7 +481,7 @@ fn advertise_and_send_collation_by_hash() { ); } - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ) } @@ -552,11 +551,11 @@ fn advertise_core_occupied() { connect_peer( virtual_overseer, peer_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, Some(validators[0].clone()), ) .await; - expect_declare_msg_vstaging(virtual_overseer, &test_state, &peer_ids[0]).await; + expect_declare_msg_v2(virtual_overseer, &test_state, &peer_ids[0]).await; // Peer is aware of the leaf. send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await; diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs index 62c033954f75..1edc67664172 100644 --- a/polkadot/node/network/collator-protocol/src/lib.rs +++ b/polkadot/node/network/collator-protocol/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem_util::reputation::ReputationAggregator; use sp_keystore::KeystorePtr; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, vstaging as protocol_vstaging, IncomingRequestReceiver}, + request_response::{v1 as request_v1, v2 as protocol_v2, IncomingRequestReceiver}, PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::CollatorPair; @@ -83,9 +83,8 @@ pub enum ProtocolSide { collator_pair: CollatorPair, /// Receiver for v1 collation fetching requests. request_receiver_v1: IncomingRequestReceiver, - /// Receiver for vstaging collation fetching requests. - request_receiver_vstaging: - IncomingRequestReceiver, + /// Receiver for v2 collation fetching requests. + request_receiver_v2: IncomingRequestReceiver, /// Metrics. metrics: collator_side::Metrics, }, @@ -121,14 +120,14 @@ impl CollatorProtocolSubsystem { peer_id, collator_pair, request_receiver_v1, - request_receiver_vstaging, + request_receiver_v2, metrics, } => collator_side::run( ctx, peer_id, collator_pair, request_receiver_v1, - request_receiver_vstaging, + request_receiver_v2, metrics, ) .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 4c92780f2da9..a53e0028b9e7 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -119,7 +119,7 @@ impl PendingCollation { } } -/// vstaging advertisement that was rejected by the backing +/// v2 advertisement that was rejected by the backing /// subsystem. Validator may fetch it later if its fragment /// membership gets recognized before relay parent goes out of view. 
#[derive(Debug, Clone)] diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index e8cf769d2e5f..fcb408d54b1b 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -34,10 +34,10 @@ use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet}, request_response::{ outgoing::{Recipient, RequestError}, - v1 as request_v1, vstaging as request_vstaging, OutgoingRequest, Requests, + v1 as request_v1, v2 as request_v2, OutgoingRequest, Requests, }, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_node_subsystem::{ @@ -624,13 +624,9 @@ async fn notify_collation_seconded( CollationVersion::V1 => Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol( protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement), )), - CollationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - protocol_vstaging::CollatorProtocolMessage::CollationSeconded( - relay_parent, - statement, - ), - )), + CollationVersion::V2 => Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( + protocol_v2::CollatorProtocolMessage::CollationSeconded(relay_parent, statement), + )), }; sender .send_message(NetworkBridgeTxMessage::SendCollationMessage(vec![peer_id], wire_message)) @@ -694,16 +690,12 @@ async fn request_collation( let requests = Requests::CollationFetchingV1(req); (requests, response_recv.boxed()) }, - (CollationVersion::VStaging, Some(ProspectiveCandidate { candidate_hash, .. })) => { + (CollationVersion::V2, Some(ProspectiveCandidate { candidate_hash, .. 
})) => { let (req, response_recv) = OutgoingRequest::new( Recipient::Peer(peer_id), - request_vstaging::CollationFetchingRequest { - relay_parent, - para_id, - candidate_hash, - }, + request_v2::CollationFetchingRequest { relay_parent, para_id, candidate_hash }, ); - let requests = Requests::CollationFetchingVStaging(req); + let requests = Requests::CollationFetchingV2(req); (requests, response_recv.boxed()) }, _ => return Err(FetchError::ProtocolMismatch), @@ -758,18 +750,15 @@ async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, origin: PeerId, - msg: Versioned< - protocol_v1::CollatorProtocolMessage, - protocol_vstaging::CollatorProtocolMessage, - >, + msg: Versioned, ) { use protocol_v1::CollatorProtocolMessage as V1; - use protocol_vstaging::CollatorProtocolMessage as VStaging; + use protocol_v2::CollatorProtocolMessage as V2; use sp_runtime::traits::AppVerify; match msg { Versioned::V1(V1::Declare(collator_id, para_id, signature)) | - Versioned::VStaging(VStaging::Declare(collator_id, para_id, signature)) => { + Versioned::V2(V2::Declare(collator_id, para_id, signature)) => { if collator_peer_id(&state.peer_data, &collator_id).is_some() { modify_reputation( &mut state.reputation, @@ -881,7 +870,7 @@ async fn process_incoming_peer_message( modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await; } }, - Versioned::VStaging(VStaging::AdvertiseCollation { + Versioned::V2(V2::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -901,15 +890,14 @@ async fn process_incoming_peer_message( ?relay_parent, ?candidate_hash, error = ?err, - "Rejected vstaging advertisement", + "Rejected v2 advertisement", ); if let Some(rep) = err.reputation_changes() { modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await; } }, - Versioned::V1(V1::CollationSeconded(..)) | - Versioned::VStaging(VStaging::CollationSeconded(..)) => { + Versioned::V1(V1::CollationSeconded(..)) | Versioned::V2(V2::CollationSeconded(..)) => { gum::warn!( target: LOG_TARGET, peer_id = ?origin, @@ -1074,7 +1062,7 @@ where }; if relay_parent_mode.is_enabled() && prospective_candidate.is_none() { - // Expected vstaging advertisement. + // Expected v2 advertisement. 
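		// A v1 advertisement only names the relay parent, so when prospective parachains
		// (async backing) are enabled there is no candidate hash to key the advertisement by;
		// peers are expected to use the v2
		// `AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash }` form
		// instead, hence the protocol mismatch below.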
return Err(AdvertisementError::ProtocolMismatch) } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 06e40a12130e..9812998aab76 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -357,7 +357,7 @@ async fn assert_fetch_collation_request( ), Some(candidate_hash) => assert_matches!( req, - Requests::CollationFetchingVStaging(req) => { + Requests::CollationFetchingV2(req) => { let payload = req.payload; assert_eq!(payload.relay_parent, relay_parent); assert_eq!(payload.para_id, para_id); @@ -394,12 +394,11 @@ async fn connect_and_declare_collator( para_id, collator.sign(&protocol_v1::declare_signature_payload(&peer)), )), - CollationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::Declare( - collator.public(), - para_id, - collator.sign(&protocol_v1::declare_signature_payload(&peer)), - )), + CollationVersion::V2 => Versioned::V2(protocol_v2::CollatorProtocolMessage::Declare( + collator.public(), + para_id, + collator.sign(&protocol_v1::declare_signature_payload(&peer)), + )), }; overseer_send( @@ -421,7 +420,7 @@ async fn advertise_collation( ) { let wire_message = match candidate { Some((candidate_hash, parent_head_data_hash)) => - Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + Versioned::V2(protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -499,10 +498,10 @@ fn act_on_advertisement() { }); } -/// Tests that validator side works with vstaging network protocol +/// Tests that validator side works with v2 network protocol /// before async backing is enabled. #[test] -fn act_on_advertisement_vstaging() { +fn act_on_advertisement_v2() { let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { @@ -529,13 +528,13 @@ fn act_on_advertisement_vstaging() { peer_b, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; let candidate_hash = CandidateHash::default(); let parent_head_data_hash = Hash::zero(); - // vstaging advertisement. + // v2 advertisement. 
advertise_collation( &mut virtual_overseer, peer_b, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 863d7bf3bb3a..4da0f11da390 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -226,8 +226,8 @@ async fn assert_collation_seconded( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( peers, - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - protocol_vstaging::CollatorProtocolMessage::CollationSeconded( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( + protocol_v2::CollatorProtocolMessage::CollationSeconded( _relay_parent, .., ), @@ -306,7 +306,7 @@ fn accept_advertisements_from_implicit_view() { peer_a, pair_a.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; connect_and_declare_collator( @@ -314,7 +314,7 @@ fn accept_advertisements_from_implicit_view() { peer_b, pair_b.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -406,7 +406,7 @@ fn second_multiple_candidates_per_relay_parent() { peer_a, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -457,7 +457,7 @@ fn second_multiple_candidates_per_relay_parent() { let pov = PoV { block_data: BlockData(vec![1]) }; response_channel - .send(Ok(request_vstaging::CollationFetchingResponse::Collation( + .send(Ok(request_v2::CollationFetchingResponse::Collation( candidate.clone(), pov.clone(), ) @@ -514,7 +514,7 @@ fn second_multiple_candidates_per_relay_parent() { peer_b, pair_b.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -562,7 +562,7 @@ fn fetched_collation_sanity_check() { peer_a, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -611,7 +611,7 @@ fn fetched_collation_sanity_check() { let pov = PoV { block_data: BlockData(vec![1]) }; response_channel - .send(Ok(request_vstaging::CollationFetchingResponse::Collation( + .send(Ok(request_v2::CollationFetchingResponse::Collation( candidate.clone(), pov.clone(), ) @@ -668,7 +668,7 @@ fn advertisement_spam_protection() { peer_a, pair_a.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -748,7 +748,7 @@ fn backed_candidate_unblocks_advertisements() { peer_a, pair_a.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; connect_and_declare_collator( @@ -756,7 +756,7 @@ fn backed_candidate_unblocks_advertisements() { peer_b, pair_b.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -856,7 +856,7 @@ fn active_leave_unblocks_advertisements() { *peer_id, peer.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; } diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index c5dc1ba14bd3..4fa23507e86b 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -452,7 +452,7 @@ where // match void -> LLVM unreachable match message { 
Versioned::V1(m) => match m {}, - Versioned::VStaging(m) => match m {}, + Versioned::V2(m) => match m {}, } }, } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index c33b9eae3252..379334ded24a 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -27,6 +27,3 @@ bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" - -[features] -network-protocol-staging = [] diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index ca3601297fe6..901ac99b6693 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -253,26 +253,25 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned { +pub enum Versioned { /// V1 type. V1(V1), - /// VStaging type. - VStaging(VStaging), + /// V2 type. + V2(V2), } -impl Versioned<&'_ V1, &'_ VStaging> { +impl Versioned<&'_ V1, &'_ V2> { /// Convert to a fully-owned version of the message. - pub fn clone_inner(&self) -> Versioned { + pub fn clone_inner(&self) -> Versioned { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), - Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), + Versioned::V2(inner) => Versioned::V2(inner.clone()), } } } /// All supported versions of the validation protocol message. -pub type VersionedValidationProtocol = - Versioned; +pub type VersionedValidationProtocol = Versioned; impl From for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -280,14 +279,14 @@ impl From for VersionedValidationProtocol { } } -impl From for VersionedValidationProtocol { - fn from(vstaging: vstaging::ValidationProtocol) -> Self { - VersionedValidationProtocol::VStaging(vstaging) +impl From for VersionedValidationProtocol { + fn from(v2: v2::ValidationProtocol) -> Self { + VersionedValidationProtocol::V2(v2) } } /// All supported versions of the collation protocol message. -pub type VersionedCollationProtocol = Versioned; +pub type VersionedCollationProtocol = Versioned; impl From for VersionedCollationProtocol { fn from(v1: v1::CollationProtocol) -> Self { @@ -295,9 +294,9 @@ impl From for VersionedCollationProtocol { } } -impl From for VersionedCollationProtocol { - fn from(vstaging: vstaging::CollationProtocol) -> Self { - VersionedCollationProtocol::VStaging(vstaging) +impl From for VersionedCollationProtocol { + fn from(v2: v2::CollationProtocol) -> Self { + VersionedCollationProtocol::V2(v2) } } @@ -307,7 +306,7 @@ macro_rules! impl_versioned_full_protocol_from { fn from(versioned_from: $from) -> $out { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), - Versioned::VStaging(x) => Versioned::VStaging(x.into()), + Versioned::V2(x) => Versioned::V2(x.into()), } } } @@ -321,7 +320,7 @@ macro_rules! impl_versioned_try_from { $from:ty, $out:ty, $v1_pat:pat => $v1_out:expr, - $vstaging_pat:pat => $vstaging_out:expr + $v2_pat:pat => $v2_out:expr ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -330,7 +329,7 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), - Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), + Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out)), _ => Err(crate::WrongVariant), } } @@ -343,8 +342,7 @@ macro_rules! 
impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), - Versioned::VStaging($vstaging_pat) => - Ok(Versioned::VStaging($vstaging_out.clone())), + Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out.clone())), _ => Err(crate::WrongVariant), } } @@ -354,7 +352,7 @@ macro_rules! impl_versioned_try_from { /// Version-annotated messages used by the bitfield distribution subsystem. pub type BitfieldDistributionMessage = - Versioned; + Versioned; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, VersionedValidationProtocol, @@ -364,12 +362,12 @@ impl_versioned_try_from!( VersionedValidationProtocol, BitfieldDistributionMessage, v1::ValidationProtocol::BitfieldDistribution(x) => x, - vstaging::ValidationProtocol::BitfieldDistribution(x) => x + v2::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. pub type StatementDistributionMessage = - Versioned; + Versioned; impl_versioned_full_protocol_from!( StatementDistributionMessage, VersionedValidationProtocol, @@ -379,12 +377,12 @@ impl_versioned_try_from!( VersionedValidationProtocol, StatementDistributionMessage, v1::ValidationProtocol::StatementDistribution(x) => x, - vstaging::ValidationProtocol::StatementDistribution(x) => x + v2::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. pub type ApprovalDistributionMessage = - Versioned; + Versioned; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, VersionedValidationProtocol, @@ -394,13 +392,13 @@ impl_versioned_try_from!( VersionedValidationProtocol, ApprovalDistributionMessage, v1::ValidationProtocol::ApprovalDistribution(x) => x, - vstaging::ValidationProtocol::ApprovalDistribution(x) => x + v2::ValidationProtocol::ApprovalDistribution(x) => x ); /// Version-annotated messages used by the gossip-support subsystem (this is void). pub type GossipSupportNetworkMessage = - Versioned; + Versioned; // This is a void enum placeholder, so never gets sent over the wire. impl TryFrom for GossipSupportNetworkMessage { type Error = WrongVariant; @@ -418,7 +416,7 @@ impl<'a> TryFrom<&'a VersionedValidationProtocol> for GossipSupportNetworkMessag /// Version-annotated messages used by the bitfield distribution subsystem. pub type CollatorProtocolMessage = - Versioned; + Versioned; impl_versioned_full_protocol_from!( CollatorProtocolMessage, VersionedCollationProtocol, @@ -428,7 +426,7 @@ impl_versioned_try_from!( VersionedCollationProtocol, CollatorProtocolMessage, v1::CollationProtocol::CollatorProtocol(x) => x, - vstaging::CollationProtocol::CollatorProtocol(x) => x + v2::CollationProtocol::CollatorProtocol(x) => x ); /// v1 notification protocol types. @@ -589,8 +587,8 @@ pub mod v1 { } } -/// vstaging network protocol types. -pub mod vstaging { +/// v2 network protocol types. 
+pub mod v2 { use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index c2163783c2ce..8dd68b297e30 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -118,16 +118,9 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { - #[cfg(not(feature = "network-protocol-staging"))] match self { - PeerSet::Validation => ValidationVersion::V1.into(), - PeerSet::Collation => CollationVersion::V1.into(), - } - - #[cfg(feature = "network-protocol-staging")] - match self { - PeerSet::Validation => ValidationVersion::VStaging.into(), - PeerSet::Collation => CollationVersion::VStaging.into(), + PeerSet::Validation => ValidationVersion::V2.into(), + PeerSet::Collation => CollationVersion::V2.into(), } } @@ -152,7 +145,7 @@ impl PeerSet { PeerSet::Validation => if version == ValidationVersion::V1.into() { Some("validation/1") - } else if version == ValidationVersion::VStaging.into() { + } else if version == ValidationVersion::V2.into() { Some("validation/2") } else { None @@ -160,7 +153,7 @@ impl PeerSet { PeerSet::Collation => if version == CollationVersion::V1.into() { Some("collation/1") - } else if version == CollationVersion::VStaging.into() { + } else if version == CollationVersion::V2.into() { Some("collation/2") } else { None @@ -223,8 +216,8 @@ impl From for u32 { pub enum ValidationVersion { /// The first version. V1 = 1, - /// The staging version. - VStaging = 2, + /// The second version. + V2 = 2, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. @@ -232,8 +225,8 @@ pub enum ValidationVersion { pub enum CollationVersion { /// The first version. V1 = 1, - /// The staging version. - VStaging = 2, + /// The second version. + V2 = 2, } /// Marker indicating the version is unknown. diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index baed4b846316..96f7adeb29ba 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -55,7 +55,7 @@ pub use outgoing::{OutgoingRequest, OutgoingResult, Recipient, Requests, Respons pub mod v1; /// Actual versioned requests and responses that are sent over the wire. -pub mod vstaging; +pub mod v2; /// A protocol per subsystem seems to make the most sense, this way we don't need any dispatching /// within protocols. @@ -66,7 +66,7 @@ pub enum Protocol { /// Protocol for fetching collations from collators. CollationFetchingV1, /// Protocol for fetching collations from collators when async backing is enabled. - CollationFetchingVStaging, + CollationFetchingV2, /// Protocol for fetching seconded PoVs from validators of the same group. PoVFetchingV1, /// Protocol for fetching available data. @@ -78,7 +78,7 @@ pub enum Protocol { /// Protocol for requesting candidates with attestations in statement distribution /// when async backing is enabled. 
- AttestedCandidateVStaging, + AttestedCandidateV2, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -147,7 +147,7 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead. const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; -/// Maximum response sizes for `AttestedCandidateVStaging`. +/// Maximum response sizes for `AttestedCandidateV2`. /// /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and /// additional backing statements. @@ -199,7 +199,7 @@ impl Protocol { request_timeout: CHUNK_REQUEST_TIMEOUT, inbound_queue: tx, }, - Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => + Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => RequestResponseConfig { name, fallback_names, @@ -254,7 +254,7 @@ impl Protocol { request_timeout: DISPUTE_REQUEST_TIMEOUT, inbound_queue: tx, }, - Protocol::AttestedCandidateVStaging => RequestResponseConfig { + Protocol::AttestedCandidateV2 => RequestResponseConfig { name, fallback_names, max_request_size: 1_000, @@ -275,7 +275,7 @@ impl Protocol { // as well. Protocol::ChunkFetchingV1 => 100, // 10 seems reasonable, considering group sizes of max 10 validators. - Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => 10, + Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => 10, // 10 seems reasonable, considering group sizes of max 10 validators. Protocol::PoVFetchingV1 => 10, // Validators are constantly self-selecting to request available data which may lead @@ -307,7 +307,7 @@ impl Protocol { // failure, so having a good value here is mostly about performance tuning. Protocol::DisputeSendingV1 => 100, - Protocol::AttestedCandidateVStaging => { + Protocol::AttestedCandidateV2 => { // We assume we can utilize up to 70% of the available bandwidth for statements. // This is just a guess/estimate, with the following considerations: If we are // faster than that, queue size will stay low anyway, even if not - requesters will @@ -344,8 +344,8 @@ impl Protocol { Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"), // Introduced after legacy names became legacy. - Protocol::AttestedCandidateVStaging => None, - Protocol::CollationFetchingVStaging => None, + Protocol::AttestedCandidateV2 => None, + Protocol::CollationFetchingV2 => None, } } } @@ -402,8 +402,8 @@ impl ReqProtocolNames { Protocol::StatementFetchingV1 => "/req_statement/1", Protocol::DisputeSendingV1 => "/send_dispute/1", - Protocol::CollationFetchingVStaging => "/req_collation/2", - Protocol::AttestedCandidateVStaging => "/req_attested_candidate/2", + Protocol::CollationFetchingV2 => "/req_collation/2", + Protocol::AttestedCandidateV2 => "/req_attested_candidate/2", }; format!("{}{}", prefix, short_name).into() diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs index ddc6b85645bb..c613d5778f5e 100644 --- a/polkadot/node/network/protocol/src/request_response/outgoing.rs +++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs @@ -23,7 +23,7 @@ use sc_network::PeerId; use polkadot_primitives::AuthorityDiscoveryId; -use super::{v1, vstaging, IsRequest, Protocol}; +use super::{v1, v2, IsRequest, Protocol}; /// All requests that can be sent to the network bridge via `NetworkBridgeTxMessage::SendRequest`. 
#[derive(Debug)] @@ -42,10 +42,10 @@ pub enum Requests { DisputeSendingV1(OutgoingRequest), /// Request a candidate and attestations. - AttestedCandidateVStaging(OutgoingRequest), + AttestedCandidateV2(OutgoingRequest), /// Fetch a collation from a collator which previously announced it. /// Compared to V1 it requires specifying which candidate is requested by its hash. - CollationFetchingVStaging(OutgoingRequest), + CollationFetchingV2(OutgoingRequest), } impl Requests { @@ -54,12 +54,12 @@ impl Requests { match self { Self::ChunkFetchingV1(_) => Protocol::ChunkFetchingV1, Self::CollationFetchingV1(_) => Protocol::CollationFetchingV1, - Self::CollationFetchingVStaging(_) => Protocol::CollationFetchingVStaging, + Self::CollationFetchingV2(_) => Protocol::CollationFetchingV2, Self::PoVFetchingV1(_) => Protocol::PoVFetchingV1, Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1, Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1, Self::DisputeSendingV1(_) => Protocol::DisputeSendingV1, - Self::AttestedCandidateVStaging(_) => Protocol::AttestedCandidateVStaging, + Self::AttestedCandidateV2(_) => Protocol::AttestedCandidateV2, } } @@ -74,12 +74,12 @@ impl Requests { match self { Self::ChunkFetchingV1(r) => r.encode_request(), Self::CollationFetchingV1(r) => r.encode_request(), - Self::CollationFetchingVStaging(r) => r.encode_request(), + Self::CollationFetchingV2(r) => r.encode_request(), Self::PoVFetchingV1(r) => r.encode_request(), Self::AvailableDataFetchingV1(r) => r.encode_request(), Self::StatementFetchingV1(r) => r.encode_request(), Self::DisputeSendingV1(r) => r.encode_request(), - Self::AttestedCandidateVStaging(r) => r.encode_request(), + Self::AttestedCandidateV2(r) => r.encode_request(), } } } diff --git a/polkadot/node/network/protocol/src/request_response/vstaging.rs b/polkadot/node/network/protocol/src/request_response/v2.rs similarity index 94% rename from polkadot/node/network/protocol/src/request_response/vstaging.rs rename to polkadot/node/network/protocol/src/request_response/v2.rs index c79663abb8a5..6b90c579237f 100644 --- a/polkadot/node/network/protocol/src/request_response/vstaging.rs +++ b/polkadot/node/network/protocol/src/request_response/v2.rs @@ -24,7 +24,7 @@ use polkadot_primitives::{ }; use super::{IsRequest, Protocol}; -use crate::vstaging::StatementFilter; +use crate::v2::StatementFilter; /// Request a candidate with statements. #[derive(Debug, Clone, Encode, Decode)] @@ -56,7 +56,7 @@ pub struct AttestedCandidateResponse { impl IsRequest for AttestedCandidateRequest { type Response = AttestedCandidateResponse; - const PROTOCOL: Protocol = Protocol::AttestedCandidateVStaging; + const PROTOCOL: Protocol = Protocol::AttestedCandidateV2; } /// Responses as sent by collators. @@ -76,5 +76,5 @@ pub struct CollationFetchingRequest { impl IsRequest for CollationFetchingRequest { // The response is the same as for V1. 
type Response = CollationFetchingResponse; - const PROTOCOL: Protocol = Protocol::CollationFetchingVStaging; + const PROTOCOL: Protocol = Protocol::CollationFetchingV2; } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index 9ae76047383c..fc2aff0da305 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -21,8 +21,7 @@ use polkadot_node_network_protocol::{ grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, - vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + v2 as protocol_v2, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, @@ -1062,7 +1061,7 @@ async fn circulate_statement<'a, Context>( "We filter out duplicates above. qed.", ); - let (v1_peers_to_send, vstaging_peers_to_send) = peers_to_send + let (v1_peers_to_send, v2_peers_to_send) = peers_to_send .into_iter() .map(|peer_id| { let peer_data = @@ -1074,7 +1073,7 @@ async fn circulate_statement<'a, Context>( }) .partition::, _>(|(_, _, version)| match version { ValidationVersion::V1 => true, - ValidationVersion::VStaging => false, + ValidationVersion::V2 => false, }); // partition is handy here but not if we add more protocol versions let payload = v1_statement_message(relay_parent, stored.statement.clone(), metrics); @@ -1094,24 +1093,24 @@ async fn circulate_statement<'a, Context>( )) .await; } - if !vstaging_peers_to_send.is_empty() { + if !v2_peers_to_send.is_empty() { gum::trace!( target: LOG_TARGET, - ?vstaging_peers_to_send, + ?v2_peers_to_send, ?relay_parent, statement = ?stored.statement, - "Sending statement to vstaging peers", + "Sending statement to v2 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers_to_send.iter().map(|(p, _, _)| *p).collect(), - compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(), + v2_peers_to_send.iter().map(|(p, _, _)| *p).collect(), + compatible_v1_message(ValidationVersion::V2, payload.clone()).into(), )) .await; } v1_peers_to_send .into_iter() - .chain(vstaging_peers_to_send) + .chain(v2_peers_to_send) .filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None }) .collect() } @@ -1443,10 +1442,8 @@ async fn handle_incoming_message<'a, Context>( let message = match message { Versioned::V1(m) => m, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility( - m, - )) => m, - Versioned::VStaging(_) => { + Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) => m, + Versioned::V2(_) => { // The higher-level subsystem code is supposed to filter out // all non v1 messages. 
gum::debug!( @@ -2170,8 +2167,7 @@ fn compatible_v1_message( ) -> net_protocol::StatementDistributionMessage { match version { ValidationVersion::V1 => Versioned::V1(message), - ValidationVersion::VStaging => Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(message), - ), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(message)), } } diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index b2eb9cccced4..eead7df5224d 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -26,10 +26,8 @@ use error::{log_error, FatalResult}; use std::time::Duration; use polkadot_node_network_protocol::{ - request_response::{ - v1 as request_v1, vstaging::AttestedCandidateRequest, IncomingRequestReceiver, - }, - vstaging as protocol_vstaging, Versioned, + request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver}, + v2 as protocol_v2, Versioned, }; use polkadot_node_primitives::StatementWithPVD; use polkadot_node_subsystem::{ @@ -60,7 +58,7 @@ use legacy_v1::{ ResponderMessage as V1ResponderMessage, }; -mod vstaging; +mod v2; const LOG_TARGET: &str = "parachain::statement-distribution"; @@ -104,9 +102,9 @@ enum MuxedMessage { /// Messages from spawned v1 (legacy) responder background task. V1Responder(Option), /// Messages from candidate responder background task. - Responder(Option), + Responder(Option), /// Messages from answered requests. - Response(vstaging::UnhandledResponse), + Response(v2::UnhandledResponse), /// Message that a request is ready to be retried. This just acts as a signal that we should /// dispatch all pending requests again. RetryRequest(()), @@ -116,10 +114,10 @@ enum MuxedMessage { impl MuxedMessage { async fn receive( ctx: &mut Context, - state: &mut vstaging::State, + state: &mut v2::State, from_v1_requester: &mut mpsc::Receiver, from_v1_responder: &mut mpsc::Receiver, - from_responder: &mut mpsc::Receiver, + from_responder: &mut mpsc::Receiver, ) -> MuxedMessage { let (request_manager, response_manager) = state.request_and_response_managers(); // We are only fusing here to make `select` happy, in reality we will quit if one of those @@ -128,8 +126,8 @@ impl MuxedMessage { let from_v1_requester = from_v1_requester.next(); let from_v1_responder = from_v1_responder.next(); let from_responder = from_responder.next(); - let receive_response = vstaging::receive_response(response_manager).fuse(); - let retry_request = vstaging::next_retry(request_manager).fuse(); + let receive_response = v2::receive_response(response_manager).fuse(); + let retry_request = v2::next_retry(request_manager).fuse(); futures::pin_mut!( from_orchestra, from_v1_requester, @@ -182,7 +180,7 @@ impl StatementDistributionSubsystem { let mut reputation_delay = new_reputation_delay(); let mut legacy_v1_state = crate::legacy_v1::State::new(self.keystore.clone()); - let mut state = crate::vstaging::State::new(self.keystore.clone()); + let mut state = crate::v2::State::new(self.keystore.clone()); // Sender/Receiver for getting news from our statement fetching tasks. let (v1_req_sender, mut v1_req_receiver) = mpsc::channel(1); @@ -206,7 +204,7 @@ impl StatementDistributionSubsystem { ctx.spawn( "candidate-responder", - vstaging::respond_task( + v2::respond_task( self.req_receiver.take().expect("Mandatory argument to new. 
qed"), res_sender.clone(), ) @@ -280,14 +278,13 @@ impl StatementDistributionSubsystem { )?; }, MuxedMessage::Responder(result) => { - vstaging::answer_request( + v2::answer_request( &mut state, result.ok_or(FatalError::RequesterReceiverFinished)?, ); }, MuxedMessage::Response(result) => { - vstaging::handle_response(&mut ctx, &mut state, result, &mut self.reputation) - .await; + v2::handle_response(&mut ctx, &mut state, result, &mut self.reputation).await; }, MuxedMessage::RetryRequest(()) => { // A pending request is ready to retry. This is only a signal to call @@ -296,7 +293,7 @@ impl StatementDistributionSubsystem { }, }; - vstaging::dispatch_requests(&mut ctx, &mut state).await; + v2::dispatch_requests(&mut ctx, &mut state).await; } Ok(()) } @@ -304,7 +301,7 @@ impl StatementDistributionSubsystem { async fn handle_subsystem_message( &mut self, ctx: &mut Context, - state: &mut vstaging::State, + state: &mut v2::State, legacy_v1_state: &mut legacy_v1::State, v1_req_sender: &mpsc::Sender, message: FromOrchestra, @@ -318,11 +315,11 @@ impl StatementDistributionSubsystem { })) => { let _timer = metrics.time_active_leaves_update(); - // vstaging should handle activated first because of implicit view. + // v2 should handle activated first because of implicit view. if let Some(ref activated) = activated { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; if let ProspectiveParachainsMode::Enabled { .. } = mode { - vstaging::handle_active_leaves_update(ctx, state, activated, mode).await?; + v2::handle_active_leaves_update(ctx, state, activated, mode).await?; } else if let ProspectiveParachainsMode::Disabled = mode { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); @@ -339,7 +336,7 @@ impl StatementDistributionSubsystem { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); } - vstaging::handle_deactivate_leaves(state, &deactivated); + v2::handle_deactivate_leaves(state, &deactivated); } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { @@ -362,7 +359,7 @@ impl StatementDistributionSubsystem { ) .await?; } else { - vstaging::share_local_statement( + v2::share_local_statement( ctx, state, relay_parent, @@ -399,11 +396,11 @@ impl StatementDistributionSubsystem { let target = match &event { NetworkBridgeEvent::PeerMessage(_, message) => match message { - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + Versioned::V2( + protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) => VersionTarget::Legacy, Versioned::V1(_) => VersionTarget::Legacy, - Versioned::VStaging(_) => VersionTarget::Current, + Versioned::V2(_) => VersionTarget::Current, }, _ => VersionTarget::Both, }; @@ -422,14 +419,12 @@ impl StatementDistributionSubsystem { } if target.targets_current() { - // pass to vstaging. - vstaging::handle_network_update(ctx, state, event, &mut self.reputation) - .await; + // pass to v2. 
+ v2::handle_network_update(ctx, state, event, &mut self.reputation).await; } }, StatementDistributionMessage::Backed(candidate_hash) => { - crate::vstaging::handle_backed_candidate_message(ctx, state, candidate_hash) - .await; + crate::v2::handle_backed_candidate_message(ctx, state, candidate_hash).await; }, }, } diff --git a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs similarity index 100% rename from polkadot/node/network/statement-distribution/src/vstaging/candidates.rs rename to polkadot/node/network/statement-distribution/src/v2/candidates.rs diff --git a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs similarity index 100% rename from polkadot/node/network/statement-distribution/src/vstaging/cluster.rs rename to polkadot/node/network/statement-distribution/src/v2/cluster.rs diff --git a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/grid.rs rename to polkadot/node/network/statement-distribution/src/v2/grid.rs index b26c74682778..3d53ff6d321e 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs @@ -60,9 +60,7 @@ //! - which has sent a `BackedCandidateAcknowledgement` //! - 1st-hop nodes do the same thing -use polkadot_node_network_protocol::{ - grid_topology::SessionGridTopology, vstaging::StatementFilter, -}; +use polkadot_node_network_protocol::{grid_topology::SessionGridTopology, v2::StatementFilter}; use polkadot_primitives::{CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex}; use std::collections::{ diff --git a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs b/polkadot/node/network/statement-distribution/src/v2/groups.rs similarity index 100% rename from polkadot/node/network/statement-distribution/src/vstaging/groups.rs rename to polkadot/node/network/statement-distribution/src/v2/groups.rs diff --git a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs similarity index 97% rename from polkadot/node/network/statement-distribution/src/vstaging/mod.rs rename to polkadot/node/network/statement-distribution/src/v2/mod.rs index 8768d3504e0a..e11d66c41a04 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -23,11 +23,11 @@ use polkadot_node_network_protocol::{ peer_set::ValidationVersion, request_response::{ incoming::OutgoingResponse, - vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, IncomingRequest, IncomingRequestReceiver, Requests, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, - vstaging::{self as protocol_vstaging, StatementFilter}, + v2::{self as protocol_v2, StatementFilter}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ @@ -323,7 +323,7 @@ pub(crate) async fn handle_network_update( NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => { gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected"); - if protocol_version != 
ValidationVersion::VStaging.into() { + if protocol_version != ValidationVersion::V2.into() { return } @@ -381,19 +381,19 @@ pub(crate) async fn handle_network_update( }, NetworkBridgeEvent::PeerMessage(peer_id, message) => match message { net_protocol::StatementDistributionMessage::V1(_) => return, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) => return, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) .await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => @@ -727,10 +727,8 @@ fn pending_statement_network_message( statement_store .validator_statement(originator, compact) .map(|s| s.as_unchecked().clone()) - .map(|signed| { - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) - }) - .map(|msg| (vec![*peer], Versioned::VStaging(msg).into())) + .map(|signed| protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed)) + .map(|msg| (vec![*peer], Versioned::V2(msg).into())) } /// Send a peer all pending cluster statements for a relay parent. 
@@ -823,7 +821,7 @@ async fn send_pending_grid_messages( match kind { grid::ManifestKind::Full => { - let manifest = protocol_vstaging::BackedCandidateManifest { + let manifest = protocol_v2::BackedCandidateManifest { relay_parent, candidate_hash, group_index, @@ -847,8 +845,8 @@ async fn send_pending_grid_messages( messages.push(( vec![*peer_id], - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + Versioned::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest, ), ) @@ -1192,7 +1190,7 @@ async fn circulate_statement( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( statement_to, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), )) @@ -1672,7 +1670,7 @@ async fn provide_candidate_to_grid( filter.clone(), ); - let manifest = protocol_vstaging::BackedCandidateManifest { + let manifest = protocol_v2::BackedCandidateManifest { relay_parent, candidate_hash, group_index, @@ -1680,16 +1678,15 @@ async fn provide_candidate_to_grid( parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), statement_knowledge: filter.clone(), }; - let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + let acknowledgement = protocol_v2::BackedCandidateAcknowledgement { candidate_hash, statement_knowledge: filter.clone(), }; - let manifest_message = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), - ); - let ack_message = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), + let manifest_message = + Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest)); + let ack_message = Versioned::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), ); let mut manifest_peers = Vec::new(); @@ -2062,8 +2059,8 @@ fn post_acknowledgement_statement_messages( statement.payload(), ); - messages.push(Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement( + messages.push(Versioned::V2( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), ) @@ -2079,7 +2076,7 @@ async fn handle_incoming_manifest( ctx: &mut Context, state: &mut State, peer: PeerId, - manifest: net_protocol::vstaging::BackedCandidateManifest, + manifest: net_protocol::v2::BackedCandidateManifest, reputation: &mut ReputationAggregator, ) { gum::debug!( @@ -2183,14 +2180,14 @@ fn acknowledgement_and_statement_messages( Some(l) => l, }; - let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + let acknowledgement = protocol_v2::BackedCandidateAcknowledgement { candidate_hash, statement_knowledge: local_knowledge.clone(), }; - let msg = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), - ); + let msg = Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement, + )); let mut messages = vec![(vec![peer], msg.into())]; @@ -2221,7 +2218,7 @@ async fn handle_incoming_acknowledgement( ctx: &mut Context, state: &mut State, peer: PeerId, - acknowledgement: net_protocol::vstaging::BackedCandidateAcknowledgement, + acknowledgement: net_protocol::v2::BackedCandidateAcknowledgement, reputation: &mut ReputationAggregator, ) { // The key 
difference between acknowledgments and full manifests is that only @@ -2521,7 +2518,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St ) { // Peer is supposedly connected. ctx.send_message(NetworkBridgeTxMessage::SendRequests( - vec![Requests::AttestedCandidateVStaging(request)], + vec![Requests::AttestedCandidateV2(request)], IfDisconnected::ImmediateError, )) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/requests.rs rename to polkadot/node/network/statement-distribution/src/v2/requests.rs index e96b891f82bb..f13496024fcf 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -39,10 +39,10 @@ use crate::LOG_TARGET; use polkadot_node_network_protocol::{ request_response::{ outgoing::{Recipient as RequestRecipient, RequestError}, - vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, - vstaging::StatementFilter, + v2::StatementFilter, PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::{ diff --git a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs rename to polkadot/node/network/statement-distribution/src/v2/statement_store.rs index c20e7fe45f7c..74db431eda1d 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs @@ -24,7 +24,7 @@ //! groups, and views based on the validators themselves. 
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use polkadot_node_network_protocol::vstaging::StatementFilter; +use polkadot_node_network_protocol::v2::StatementFilter; use polkadot_primitives::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, }; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 50d0477eb516..80dec1d75ab9 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -103,8 +103,8 @@ fn share_seconded_circulated_to_cluster() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -173,7 +173,7 @@ fn cluster_valid_statement_before_seconded_ignored() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, signed_valid.as_unchecked().clone(), ), @@ -252,7 +252,7 @@ fn cluster_statement_bad_signature() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.clone(), ), @@ -327,7 +327,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -388,7 +388,7 @@ fn statement_from_non_cluster_originator_unexpected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -465,7 +465,7 @@ fn seconded_statement_leads_to_request() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -593,8 +593,8 @@ fn cluster_statements_shared_seconded_first() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -604,8 +604,8 @@ fn cluster_statements_shared_seconded_first() { assert_matches!( &messages[1].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -699,8 +699,8 @@ fn 
cluster_accounts_for_implicit_view() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -749,8 +749,8 @@ fn cluster_accounts_for_implicit_view() { &messages[0], ( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -836,10 +836,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -971,10 +968,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1191,7 +1185,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1216,7 +1210,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1241,7 +1235,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 0739f3019437..a0af95798235 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -17,9 +17,7 @@ use super::*; use bitvec::order::Lsb0; -use polkadot_node_network_protocol::vstaging::{ - BackedCandidateAcknowledgement, BackedCandidateManifest, -}; +use polkadot_node_network_protocol::v2::{BackedCandidateAcknowledgement, BackedCandidateManifest}; use polkadot_node_subsystem::messages::CandidateBackingMessage; use polkadot_primitives_test_helpers::make_candidate; @@ -156,7 +154,7 @@ fn backed_candidate_leads_to_advertisement() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -181,7 
+179,7 @@ fn backed_candidate_leads_to_advertisement() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -210,9 +208,9 @@ fn backed_candidate_leads_to_advertisement() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -349,7 +347,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; @@ -534,7 +532,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -603,9 +601,9 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack), ), ), ) @@ -629,7 +627,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -654,8 +652,8 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack) )) if *ack == expected_ack ); } @@ -782,7 +780,7 @@ fn received_advertisement_after_confirmation_before_backing() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -842,7 +840,7 @@ fn received_advertisement_after_confirmation_before_backing() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -951,7 +949,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( 
manifest.clone(), ), ) @@ -1066,9 +1064,9 @@ fn additional_statements_are_shared_after_manifest_exchange() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack), ), ), ) @@ -1104,7 +1102,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1130,15 +1128,15 @@ fn additional_statements_are_shared_after_manifest_exchange() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack) )) if *ack == expected_ack ); assert_matches!( &messages[1].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(r, s) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(r, s) )) if *r == relay_parent && s.unchecked_payload() == &CompactStatement::Seconded(candidate_hash) && s.unchecked_validator_index() == v_e ); } @@ -1281,7 +1279,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1306,7 +1304,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1357,8 +1355,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest) )) => { assert_eq!(*manifest, expected_manifest); } @@ -1504,7 +1502,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1529,7 +1527,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1558,9 +1556,9 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { AllMessages:: 
NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -1692,7 +1690,7 @@ fn grid_statements_imported_to_backing() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1907,7 +1905,7 @@ fn advertisements_rejected_from_incorrect_peers() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1925,7 +1923,7 @@ fn advertisements_rejected_from_incorrect_peers() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; @@ -2029,7 +2027,7 @@ fn manifest_rejected_with_unknown_relay_parent() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2131,7 +2129,7 @@ fn manifest_rejected_when_not_a_validator() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2238,7 +2236,7 @@ fn manifest_rejected_when_group_does_not_match_para() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2370,7 +2368,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2439,7 +2437,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs similarity index 98% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 818c91c53565..4150377a0c6c 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -479,7 +479,7 @@ async fn handle_sent_request( assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(outgoing) => { + Requests::AttestedCandidateV2(outgoing) => { 
assert_eq!(outgoing.peer, Recipient::Peer(peer)); assert_eq!(outgoing.payload.candidate_hash, candidate_hash); assert_eq!(outgoing.payload.mask, mask); @@ -537,7 +537,7 @@ async fn connect_peer( NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Authority, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), authority_ids, ), ), @@ -570,12 +570,12 @@ async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: Pee async fn send_peer_message( virtual_overseer: &mut VirtualOverseer, peer: PeerId, - message: protocol_vstaging::StatementDistributionMessage, + message: protocol_v2::StatementDistributionMessage, ) { virtual_overseer .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message)), + NetworkBridgeEvent::PeerMessage(peer, Versioned::V2(message)), ), }) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 5eef5809b4d4..0734b75c9712 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -19,7 +19,7 @@ use super::*; use bitvec::order::Lsb0; use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ - request_response::vstaging as request_vstaging, vstaging::BackedCandidateManifest, + request_response::v2 as request_v2, v2::BackedCandidateManifest, }; use polkadot_primitives_test_helpers::make_candidate; use sc_network::config::{ @@ -109,10 +109,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -164,9 +161,9 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(hash, statement), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(hash, statement), ), ), ) @@ -304,7 +301,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -376,7 +373,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -453,7 +450,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( 
manifest.clone(), ), ) @@ -568,9 +565,7 @@ fn peer_reported_for_not_enough_statements() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( - manifest.clone(), - ), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest.clone()), ) .await; @@ -752,10 +747,7 @@ fn peer_reported_for_duplicate_statements() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -812,9 +804,9 @@ fn peer_reported_for_duplicate_statements() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(hash, statement), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(hash, statement), ), ), ) @@ -916,10 +908,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1058,10 +1047,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1191,7 +1177,7 @@ fn local_node_sanity_checks_incoming_requests() { .send(RawIncomingRequest { // Request from peer that received manifest. peer: peer_c, - payload: request_vstaging::AttestedCandidateRequest { + payload: request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), } @@ -1225,8 +1211,8 @@ fn local_node_sanity_checks_incoming_requests() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -1250,7 +1236,7 @@ fn local_node_sanity_checks_incoming_requests() { .send(RawIncomingRequest { // Request from peer that received manifest. 
peer: peer_d, - payload: request_vstaging::AttestedCandidateRequest { + payload: request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), } @@ -1269,10 +1255,7 @@ fn local_node_sanity_checks_incoming_requests() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { - candidate_hash: candidate.hash(), - mask, - }, + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, ) .await .await; @@ -1296,7 +1279,7 @@ fn local_node_sanity_checks_incoming_requests() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), }, @@ -1455,7 +1438,7 @@ fn local_node_respects_statement_mask() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1479,7 +1462,7 @@ fn local_node_respects_statement_mask() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement_b.clone(), ), @@ -1511,9 +1494,9 @@ fn local_node_respects_statement_mask() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -1547,19 +1530,16 @@ fn local_node_respects_statement_mask() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { - candidate_hash: candidate.hash(), - mask, - }, + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, ) .await .await; let expected_statements = vec![statement_b]; assert_matches!(response, full_response => { - // Response is the same for vstaging. - let request_vstaging::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = - request_vstaging::AttestedCandidateResponse::decode( + // Response is the same for v2. 
+ let request_v2::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = + request_v2::AttestedCandidateResponse::decode( &mut full_response.result.expect("We should have a proper answer").as_ref(), ).expect("Decoding should work"); assert_eq!(candidate_receipt, candidate); @@ -1683,7 +1663,7 @@ fn should_delay_before_retrying_dropped_requests() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1696,7 +1676,7 @@ fn should_delay_before_retrying_dropped_requests() { assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(outgoing) => { + Requests::AttestedCandidateV2(outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); assert_eq!(outgoing.payload.candidate_hash, candidate_hash_1); assert_eq!(outgoing.payload.mask, mask); @@ -1729,7 +1709,7 @@ fn should_delay_before_retrying_dropped_requests() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index a50f33b98e12..bfa08f76bb90 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -242,7 +242,3 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] - -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 7f4eadaba7f8..d48e3b66e4e1 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -858,7 +858,7 @@ pub fn new_full( let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (collation_req_vstaging_receiver, cfg) = + let (collation_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -866,7 +866,7 @@ pub fn new_full( net_config.add_request_response_protocol(cfg); let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (candidate_req_vstaging_receiver, cfg) = + let (candidate_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); @@ -1054,10 +1054,10 @@ pub fn new_full( pov_req_receiver, chunk_req_receiver, collation_req_v1_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, available_data_req_receiver, statement_req_receiver, - candidate_req_vstaging_receiver, + candidate_req_v2_receiver, dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 33127b638e5a..7d1add118241 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -28,7 +28,7 @@ use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; use polkadot_node_network_protocol::{ 
peer_set::PeerSetProtocolNames, request_response::{ - v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, ReqProtocolNames, + v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, ReqProtocolNames, }, }; #[cfg(any(feature = "malus", test))] @@ -104,17 +104,15 @@ where pub chunk_req_receiver: IncomingRequestReceiver, /// Collations request receiver for network protocol v1. pub collation_req_v1_receiver: IncomingRequestReceiver, - /// Collations request receiver for network protocol vstaging. - pub collation_req_vstaging_receiver: - IncomingRequestReceiver, + /// Collations request receiver for network protocol v2. + pub collation_req_v2_receiver: IncomingRequestReceiver, /// Receiver for available data requests. pub available_data_req_receiver: IncomingRequestReceiver, /// Receiver for incoming large statement requests. pub statement_req_receiver: IncomingRequestReceiver, /// Receiver for incoming candidate requests. - pub candidate_req_vstaging_receiver: - IncomingRequestReceiver, + pub candidate_req_v2_receiver: IncomingRequestReceiver, /// Receiver for incoming disputes. pub dispute_req_receiver: IncomingRequestReceiver, /// Prometheus registry, commonly used for production systems, less so for test. @@ -158,10 +156,10 @@ pub fn prepared_overseer_builder( pov_req_receiver, chunk_req_receiver, collation_req_v1_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, available_data_req_receiver, statement_req_receiver, - candidate_req_vstaging_receiver, + candidate_req_v2_receiver, dispute_req_receiver, registry, spawner, @@ -288,7 +286,7 @@ where peer_id: network_service.local_peer_id(), collator_pair, request_receiver_v1: collation_req_v1_receiver, - request_receiver_vstaging: collation_req_vstaging_receiver, + request_receiver_v2: collation_req_v2_receiver, metrics: Metrics::register(registry)?, }, IsParachainNode::FullNode => ProtocolSide::None, @@ -309,7 +307,7 @@ where .statement_distribution(StatementDistributionSubsystem::new( keystore.clone(), statement_req_receiver, - candidate_req_vstaging_receiver, + candidate_req_v2_receiver, Metrics::register(registry)?, rand::rngs::StdRng::from_entropy(), )) diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 77bcd62f9c9c..8d7cef88a70e 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -429,7 +429,7 @@ where pub enum ProspectiveParachainsMode { /// Runtime API without support of `async_backing_params`: no prospective parachains. Disabled, - /// vstaging runtime API: prospective parachains. + /// v6 runtime API: prospective parachains. Enabled { /// The maximum number of para blocks between the para head in a relay parent /// and a new candidate. 
Restricts nodes from building arbitrary long chains
diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
index a2ce15c21db2..c03fd6752f16 100644
--- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
+++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
@@ -39,6 +39,3 @@
 sc-service = { path = "../../../../../substrate/client/service" }
 sp-keyring = { path = "../../../../../substrate/primitives/keyring" }
 tokio = { version = "1.24.2", features = ["macros"] }
-
-[features]
-network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ]

From 3c839be69df91cdd8ffe0612cc022a959110e707 Mon Sep 17 00:00:00 2001
From: Chris Sosnin
Date: Tue, 19 Sep 2023 17:19:13 +0400
Subject: [PATCH 14/19] use default zombienet images

---
 .gitlab/pipeline/zombienet/polkadot.yml | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index e420baf486aa..56cd70581320 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -109,8 +109,6 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test:
   extends:
     - .zombienet-polkadot-common
   before_script:
-    - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
-    - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"
@@ -126,8 +124,6 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke:
   extends:
     - .zombienet-polkadot-common
   before_script:
-    - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
-    - export COL_IMAGE="docker.io/parity/polkadot-collator:latest" # Use cumulus lastest image
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"

From cc45b7965fa840386354b284c44e671ad26c47ac Mon Sep 17 00:00:00 2001
From: Chris Sosnin
Date: Tue, 19 Sep 2023 18:27:24 +0400
Subject: [PATCH 15/19] override pipeline images

---
 .gitlab/pipeline/zombienet/polkadot.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 56cd70581320..a0b4cf438682 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -109,6 +109,8 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test:
   extends:
     - .zombienet-polkadot-common
   before_script:
+    - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
+    - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG}
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"
@@ -124,6 +126,8 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke:
   extends:
     - .zombienet-polkadot-common
   before_script:
+    - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
+    - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG}
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"

From 1888da6e9b0b12e6591f30db9dfa96bca0dcdfb0 Mon Sep 17 00:00:00 2001
From: Chris Sosnin
Date: Mon, 25 Sep 2023 14:24:55 +0400
Subject: [PATCH 16/19] update zombienet collator command

---
 .../smoke/0002-parachains-upgrade-smoke-test.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
index 0becb408550a..88b789f37fa1 100644
--- a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
+++ b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
@@ -31,7 +31,7 @@ cumulus_based = true
   [parachains.collator]
   name = "collator01"
   image = "{{COL_IMAGE}}"
-  command = "polkadot-collator"
+  command = "polkadot-parachain"
 
     [[parachains.collator.env]]
     name = "RUST_LOG"

From 535c94a7c4e9f92995bd5557b3d4a80e7a68258e Mon Sep 17 00:00:00 2001
From: Chris Sosnin
Date: Tue, 26 Sep 2023 20:23:03 +0400
Subject: [PATCH 17/19] use cumulus image

---
 .gitlab/pipeline/zombienet/polkadot.yml           | 1 -
 .../smoke/0002-parachains-upgrade-smoke-test.toml | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index a0b4cf438682..8f69542afb9d 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -127,7 +127,6 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke:
     - .zombienet-polkadot-common
   before_script:
     - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
-    - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG}
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"
diff --git a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
index 88b789f37fa1..d72e3ebdb335 100644
--- a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
+++ b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
@@ -30,7 +30,7 @@ cumulus_based = true
 
   [parachains.collator]
   name = "collator01"
-  image = "{{COL_IMAGE}}"
+  image = "{{CUMULUS_IMAGE}}"
   command = "polkadot-parachain"
 
     [[parachains.collator.env]]

From 692b269cf2c6ce9e8bc8cff8d02b016bc592a431 Mon Sep 17 00:00:00 2001
From: Chris Sosnin
Date: Wed, 27 Sep 2023 00:04:59 +0400
Subject: [PATCH 18/19] use cumulus image from the pr

---
 .gitlab/pipeline/zombienet/polkadot.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 8f69542afb9d..a199ed02b3c4 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -127,11 +127,12 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke:
     - .zombienet-polkadot-common
   before_script:
     - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
+    - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}"
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"
     - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
-    - echo "colander image ${COL_IMAGE}"
+    - echo "polkadot-parachain image ${CUMULUS_IMAGE}"
     - echo "malus image ${MALUS_IMAGE}"
   script:
     - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh

From a1893941387931edd707d1a0c3ef508d3636d30e Mon Sep 17 00:00:00 2001
From: Chris Sosnin
Date: Wed, 27 Sep 2023 10:38:12 +0400
Subject: [PATCH 19/19] drop compatibility tests

---
 .gitlab/pipeline/zombienet/polkadot.yml       | 24 ---------
 .../001-async-backing-compatibility.toml      | 34 ------------
 .../001-async-backing-compatibility.zndsl     | 23 --------
 .../002-async-backing-runtime-upgrade.toml    | 54 -------------------
 .../002-async-backing-runtime-upgrade.zndsl   | 34 ------------
 .../003-async-backing-collator-mix.toml       | 40 --------------
 .../003-async-backing-collator-mix.zndsl      | 19 -------
 .../zombienet_tests/async_backing/README.md   |  9 ----
 8 files changed, 237 deletions(-)
 delete mode 100644 polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml
 delete mode 100644 polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl
 delete mode 100644 polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
 delete mode 100644 polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
 delete mode 100644 polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
 delete mode 100644 polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
 delete mode 100644 polkadot/zombienet_tests/async_backing/README.md

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index a199ed02b3c4..0402c194134b 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -193,27 +193,3 @@ zombienet-polkadot-malus-0001-dispute-valid:
     - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
       --local-dir="${LOCAL_DIR}/integrationtests"
       --test="0001-dispute-valid-block.zndsl"
-
-zombienet-polkadot-async-backing-compatibility:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/async_backing"
-      --test="001-async-backing-compatibility.zndsl"
-
-zombienet-polkadot-async-backing-runtime-upgrade:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/async_backing"
-      --test="002-async-backing-runtime-upgrade.zndsl"
-
-zombienet-polkadot-async-backing-collator-mix:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/async_backing"
-      --test="003-async-backing-collator-mix.zndsl"
diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml
deleted file mode 100644
index 918fb5bf4f62..000000000000
--- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml
+++ /dev/null
@@ -1,34 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "2" }
-  requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "bob"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-[[parachains]]
-id = 100
-
-  [parachains.collator]
-  name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl
deleted file mode 100644
index 46c1d77acf46..000000000000
--- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl
+++ /dev/null
@@ -1,23 +0,0 @@
-Description: Async Backing Compatibility Test
-Network: ./001-async-backing-compatibility.toml
-Creds: config
-
-# General
-alice: is up
-bob: is up
-
-# Check authority status
-alice: reports node_roles is 4
-bob: reports node_roles is 4
-
-# Check peers
-alice: reports peers count is at least 2 within 20 seconds
-bob: reports peers count is at least 2 within 20 seconds
-
-# Parachain registration
-alice: parachain 100 is registered within 225 seconds
-bob: parachain 100 is registered within 225 seconds
-
-# Ensure parachain progress
-alice: parachain 100 block height is at least 10 within 250 seconds
-bob: parachain 100 block height is at least 10 within 250 seconds
diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
deleted file mode 100644
index e61f7dd47ef6..000000000000
--- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
+++ /dev/null
@@ -1,54 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "2" }
-  requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "bob"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "charlie"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "dave"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-[[parachains]]
-id = 100
-addToGenesis = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[[parachains]]
-id = 101
-addToGenesis = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
deleted file mode 100644
index 6213d1afb81e..000000000000
--- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
+++ /dev/null
@@ -1,34 +0,0 @@
-Description: Async Backing Runtime Upgrade Test
-Network: ./002-async-backing-runtime-upgrade.toml
-Creds: config
-
-# General
-alice: is up
-bob: is up
-charlie: is up
-dave: is up
-
-# Check peers
-alice: reports peers count is at least 3 within 20 seconds
-bob: reports peers count is at least 3 within 20 seconds
-
-# Parachain registration
-alice: parachain 100 is registered within 225 seconds
-bob: parachain 100 is registered within 225 seconds
-charlie: parachain 100 is registered within 225 seconds
-dave: parachain 100 is registered within 225 seconds
-alice: parachain 101 is registered within 225 seconds
-bob: parachain 101 is registered within 225 seconds
-charlie: parachain 101 is registered within 225 seconds
-dave: parachain 101 is registered within 225 seconds
-
-# Ensure parachain progress
-alice: parachain 100 block height is at least 10 within 250 seconds
-bob: parachain 100 block height is at least 10 within 250 seconds
-
-# Runtime upgrade (according to previous runtime tests, avg. is 30s)
-alice: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds
-bob: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds
-
-# Bootstrap the runtime upgrade
-sleep 30 seconds
diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
deleted file mode 100644
index 4dca4d3d5312..000000000000
--- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
+++ /dev/null
@@ -1,40 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "2" }
-  requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  args = [ "-lparachain=debug"]
-
-  [[relaychain.nodes]]
-  name = "bob"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug"]
-
-[[parachains]]
-id = 100
-
-  [[parachains.collators]]
-  name = "collator01"
-  image = "docker.io/paritypr/colander:master"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-  [[parachains.collators]]
-  name = "collator02"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
deleted file mode 100644
index 98436b0459cf..000000000000
--- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
+++ /dev/null
@@ -1,19 +0,0 @@
-Description: Async Backing Collator Mix Test
-Network: ./003-async-backing-collator-mix.toml
-Creds: config
-
-# General
-alice: is up
-bob: is up
-
-# Check peers
-alice: reports peers count is at least 3 within 20 seconds
-bob: reports peers count is at least 3 within 20 seconds
-
-# Parachain registration
-alice: parachain 100 is registered within 225 seconds
-bob: parachain 100 is registered within 225 seconds
-
-# Ensure parachain progress
-alice: parachain 100 block height is at least 10 within 250 seconds
-bob: parachain 100 block height is at least 10 within 250 seconds
diff --git a/polkadot/zombienet_tests/async_backing/README.md b/polkadot/zombienet_tests/async_backing/README.md
deleted file mode 100644
index 9774ea3c25c9..000000000000
--- a/polkadot/zombienet_tests/async_backing/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# async-backing zombienet tests
-
-This directory contains zombienet tests made explicitly for the async-backing feature branch.
-
-## coverage
-
-- Network protocol upgrade deploying both master and async branch (compatibility).
-- Runtime ugprade while running both master and async backing branch nodes.
-- Async backing test with a mix of collators collating via async backing and sync backing.
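Taken together, patches 14 through 18 change where the zombienet smoke tests get their images: the hard-coded `colander` collator image is dropped and the `polkadot-parachain` image built by the cumulus pipeline is used instead. The abridged YAML sketch below illustrates roughly what the `before_script` of the 0002 upgrade-smoke job looks like once the whole series is applied; it is a review aid rather than the literal file, and it assumes that `POLKADOT_IMAGE`, `PIPELINE_IMAGE_TAG`, `DOCKER_IMAGES_VERSION`, `MALUS_IMAGE` and the `.zombienet-polkadot-common` template are defined elsewhere in the pipeline configuration.

```yaml
# Sketch: net effect of patches 15, 17 and 18 on the upgrade-smoke job (abridged).
zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke:
  extends:
    - .zombienet-polkadot-common
  before_script:
    # Relay-chain validators run the polkadot image built by this pipeline.
    - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
    # The collator comes from the cumulus polkadot-parachain debug image,
    # replacing the previously hard-coded colander image.
    - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}"
    - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
    - echo "polkadot-parachain image ${CUMULUS_IMAGE}"
    - echo "malus image ${MALUS_IMAGE}"
```

On the network-spec side, patches 16 and 17 make the matching change in `0002-parachains-upgrade-smoke-test.toml`: the collator now uses `image = "{{CUMULUS_IMAGE}}"` and is started with the `polkadot-parachain` binary instead of the old `polkadot-collator` command, while patch 19 removes the feature-branch async-backing zombienet tests that the stabilized runtime API makes redundant.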