diff --git a/node/core/backing/src/error.rs b/node/core/backing/src/error.rs
index 604c6c0a0c37..13d33d852f60 100644
--- a/node/core/backing/src/error.rs
+++ b/node/core/backing/src/error.rs
@@ -17,9 +17,9 @@
use fatality::Nested;
use futures::channel::{mpsc, oneshot};
-use polkadot_node_subsystem::{messages::ValidationFailed, SubsystemError};
+use polkadot_node_subsystem::{messages::ValidationFailed, RuntimeApiError, SubsystemError};
use polkadot_node_subsystem_util::Error as UtilError;
-use polkadot_primitives::v2::BackedCandidate;
+use polkadot_primitives::v2::{BackedCandidate, ValidationCodeHash};
use crate::LOG_TARGET;
@@ -42,16 +42,31 @@ pub enum Error {
#[error("FetchPoV failed")]
FetchPoV,
+ #[error("Fetching validation code by hash failed {0:?}, {1:?}")]
+ FetchValidationCode(ValidationCodeHash, RuntimeApiError),
+
+ #[error("Fetching Runtime API version failed {0:?}")]
+ FetchRuntimeApiVersion(RuntimeApiError),
+
+ #[error("No validation code {0:?}")]
+ NoValidationCode(ValidationCodeHash),
+
+ #[error("Candidate rejected by prospective parachains subsystem")]
+ RejectedByProspectiveParachains,
+
#[fatal]
#[error("Failed to spawn background task")]
FailedToSpawnBackgroundTask,
- #[error("ValidateFromChainState channel closed before receipt")]
- ValidateFromChainState(#[source] oneshot::Canceled),
+ #[error("ValidateFromExhaustive channel closed before receipt")]
+ ValidateFromExhaustive(#[source] oneshot::Canceled),
#[error("StoreAvailableData channel closed before receipt")]
StoreAvailableData(#[source] oneshot::Canceled),
+ #[error("RuntimeAPISubsystem channel closed before receipt")]
+ RuntimeApiUnavailable(#[source] oneshot::Canceled),
+
#[error("a channel was closed before receipt in try_join!")]
JoinMultiple(#[source] oneshot::Canceled),
diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs
index a189b5955c89..9d5f521da1f8 100644
--- a/node/core/backing/src/lib.rs
+++ b/node/core/backing/src/lib.rs
@@ -14,44 +14,95 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-//! Implements a `CandidateBackingSubsystem`.
+//! Implements the `CandidateBackingSubsystem`.
+//!
+//! This subsystem maintains the entire responsibility of tracking parachain
+//! candidates which can be backed, as well as the issuance of statements
+//! about candidates when run on a validator node.
+//!
+//! There are two types of statements: `Seconded` and `Valid`.
+//! `Seconded` implies `Valid`, and nothing should be stated as
+//! `Valid` unless it's already been `Seconded`.
+//!
+//! Validators may only second candidates which fall under their own group
+//! assignment, and they may only second one candidate per depth per active leaf.
+//! Candidates which are stated as either `Seconded` or `Valid` by a majority of the
+//! assigned group of validators may be backed on-chain and proceed to the availability
+//! stage.
+//!
+//! Depth is a concept relating to asynchronous backing, by which short sub-chains
+//! of candidates are backed and extended off-chain, and then placed
+//! asynchronously into blocks of the relay chain as those are authored and as the
+//! relay-chain state becomes ready for them. Asynchronous backing allows parachains to
+//! grow mostly independently from the state of the relay chain, which gives more time for
+//! parachains to be validated and thereby increases performance.
+//!
+//! Most of the work of asynchronous backing is handled by the Prospective Parachains
+//! subsystem. The 'depth' of a parachain block with respect to a relay chain block is
+//! a measure of how many parachain blocks are between the most recent included parachain block
+//! in the post-state of the relay-chain block and the candidate. For instance,
+//! a candidate that descends directly from the most recent parachain block in the relay-chain
+//! state has depth 0. The child of that candidate would have depth 1. And so on.
+//!
+//! The candidate backing subsystem keeps track of a set of 'active leaves' which are the
+//! most recent blocks in the relay-chain (which is in fact a tree) which could be built
+//! upon. Depth is always measured against active leaves, and the valid relay-parent that
+//! each candidate can have is determined by the active leaves. The Prospective Parachains
+//! subsystem enforces that the relay-parent increases monotonically, so that logic
+//! is not handled here. By communicating with the Prospective Parachains subsystem,
+//! this subsystem extrapolates an "implicit view" from the set of currently active leaves,
+//! which determines the set of all recent relay-chain block hashes which could be relay-parents
+//! for candidates backed in children of the active leaves.
+//!
+//! In fact, this subsystem relies on the Statement Distribution subsystem to prevent spam
+//! by enforcing the rule that each validator may second at most one candidate per depth per
+//! active leaf. This bounds the number of candidates that the system needs to consider. The
+//! rule is not enforced within this subsystem, except for candidates seconded locally.
+//!
+//! This subsystem also handles relay-chain heads which don't support asynchronous backing.
+//! For such active leaves, the only valid relay-parent is the leaf hash itself and the only
+//! allowed depth is 0.
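To make the depth rule above concrete, here is a standalone sketch (illustrative only, not part of this diff; the `UnincludedChain` type and its field are hypothetical):

```rust
/// Hypothetical stand-in for the chain of candidates not yet included in the
/// relay-chain state. Illustrative only; not part of this diff.
struct UnincludedChain {
    /// Candidate hashes, ordered starting from the child of the most recently
    /// included parachain block.
    candidates: Vec<[u8; 32]>,
}

impl UnincludedChain {
    /// Depth is the distance from the most recently included parachain block:
    /// its direct descendant has depth 0, that candidate's child depth 1, etc.
    fn depth_of(&self, candidate: &[u8; 32]) -> Option<usize> {
        self.candidates.iter().position(|c| c == candidate)
    }
}
```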
#![deny(unused_crate_dependencies)]
use std::{
- collections::{HashMap, HashSet},
+ collections::{BTreeMap, HashMap, HashSet},
sync::Arc,
};
use bitvec::vec::BitVec;
use futures::{
channel::{mpsc, oneshot},
- FutureExt, SinkExt, StreamExt,
+ future::BoxFuture,
+ stream::FuturesOrdered,
+ FutureExt, SinkExt, StreamExt, TryFutureExt,
};
use error::{Error, FatalResult};
use polkadot_node_primitives::{
- AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatement, Statement,
- ValidationResult, BACKING_EXECUTION_TIMEOUT,
+ AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatementWithPVD,
+ StatementWithPVD, ValidationResult, BACKING_EXECUTION_TIMEOUT,
};
use polkadot_node_subsystem::{
- jaeger,
messages::{
AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage,
CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage,
- ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage,
+ HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData,
+ ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage,
},
- overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem,
- Stage, SubsystemError,
+ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
};
use polkadot_node_subsystem_util::{
- self as util, request_from_runtime, request_session_index_for_child, request_validator_groups,
+ self as util,
+ backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView},
+ request_from_runtime, request_session_index_for_child, request_validator_groups,
request_validators, Validator,
};
use polkadot_primitives::v2::{
- BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId,
- CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, SessionIndex,
- SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation,
+ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt,
+ CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, PersistedValidationData,
+ SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature,
+ ValidityAttestation,
};
use sp_keystore::SyncCryptoStorePtr;
use statement_table::{
@@ -60,7 +111,7 @@ use statement_table::{
SignedStatement as TableSignedStatement, Statement as TableStatement,
Summary as TableSummary,
},
- Context as TableContextTrait, Table,
+ Config as TableConfig, Context as TableContextTrait, Table,
};
mod error;
@@ -108,9 +159,9 @@ impl std::fmt::Debug for ValidatedCandidateCommand {
impl ValidatedCandidateCommand {
fn candidate_hash(&self) -> CandidateHash {
match *self {
- ValidatedCandidateCommand::Second(Ok((ref candidate, _, _))) => candidate.hash(),
+ ValidatedCandidateCommand::Second(Ok(ref outputs)) => outputs.candidate.hash(),
ValidatedCandidateCommand::Second(Err(ref candidate)) => candidate.hash(),
- ValidatedCandidateCommand::Attest(Ok((ref candidate, _, _))) => candidate.hash(),
+ ValidatedCandidateCommand::Attest(Ok(ref outputs)) => outputs.candidate.hash(),
ValidatedCandidateCommand::Attest(Err(ref candidate)) => candidate.hash(),
ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => candidate_hash,
}
@@ -147,6 +198,113 @@ where
}
}
+struct PerRelayParentState {
+ prospective_parachains_mode: ProspectiveParachainsMode,
+ /// The hash of the relay parent on top of which this job is doing its work.
+ parent: Hash,
+ /// The session index this corresponds to.
+ session_index: SessionIndex,
+ /// The `ParaId` assigned to the local validator at this relay parent.
+ assignment: Option<ParaId>,
+ /// The candidates that are backed by enough validators in their group, by hash.
+ backed: HashSet<CandidateHash>,
+ /// The table of candidates and statements under this relay-parent.
+ table: Table<TableContext>,
+ /// The table context, including groups.
+ table_context: TableContext,
+ /// We issued `Seconded` or `Valid` statements about these candidates.
+ issued_statements: HashSet<CandidateHash>,
+ /// These candidates are undergoing validation in the background.
+ awaiting_validation: HashSet<CandidateHash>,
+ /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`.
+ fallbacks: HashMap<CandidateHash, AttestingData>,
+}
+
+struct PerCandidateState {
+ persisted_validation_data: PersistedValidationData,
+ seconded_locally: bool,
+ para_id: ParaId,
+ relay_parent: Hash,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum ProspectiveParachainsMode {
+ // v2 runtime API: no prospective parachains.
+ Disabled,
+ // vstaging runtime API: prospective parachains.
+ Enabled,
+}
+
+impl ProspectiveParachainsMode {
+ fn is_enabled(&self) -> bool {
+ self == &ProspectiveParachainsMode::Enabled
+ }
+}
+
+struct ActiveLeafState {
+ prospective_parachains_mode: ProspectiveParachainsMode,
+ /// The candidates seconded at various depths under this active
+ /// leaf. A candidate can only be seconded when its hypothetical
+ /// depth under every active leaf has an empty entry in this map.
+ ///
+ /// When prospective parachains are disabled, the only depth
+ /// which is allowed is 0.
+ seconded_at_depth: BTreeMap<usize, CandidateHash>,
+}
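The `seconded_at_depth` map is what enforces the one-candidate-per-depth rule for locally seconded candidates: seconding is only allowed if every hypothetical depth the candidate would occupy is vacant under every active leaf. A minimal sketch of that vacancy check (hypothetical helper, not part of this diff):

```rust
/// Hypothetical helper, not part of this diff: a candidate may only be
/// seconded under a leaf if each depth it would occupy there is unoccupied.
fn depths_vacant(leaf: &ActiveLeafState, hypothetical_depths: &[usize]) -> bool {
    hypothetical_depths.iter().all(|d| !leaf.seconded_at_depth.contains_key(d))
}
```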
+
+/// The state of the subsystem.
+struct State {
+ /// The utility for managing the implicit and explicit views in a consistent way.
+ ///
+ /// We only feed leaves which have prospective parachains enabled to this view.
+ implicit_view: ImplicitView,
+ /// State tracked for all active leaves, whether or not they have prospective parachains
+ /// enabled.
+ per_leaf: HashMap<Hash, ActiveLeafState>,
+ /// State tracked for all relay-parents backing work is ongoing for. This includes
+ /// all active leaves.
+ ///
+ /// relay-parents fall into one of 3 categories.
+ /// 1. active leaves which do support prospective parachains
+ /// 2. active leaves which do not support prospective parachains
+ /// 3. relay-chain blocks which are ancestors of an active leaf and
+ /// do support prospective parachains.
+ ///
+ /// Relay-chain blocks which don't support prospective parachains are
+ /// never included in the fragment trees of active leaves which do.
+ ///
+ /// While it would be technically possible to support such leaves in
+ /// fragment trees, it only benefits the transition period when asynchronous
+ /// backing is being enabled and would add code complexity.
+ per_relay_parent: HashMap<Hash, PerRelayParentState>,
+ /// State tracked for all candidates relevant to the implicit view.
+ ///
+ /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit
+ /// or explicit view for which a `Seconded` statement has been successfully imported.
+ per_candidate: HashMap<CandidateHash, PerCandidateState>,
+ /// A cloneable sender which is dispatched to background candidate validation tasks to inform
+ /// the main task of the result.
+ background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+ /// The handle to the keystore used for signing.
+ keystore: SyncCryptoStorePtr,
+}
+
+impl State {
+ fn new(
+ background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+ keystore: SyncCryptoStorePtr,
+ ) -> Self {
+ State {
+ implicit_view: ImplicitView::default(),
+ per_leaf: HashMap::default(),
+ per_relay_parent: HashMap::default(),
+ per_candidate: HashMap::new(),
+ background_validation_tx,
+ keystore,
+ }
+ }
+}
+
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
async fn run<Context>(
mut ctx: Context,
@@ -154,18 +312,11 @@ async fn run(
metrics: Metrics,
) -> FatalResult<()> {
let (background_validation_tx, mut background_validation_rx) = mpsc::channel(16);
- let mut jobs = HashMap::new();
+ let mut state = State::new(background_validation_tx, keystore);
loop {
- let res = run_iteration(
- &mut ctx,
- keystore.clone(),
- &metrics,
- &mut jobs,
- background_validation_tx.clone(),
- &mut background_validation_rx,
- )
- .await;
+ let res =
+ run_iteration(&mut ctx, &mut state, &metrics, &mut background_validation_rx).await;
match res {
Ok(()) => break,
@@ -179,10 +330,8 @@ async fn run(
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
async fn run_iteration<Context>(
ctx: &mut Context,
- keystore: SyncCryptoStorePtr,
+ state: &mut State,
metrics: &Metrics,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
background_validation_rx: &mut mpsc::Receiver<(Hash, ValidatedCandidateCommand)>,
) -> Result<(), Error> {
loop {
@@ -191,9 +340,10 @@ async fn run_iteration(
if let Some((relay_parent, command)) = validated_command {
handle_validated_candidate_command(
&mut *ctx,
- jobs,
+ state,
relay_parent,
command,
+ metrics,
).await?;
} else {
panic!("background_validation_tx always alive at this point; qed");
@@ -201,243 +351,24 @@ async fn run_iteration(
}
from_overseer = ctx.recv().fuse() => {
match from_overseer? {
- FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => handle_active_leaves_update(
- &mut *ctx,
- update,
- jobs,
- &keystore,
- &background_validation_tx,
- &metrics,
- ).await?,
+ FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
+ handle_active_leaves_update(
+ &mut *ctx,
+ update,
+ state,
+ ).await?;
+ }
FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}
FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()),
- FromOrchestra::Communication { msg } => handle_communication(&mut *ctx, jobs, msg).await?,
+ FromOrchestra::Communication { msg } => {
+ handle_communication(&mut *ctx, state, msg, metrics).await?;
+ }
}
}
)
}
}
-#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-async fn handle_validated_candidate_command<Context>(
- ctx: &mut Context,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- relay_parent: Hash,
- command: ValidatedCandidateCommand,
-) -> Result<(), Error> {
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_validated_candidate_command(&job.span, ctx, command).await?;
- } else {
- // simple race condition; can be ignored - this relay-parent
- // is no longer relevant.
- }
-
- Ok(())
-}
-
-#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-async fn handle_communication<Context>(
- ctx: &mut Context,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- message: CandidateBackingMessage,
-) -> Result<(), Error> {
- match message {
- CandidateBackingMessage::Second(relay_parent, candidate, pov) => {
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_second_msg(&job.span, ctx, candidate, pov).await?;
- }
- },
- CandidateBackingMessage::Statement(relay_parent, statement) => {
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_statement_message(&job.span, ctx, statement).await?;
- }
- },
- CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) =>
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_get_backed_candidates_message(requested_candidates, tx)?;
- },
- }
-
- Ok(())
-}
-
-#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-async fn handle_active_leaves_update<Context>(
- ctx: &mut Context,
- update: ActiveLeavesUpdate,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- keystore: &SyncCryptoStorePtr,
- background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
- metrics: &Metrics,
-) -> Result<(), Error> {
- for deactivated in update.deactivated {
- jobs.remove(&deactivated);
- }
-
- let leaf = match update.activated {
- None => return Ok(()),
- Some(a) => a,
- };
-
- macro_rules! try_runtime_api {
- ($x: expr) => {
- match $x {
- Ok(x) => x,
- Err(e) => {
- gum::warn!(
- target: LOG_TARGET,
- err = ?e,
- "Failed to fetch runtime API data for job",
- );
-
- // We can't do candidate validation work if we don't have the
- // requisite runtime API data. But these errors should not take
- // down the node.
- return Ok(());
- }
- }
- }
- }
-
- let parent = leaf.hash;
- let span = PerLeafSpan::new(leaf.span, "backing");
- let _span = span.child("runtime-apis");
-
- let (validators, groups, session_index, cores) = futures::try_join!(
- request_validators(parent, ctx.sender()).await,
- request_validator_groups(parent, ctx.sender()).await,
- request_session_index_for_child(parent, ctx.sender()).await,
- request_from_runtime(parent, ctx.sender(), |tx| {
- RuntimeApiRequest::AvailabilityCores(tx)
- },)
- .await,
- )
- .map_err(Error::JoinMultiple)?;
-
- let validators: Vec<_> = try_runtime_api!(validators);
- let (validator_groups, group_rotation_info) = try_runtime_api!(groups);
- let session_index = try_runtime_api!(session_index);
- let cores = try_runtime_api!(cores);
-
- drop(_span);
- let _span = span.child("validator-construction");
-
- let signing_context = SigningContext { parent_hash: parent, session_index };
- let validator =
- match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await {
- Ok(v) => Some(v),
- Err(util::Error::NotAValidator) => None,
- Err(e) => {
- gum::warn!(
- target: LOG_TARGET,
- err = ?e,
- "Cannot participate in candidate backing",
- );
-
- return Ok(())
- },
- };
-
- drop(_span);
- let mut assignments_span = span.child("compute-assignments");
-
- let mut groups = HashMap::new();
-
- let n_cores = cores.len();
-
- let mut assignment = None;
-
- for (idx, core) in cores.into_iter().enumerate() {
- // Ignore prospective assignments on occupied cores for the time being.
- if let CoreState::Scheduled(scheduled) = core {
- let core_index = CoreIndex(idx as _);
- let group_index = group_rotation_info.group_for_core(core_index, n_cores);
- if let Some(g) = validator_groups.get(group_index.0 as usize) {
- if validator.as_ref().map_or(false, |v| g.contains(&v.index())) {
- assignment = Some((scheduled.para_id, scheduled.collator));
- }
- groups.insert(scheduled.para_id, g.clone());
- }
- }
- }
-
- let table_context = TableContext { groups, validators, validator };
-
- let (assignment, required_collator) = match assignment {
- None => {
- assignments_span.add_string_tag("assigned", "false");
- (None, None)
- },
- Some((assignment, required_collator)) => {
- assignments_span.add_string_tag("assigned", "true");
- assignments_span.add_para_id(assignment);
- (Some(assignment), required_collator)
- },
- };
-
- drop(assignments_span);
- let _span = span.child("wait-for-job");
-
- let job = CandidateBackingJob {
- parent,
- session_index,
- assignment,
- required_collator,
- issued_statements: HashSet::new(),
- awaiting_validation: HashSet::new(),
- fallbacks: HashMap::new(),
- seconded: None,
- unbacked_candidates: HashMap::new(),
- backed: HashSet::new(),
- keystore: keystore.clone(),
- table: Table::default(),
- table_context,
- background_validation_tx: background_validation_tx.clone(),
- metrics: metrics.clone(),
- _marker: std::marker::PhantomData,
- };
-
- jobs.insert(parent, JobAndSpan { job, span });
-
- Ok(())
-}
-
-struct JobAndSpan<Context> {
- job: CandidateBackingJob<Context>,
- span: PerLeafSpan,
-}
-
-/// Holds all data needed for candidate backing job operation.
-struct CandidateBackingJob<Context> {
- /// The hash of the relay parent on top of which this job is doing it's work.
- parent: Hash,
- /// The session index this corresponds to.
- session_index: SessionIndex,
- /// The `ParaId` assigned to this validator
- assignment: Option<ParaId>,
- /// The collator required to author the candidate, if any.
- required_collator: Option<CollatorId>,
- /// Spans for all candidates that are not yet backable.
- unbacked_candidates: HashMap<CandidateHash, jaeger::Span>,
- /// We issued `Seconded`, `Valid` or `Invalid` statements on about these candidates.
- issued_statements: HashSet<CandidateHash>,
- /// These candidates are undergoing validation in the background.
- awaiting_validation: HashSet<CandidateHash>,
- /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`.
- fallbacks: HashMap<CandidateHash, (AttestingData, Option<jaeger::Span>)>,
- /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` hash.
- seconded: Option<CandidateHash>,
- /// The candidates that are includable, by hash. Each entry here indicates
- /// that we've sent the provisioner the backed candidate.
- backed: HashSet<CandidateHash>,
- keystore: SyncCryptoStorePtr,
- table: Table<TableContext>,
- table_context: TableContext,
- background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
- metrics: Metrics,
- _marker: std::marker::PhantomData<Context>,
-}
-
/// In case a backing validator does not provide a PoV, we need to retry with other backing
/// validators.
///
@@ -499,10 +430,10 @@ struct InvalidErasureRoot;
// It looks like it's not possible to do an `impl From` given the current state of
// the code. So this does the necessary conversion.
-fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement {
+fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedStatement {
let statement = match s.payload() {
- Statement::Seconded(c) => TableStatement::Seconded(c.clone()),
- Statement::Valid(h) => TableStatement::Valid(h.clone()),
+ StatementWithPVD::Seconded(c, _) => TableStatement::Seconded(c.clone()),
+ StatementWithPVD::Valid(h) => TableStatement::Valid(h.clone()),
};
TableSignedStatement {
@@ -586,21 +517,17 @@ async fn store_available_data(
//
// This will compute the erasure root internally and compare it to the expected erasure root.
// This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`.
-
async fn make_pov_available(
sender: &mut impl overseer::CandidateBackingSenderTrait,
n_validators: usize,
pov: Arc<PoV>,
candidate_hash: CandidateHash,
- validation_data: polkadot_primitives::v2::PersistedValidationData,
+ validation_data: PersistedValidationData,
expected_erasure_root: Hash,
- span: Option<&jaeger::Span>,
) -> Result<Result<(), InvalidErasureRoot>, Error> {
let available_data = AvailableData { pov, validation_data };
{
- let _span = span.as_ref().map(|s| s.child("erasure-coding").with_candidate(candidate_hash));
-
let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?;
let branches = erasure_coding::branches(chunks.as_ref());
@@ -612,8 +539,6 @@ async fn make_pov_available(
}
{
- let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash));
-
store_available_data(sender, n_validators as u32, candidate_hash, available_data).await?;
}
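The comparison elided between the two blocks above re-derives the erasure root from the chunks and checks it against the root committed to in the candidate descriptor; roughly (a sketch assembled from the surrounding calls, which do appear in this diff):

```rust
// Sketch of the elided erasure-root check inside `make_pov_available`.
let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?;
let branches = erasure_coding::branches(chunks.as_ref());
if branches.root() != expected_erasure_root {
    return Ok(Err(InvalidErasureRoot))
}
```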
@@ -644,13 +569,17 @@ async fn request_pov(
async fn request_candidate_validation(
sender: &mut impl overseer::CandidateBackingSenderTrait,
+ pvd: PersistedValidationData,
+ code: ValidationCode,
candidate_receipt: CandidateReceipt,
pov: Arc<PoV>,
) -> Result<ValidationResult, Error> {
let (tx, rx) = oneshot::channel();
sender
- .send_message(CandidateValidationMessage::ValidateFromChainState(
+ .send_message(CandidateValidationMessage::ValidateFromExhaustive(
+ pvd,
+ code,
candidate_receipt,
pov,
BACKING_EXECUTION_TIMEOUT,
@@ -661,21 +590,26 @@ async fn request_candidate_validation(
match rx.await {
Ok(Ok(validation_result)) => Ok(validation_result),
Ok(Err(err)) => Err(Error::ValidationFailed(err)),
- Err(err) => Err(Error::ValidateFromChainState(err)),
+ Err(err) => Err(Error::ValidateFromExhaustive(err)),
}
}
-type BackgroundValidationResult =
- Result<(CandidateReceipt, CandidateCommitments, Arc<PoV>), CandidateReceipt>;
+struct BackgroundValidationOutputs {
+ candidate: CandidateReceipt,
+ commitments: CandidateCommitments,
+ persisted_validation_data: PersistedValidationData,
+}
+
+type BackgroundValidationResult = Result<BackgroundValidationOutputs, CandidateReceipt>;
struct BackgroundValidationParams<S: overseer::CandidateBackingSenderTrait, F> {
sender: S,
tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
candidate: CandidateReceipt,
relay_parent: Hash,
+ persisted_validation_data: PersistedValidationData,
pov: PoVData,
n_validators: usize,
- span: Option<jaeger::Span>,
make_command: F,
}
@@ -690,16 +624,33 @@ async fn validate_and_make_available(
mut tx_command,
candidate,
relay_parent,
+ persisted_validation_data,
pov,
n_validators,
- span,
make_command,
} = params;
+ let validation_code = {
+ let validation_code_hash = candidate.descriptor().validation_code_hash;
+ let (tx, rx) = oneshot::channel();
+ sender
+ .send_message(RuntimeApiMessage::Request(
+ relay_parent,
+ RuntimeApiRequest::ValidationCodeByHash(validation_code_hash, tx),
+ ))
+ .await;
+
+ let code = rx.await.map_err(Error::RuntimeApiUnavailable)?;
+ match code {
+ Err(e) => return Err(Error::FetchValidationCode(validation_code_hash, e)),
+ Ok(None) => return Err(Error::NoValidationCode(validation_code_hash)),
+ Ok(Some(c)) => c,
+ }
+ };
+
let pov = match pov {
PoVData::Ready(pov) => pov,
- PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => {
- let _span = span.as_ref().map(|s| s.child("request-pov"));
+ PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } =>
match request_pov(&mut sender, relay_parent, from_validator, candidate_hash, pov_hash)
.await
{
@@ -715,17 +666,18 @@ async fn validate_and_make_available(
},
Err(err) => return Err(err),
Ok(pov) => pov,
- }
- },
+ },
};
let v = {
- let _span = span.as_ref().map(|s| {
- s.child("request-validation")
- .with_pov(&pov)
- .with_para_id(candidate.descriptor().para_id)
- });
- request_candidate_validation(&mut sender, candidate.clone(), pov.clone()).await?
+ request_candidate_validation(
+ &mut sender,
+ persisted_validation_data,
+ validation_code,
+ candidate.clone(),
+ pov.clone(),
+ )
+ .await?
};
let res = match v {
@@ -741,14 +693,17 @@ async fn validate_and_make_available(
n_validators,
pov.clone(),
candidate.hash(),
- validation_data,
+ validation_data.clone(),
candidate.descriptor.erasure_root,
- span.as_ref(),
)
.await?;
match erasure_valid {
- Ok(()) => Ok((candidate, commitments, pov.clone())),
+ Ok(()) => Ok(BackgroundValidationOutputs {
+ candidate,
+ commitments,
+ persisted_validation_data: validation_data,
+ }),
Err(InvalidErasureRoot) => {
gum::debug!(
target: LOG_TARGET,
@@ -786,626 +741,1279 @@ async fn validate_and_make_available(
struct ValidatorIndexOutOfBounds;
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-impl<Context> CandidateBackingJob<Context> {
- async fn handle_validated_candidate_command(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- command: ValidatedCandidateCommand,
- ) -> Result<(), Error> {
- let candidate_hash = command.candidate_hash();
- self.awaiting_validation.remove(&candidate_hash);
-
- match command {
- ValidatedCandidateCommand::Second(res) => {
- match res {
- Ok((candidate, commitments, _)) => {
- // sanity check.
- if self.seconded.is_none() &&
- !self.issued_statements.contains(&candidate_hash)
- {
- self.seconded = Some(candidate_hash);
- self.issued_statements.insert(candidate_hash);
- self.metrics.on_candidate_seconded();
-
- let statement = Statement::Seconded(CommittedCandidateReceipt {
- descriptor: candidate.descriptor.clone(),
- commitments,
- });
- if let Some(stmt) = self
- .sign_import_and_distribute_statement(ctx, statement, root_span)
- .await?
- {
- ctx.send_message(CollatorProtocolMessage::Seconded(
- self.parent,
- stmt,
- ))
- .await;
- }
- }
- },
- Err(candidate) => {
- ctx.send_message(CollatorProtocolMessage::Invalid(self.parent, candidate))
- .await;
- },
- }
- },
- ValidatedCandidateCommand::Attest(res) => {
- // We are done - avoid new validation spawns:
- self.fallbacks.remove(&candidate_hash);
- // sanity check.
- if !self.issued_statements.contains(&candidate_hash) {
- if res.is_ok() {
- let statement = Statement::Valid(candidate_hash);
- self.sign_import_and_distribute_statement(ctx, statement, &root_span)
- .await?;
- }
- self.issued_statements.insert(candidate_hash);
- }
- },
- ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => {
- if let Some((attesting, span)) = self.fallbacks.get_mut(&candidate_hash) {
- if let Some(index) = attesting.backing.pop() {
- attesting.from_validator = index;
- // Ok, another try:
- let c_span = span.as_ref().map(|s| s.child("try"));
- let attesting = attesting.clone();
- self.kick_off_validation_work(ctx, attesting, c_span).await?
- }
- } else {
- gum::warn!(
- target: LOG_TARGET,
- "AttestNoPoV was triggered without fallback being available."
- );
- debug_assert!(false);
- }
+async fn handle_communication<Context>(
+ ctx: &mut Context,
+ state: &mut State,
+ message: CandidateBackingMessage,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ match message {
+ CandidateBackingMessage::Second(_relay_parent, candidate, pvd, pov) => {
+ handle_second_message(ctx, state, candidate, pvd, pov, metrics).await?;
+ },
+ CandidateBackingMessage::Statement(relay_parent, statement) => {
+ handle_statement_message(ctx, state, relay_parent, statement, metrics).await?;
+ },
+ CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) =>
+ if let Some(rp_state) = state.per_relay_parent.get(&relay_parent) {
+ handle_get_backed_candidates_message(rp_state, requested_candidates, tx, metrics)?;
},
- }
-
- Ok(())
}
- async fn background_validate_and_make_available(
- &mut self,
- ctx: &mut Context,
- params: BackgroundValidationParams<
- impl overseer::CandidateBackingSenderTrait,
- impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync,
- >,
- ) -> Result<(), Error> {
- let candidate_hash = params.candidate.hash();
- if self.awaiting_validation.insert(candidate_hash) {
- // spawn background task.
- let bg = async move {
- if let Err(e) = validate_and_make_available(params).await {
- if let Error::BackgroundValidationMpsc(error) = e {
- gum::debug!(
- target: LOG_TARGET,
- ?error,
- "Mpsc background validation mpsc died during validation- leaf no longer active?"
- );
- } else {
- gum::error!(
+ Ok(())
+}
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn prospective_parachains_mode<Context>(
+ ctx: &mut Context,
+ leaf_hash: Hash,
+) -> Result<ProspectiveParachainsMode, Error> {
+ // TODO: call a Runtime API once staging version is available
+ // https://github.com/paritytech/substrate/discussions/11338
+
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx)))
+ .await;
+
+ let version = rx
+ .await
+ .map_err(Error::RuntimeApiUnavailable)?
+ .map_err(Error::FetchRuntimeApiVersion)?;
+
+ if version == 3 {
+ Ok(ProspectiveParachainsMode::Enabled)
+ } else {
+ if version != 2 {
+ gum::warn!(
+ target: LOG_TARGET,
+ "Runtime API version is {}, expected 2 or 3. Prospective parachains are disabled",
+ version
+ );
+ }
+ Ok(ProspectiveParachainsMode::Disabled)
+ }
+}
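Stripped of the channel plumbing, the gate above reduces to a pure mapping from runtime API version to mode, which is easy to test in isolation; a sketch (hypothetical helper, not part of this diff):

```rust
/// Hypothetical helper, not part of this diff: the pure core of the version
/// gate above. Versions other than 2 and 3 additionally warrant the warning.
fn mode_for_api_version(version: u32) -> ProspectiveParachainsMode {
    if version == 3 {
        ProspectiveParachainsMode::Enabled
    } else {
        ProspectiveParachainsMode::Disabled
    }
}
```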
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_active_leaves_update<Context>(
+ ctx: &mut Context,
+ update: ActiveLeavesUpdate,
+ state: &mut State,
+) -> Result<(), Error> {
+ enum LeafHasProspectiveParachains {
+ Enabled(Result<Vec<Hash>, ImplicitViewFetchError>),
+ Disabled,
+ }
+
+ // Activate in the implicit view before deactivating; per the docs
+ // on `ImplicitView`, this is more efficient.
+ let res = if let Some(leaf) = update.activated {
+ // Only activate in implicit view if prospective
+ // parachains are enabled.
+ let mode = prospective_parachains_mode(ctx, leaf.hash).await?;
+
+ let leaf_hash = leaf.hash;
+ Some((
+ leaf,
+ match mode {
+ ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled,
+ ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled(
+ state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await,
+ ),
+ },
+ ))
+ } else {
+ None
+ };
+
+ for deactivated in update.deactivated {
+ state.per_leaf.remove(&deactivated);
+ state.implicit_view.deactivate_leaf(deactivated);
+ }
+
+ // clean up `per_relay_parent` according to ancestry
+ // of leaves. we do this so we can clean up candidates right after
+ // as a result.
+ //
+ // when prospective parachains are disabled, the implicit view is empty,
+ // which means we'll clean up everything. This is correct.
+ {
+ let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect();
+ state.per_relay_parent.retain(|r, _| remaining.contains(&r));
+ }
+
+ // clean up `per_candidate` according to which relay-parents
+ // are known.
+ //
+ // when prospective parachains are disabled, we clean up all candidates
+ // because we've cleaned up all relay parents. this is correct.
+ state
+ .per_candidate
+ .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent));
+
+ // Get the relay parents of the new active leaf, explicit or implicit,
+ // which may be fresh or may already be known.
+ let fresh_relay_parents = match res {
+ None => return Ok(()),
+ Some((leaf, LeafHasProspectiveParachains::Disabled)) => {
+ // defensive in this case - for enabled, this manifests as an error.
+ if state.per_leaf.contains_key(&leaf.hash) {
+ return Ok(())
+ }
+
+ state.per_leaf.insert(
+ leaf.hash,
+ ActiveLeafState {
+ prospective_parachains_mode: ProspectiveParachainsMode::Disabled,
+ // This is empty because the only allowed relay-parent and depth
+ // when prospective parachains are disabled is the leaf hash and 0,
+ // respectively. We've just learned about the leaf hash, so we cannot
+ // have any candidates seconded with it as a relay-parent yet.
+ seconded_at_depth: BTreeMap::new(),
+ },
+ );
+
+ vec![leaf.hash]
+ },
+ Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => {
+ let fresh_relay_parents =
+ state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None);
+
+ // At this point, all candidates outside of the implicit view
+ // have been cleaned up. For all which remain, which we've seconded,
+ // we ask the prospective parachains subsystem where they land in the fragment
+ // tree for the given active leaf. This comprises our `seconded_at_depth`.
+
+ let remaining_seconded = state
+ .per_candidate
+ .iter()
+ .filter(|(_, cd)| cd.seconded_locally)
+ .map(|(c_hash, cd)| (*c_hash, cd.para_id));
+
+ // one-to-one correspondence to remaining_seconded
+ let mut membership_answers = FuturesOrdered::new();
+
+ for (candidate_hash, para_id) in remaining_seconded {
+ let (tx, rx) = oneshot::channel();
+ membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership)));
+
+ ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership(
+ para_id,
+ candidate_hash,
+ tx,
+ ))
+ .await;
+ }
+
+ let mut seconded_at_depth = BTreeMap::new();
+ while let Some(response) = membership_answers.next().await {
+ match response {
+ Err(oneshot::Canceled) => {
+ gum::warn!(
target: LOG_TARGET,
- "Failed to validate and make available: {:?}",
- e
+ "Prospective parachains subsystem unreachable for membership request",
);
- }
+
+ continue
+ },
+ Ok((candidate_hash, membership)) => {
+ // This request gives membership in all fragment trees. We have some
+ // wasted data here, and it can be optimized if it proves
+ // relevant to performance.
+ if let Some((_, depths)) =
+ membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash)
+ {
+ for depth in depths {
+ seconded_at_depth.insert(depth, candidate_hash);
+ }
+ }
+ },
}
- };
+ }
+
+ state.per_leaf.insert(
+ leaf.hash,
+ ActiveLeafState {
+ prospective_parachains_mode: ProspectiveParachainsMode::Enabled,
+ seconded_at_depth,
+ },
+ );
+
+ match fresh_relay_parents {
+ Some(f) => f.to_vec(),
+ None => {
+ gum::warn!(
+ target: LOG_TARGET,
+ leaf_hash = ?leaf.hash,
+ "Implicit view gave no relay-parents"
+ );
+
+ vec![leaf.hash]
+ },
+ }
+ },
+ Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => {
+ gum::debug!(
+ target: LOG_TARGET,
+ leaf_hash = ?leaf.hash,
+ err = ?e,
+ "Failed to load implicit view for leaf."
+ );
+
+ return Ok(())
+ },
+ };
- ctx.spawn("backing-validation", bg.boxed())
- .map_err(|_| Error::FailedToSpawnBackgroundTask)?;
+ // Add entries in `per_relay_parent` for all new relay-parents.
+ for maybe_new in fresh_relay_parents {
+ if state.per_relay_parent.contains_key(&maybe_new) {
+ continue
}
- Ok(())
+ let mode = match state.per_leaf.get(&maybe_new) {
+ None => {
+ // If the relay-parent isn't a leaf itself,
+ // then it is guaranteed by the prospective parachains
+ // subsystem that it is an ancestor of a leaf which
+ // has prospective parachains enabled and that the
+ // block itself did.
+ ProspectiveParachainsMode::Enabled
+ },
+ Some(l) => l.prospective_parachains_mode,
+ };
+
+ // construct a `PerRelayParent` from the runtime API
+ // and insert it.
+ let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?;
+
+ if let Some(per) = per {
+ state.per_relay_parent.insert(maybe_new, per);
+ }
}
- /// Kick off background validation with intent to second.
- async fn validate_and_second(
- &mut self,
- parent_span: &jaeger::Span,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- candidate: &CandidateReceipt,
- pov: Arc<PoV>,
- ) -> Result<(), Error> {
- // Check that candidate is collated by the right collator.
- if self
- .required_collator
- .as_ref()
- .map_or(false, |c| c != &candidate.descriptor().collator)
- {
- ctx.send_message(CollatorProtocolMessage::Invalid(self.parent, candidate.clone()))
- .await;
- return Ok(())
+ Ok(())
+}
+
+/// Load the data necessary to do backing work on top of a relay-parent.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn construct_per_relay_parent_state<Context>(
+ ctx: &mut Context,
+ relay_parent: Hash,
+ keystore: &SyncCryptoStorePtr,
+ mode: ProspectiveParachainsMode,
+) -> Result<Option<PerRelayParentState>, Error> {
+ macro_rules! try_runtime_api {
+ ($x: expr) => {
+ match $x {
+ Ok(x) => x,
+ Err(e) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ err = ?e,
+ "Failed to fetch runtime API data for job",
+ );
+
+ // We can't do candidate validation work if we don't have the
+ // requisite runtime API data. But these errors should not take
+ // down the node.
+ return Ok(None);
+ }
+ }
}
+ }
- let candidate_hash = candidate.hash();
- let mut span = self.get_unbacked_validation_child(
- root_span,
- candidate_hash,
- candidate.descriptor().para_id,
- );
+ let parent = relay_parent;
- span.as_mut().map(|span| span.add_follows_from(parent_span));
+ let (validators, groups, session_index, cores) = futures::try_join!(
+ request_validators(parent, ctx.sender()).await,
+ request_validator_groups(parent, ctx.sender()).await,
+ request_session_index_for_child(parent, ctx.sender()).await,
+ request_from_runtime(parent, ctx.sender(), |tx| {
+ RuntimeApiRequest::AvailabilityCores(tx)
+ },)
+ .await,
+ )
+ .map_err(Error::JoinMultiple)?;
- gum::debug!(
- target: LOG_TARGET,
- candidate_hash = ?candidate_hash,
- candidate_receipt = ?candidate,
- "Validate and second candidate",
- );
+ let validators: Vec<_> = try_runtime_api!(validators);
+ let (validator_groups, group_rotation_info) = try_runtime_api!(groups);
+ let session_index = try_runtime_api!(session_index);
+ let cores = try_runtime_api!(cores);
- let bg_sender = ctx.sender().clone();
- self.background_validate_and_make_available(
- ctx,
- BackgroundValidationParams {
- sender: bg_sender,
- tx_command: self.background_validation_tx.clone(),
- candidate: candidate.clone(),
- relay_parent: self.parent,
- pov: PoVData::Ready(pov),
- n_validators: self.table_context.validators.len(),
- span,
- make_command: ValidatedCandidateCommand::Second,
+ let signing_context = SigningContext { parent_hash: parent, session_index };
+ let validator =
+ match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await {
+ Ok(v) => Some(v),
+ Err(util::Error::NotAValidator) => None,
+ Err(e) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ err = ?e,
+ "Cannot participate in candidate backing",
+ );
+
+ return Ok(None)
},
- )
- .await?;
+ };
+
+ let mut groups = HashMap::new();
+ let n_cores = cores.len();
+ let mut assignment = None;
- Ok(())
+ for (idx, core) in cores.into_iter().enumerate() {
+ // Ignore prospective assignments on occupied cores for the time being.
+ if let CoreState::Scheduled(scheduled) = core {
+ let core_index = CoreIndex(idx as _);
+ let group_index = group_rotation_info.group_for_core(core_index, n_cores);
+ if let Some(g) = validator_groups.get(group_index.0 as usize) {
+ if validator.as_ref().map_or(false, |v| g.contains(&v.index())) {
+ assignment = Some((scheduled.para_id, scheduled.collator));
+ }
+ groups.insert(scheduled.para_id, g.clone());
+ }
+ }
+ }
+
+ let table_context = TableContext { groups, validators, validator };
+ let table_config = TableConfig {
+ allow_multiple_seconded: match mode {
+ ProspectiveParachainsMode::Enabled => true,
+ ProspectiveParachainsMode::Disabled => false,
+ },
+ };
+
+ // TODO [now]: I've removed the `required_collator` more broadly,
+ // because it's not used in practice and was intended for parathreads.
+ //
+ // We should attempt parathreads another way, I think, so it makes sense
+ // to remove.
+ let assignment = assignment.map(|(a, _required_collator)| a);
+
+ Ok(Some(PerRelayParentState {
+ prospective_parachains_mode: mode,
+ parent,
+ session_index,
+ assignment,
+ backed: HashSet::new(),
+ table: Table::new(table_config),
+ table_context,
+ issued_statements: HashSet::new(),
+ awaiting_validation: HashSet::new(),
+ fallbacks: HashMap::new(),
+ }))
+}
+
+enum SecondingAllowed {
+ No,
+ Yes(Vec<(Hash, Vec<usize>)>),
+}
+
+/// Checks whether a candidate can be seconded based on its hypothetical
+/// depths in the fragment tree and what we've already seconded in all
+/// active leaves.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn seconding_sanity_check<Context>(
+ ctx: &mut Context,
+ active_leaves: &HashMap<Hash, ActiveLeafState>,
+ implicit_view: &ImplicitView,
+ candidate_hash: CandidateHash,
+ candidate_para: ParaId,
+ parent_head_data_hash: Hash,
+ head_data_hash: Hash,
+ candidate_relay_parent: Hash,
+) -> SecondingAllowed {
+ // Note that `GetHypotheticalDepth` doesn't account for recursion,
+ // i.e. candidates can appear at multiple depths in the tree and in fact
+ // at all depths, and we don't know what depths a candidate will ultimately occupy
+ // because that's dependent on other candidates we haven't yet received.
+ //
+ // The only way to effectively rule this out is to have candidate receipts
+ // directly commit to the parachain block number or some other incrementing
+ // counter. That requires a major primitives format upgrade, so for now
+ // we just rule out trivial cycles.
+ if parent_head_data_hash == head_data_hash {
+ return SecondingAllowed::No
}
- async fn sign_import_and_distribute_statement(
- &mut self,
- ctx: &mut Context,
- statement: Statement,
- root_span: &jaeger::Span,
- ) -> Result<Option<SignedFullStatement>, Error> {
- if let Some(signed_statement) = self.sign_statement(statement).await {
- self.import_statement(ctx, &signed_statement, root_span).await?;
- let smsg = StatementDistributionMessage::Share(self.parent, signed_statement.clone());
- ctx.send_unbounded_message(smsg);
-
- Ok(Some(signed_statement))
+ let mut membership = Vec::new();
+ let mut responses = FuturesOrdered::<BoxFuture<'_, Result<_, oneshot::Canceled>>>::new();
+
+ for (head, leaf_state) in active_leaves {
+ if leaf_state.prospective_parachains_mode.is_enabled() {
+ // Check that the candidate relay parent is allowed for para, skip the
+ // leaf otherwise.
+ let allowed_parents_for_para =
+ implicit_view.known_allowed_relay_parents_under(head, Some(candidate_para));
+ if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) {
+ continue
+ }
+
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalDepth(
+ HypotheticalDepthRequest {
+ candidate_hash,
+ candidate_para,
+ parent_head_data_hash,
+ candidate_relay_parent,
+ fragment_tree_relay_parent: *head,
+ },
+ tx,
+ ))
+ .await;
+ responses.push(rx.map_ok(move |depths| (depths, head, leaf_state)).boxed());
} else {
- Ok(None)
+ if head == &candidate_relay_parent {
+ if leaf_state.seconded_at_depth.contains_key(&0) {
+ // The leaf is already occupied.
+ return SecondingAllowed::No
+ }
+ responses.push(futures::future::ok((vec![0], head, leaf_state)).boxed());
+ }
}
}
- /// Check if there have happened any new misbehaviors and issue necessary messages.
- fn issue_new_misbehaviors(&mut self, sender: &mut impl overseer::CandidateBackingSenderTrait) {
- // collect the misbehaviors to avoid double mutable self borrow issues
- let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect();
- for (validator_id, report) in misbehaviors {
- // The provisioner waits on candidate-backing, which means
- // that we need to send unbounded messages to avoid cycles.
- //
- // Misbehaviors are bounded by the number of validators and
- // the block production protocol.
- sender.send_unbounded_message(ProvisionerMessage::ProvisionableData(
- self.parent,
- ProvisionableData::MisbehaviorReport(self.parent, validator_id, report),
- ));
- }
+ if responses.is_empty() {
+ return SecondingAllowed::No
}
- /// Import a statement into the statement table and return the summary of the import.
- async fn import_statement(
- &mut self,
- ctx: &mut Context,
- statement: &SignedFullStatement,
- root_span: &jaeger::Span,
- ) -> Result<Option<TableSummary>, Error> {
- gum::debug!(
- target: LOG_TARGET,
- statement = ?statement.payload().to_compact(),
- validator_index = statement.validator_index().0,
- "Importing statement",
- );
+ while let Some(response) = responses.next().await {
+ match response {
+ Err(oneshot::Canceled) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ "Failed to reach prospective parachains subsystem for hypothetical depths",
+ );
- let candidate_hash = statement.payload().candidate_hash();
- let import_statement_span = {
- // create a span only for candidates we're already aware of.
- self.get_unbacked_statement_child(
- root_span,
- candidate_hash,
- statement.validator_index(),
- )
- };
+ return SecondingAllowed::No
+ },
+ Ok((depths, head, leaf_state)) => {
+ for depth in &depths {
+ if leaf_state.seconded_at_depth.contains_key(&depth) {
+ gum::debug!(
+ target: LOG_TARGET,
+ ?candidate_hash,
+ depth,
+ leaf_hash = ?head,
+ "Refusing to second candidate at depth - already occupied."
+ );
- if let Err(ValidatorIndexOutOfBounds) = self
- .dispatch_new_statement_to_dispute_coordinator(ctx.sender(), candidate_hash, &statement)
- .await
- {
- gum::warn!(
- target: LOG_TARGET,
- session_index = ?self.session_index,
- relay_parent = ?self.parent,
- validator_index = statement.validator_index().0,
- "Supposedly 'Signed' statement has validator index out of bounds."
- );
+ return SecondingAllowed::No
+ }
+ }
- return Ok(None)
+ membership.push((*head, depths));
+ },
}
+ }
- let stmt = primitive_statement_to_table(statement);
+ // At this point we've checked the depths of the candidate against all active
+ // leaves.
+ SecondingAllowed::Yes(membership)
+}
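For orientation, this is how the result is meant to be consumed (condensed from the seconding path in `handle_validated_candidate_command` further down; arguments elided):

```rust
// Condensed consumer of `seconding_sanity_check`: bail out on `No`, otherwise
// record the candidate at every depth it occupies under each active leaf.
let fragment_tree_membership = match seconding_sanity_check(/* ... */).await {
    SecondingAllowed::No => return Ok(()),
    SecondingAllowed::Yes(membership) => membership,
};
for (leaf_hash, depths) in fragment_tree_membership {
    if let Some(leaf_data) = state.per_leaf.get_mut(&leaf_hash) {
        for depth in depths {
            leaf_data.seconded_at_depth.insert(depth, candidate_hash);
        }
    }
}
```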
- let summary = self.table.import_statement(&self.table_context, stmt);
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_validated_candidate_command<Context>(
+ ctx: &mut Context,
+ state: &mut State,
+ relay_parent: Hash,
+ command: ValidatedCandidateCommand,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ match state.per_relay_parent.get_mut(&relay_parent) {
+ Some(rp_state) => {
+ let candidate_hash = command.candidate_hash();
+ rp_state.awaiting_validation.remove(&candidate_hash);
+
+ match command {
+ ValidatedCandidateCommand::Second(res) => match res {
+ Ok(outputs) => {
+ let BackgroundValidationOutputs {
+ candidate,
+ commitments,
+ persisted_validation_data,
+ } = outputs;
+
+ if rp_state.issued_statements.contains(&candidate_hash) {
+ return Ok(())
+ }
- let unbacked_span = if let Some(attested) = summary
- .as_ref()
- .and_then(|s| self.table.attested_candidate(&s.candidate, &self.table_context))
- {
- let candidate_hash = attested.candidate.hash();
- // `HashSet::insert` returns true if the thing wasn't in there already.
- if self.backed.insert(candidate_hash) {
- let span = self.remove_unbacked_span(&candidate_hash);
+ // sanity check that we're allowed to second the candidate
+ // and that it doesn't conflict with other candidates we've
+ // seconded.
+ let fragment_tree_membership = match seconding_sanity_check(
+ ctx,
+ &state.per_leaf,
+ &state.implicit_view,
+ candidate_hash,
+ candidate.descriptor().para_id,
+ persisted_validation_data.parent_head.hash(),
+ commitments.head_data.hash(),
+ candidate.descriptor().relay_parent,
+ )
+ .await
+ {
+ SecondingAllowed::No => return Ok(()),
+ SecondingAllowed::Yes(membership) => membership,
+ };
- if let Some(backed) = table_attested_to_backed(attested, &self.table_context) {
- gum::debug!(
- target: LOG_TARGET,
- candidate_hash = ?candidate_hash,
- relay_parent = ?self.parent,
- para_id = %backed.candidate.descriptor.para_id,
- "Candidate backed",
- );
+ let statement = StatementWithPVD::Seconded(
+ CommittedCandidateReceipt {
+ descriptor: candidate.descriptor.clone(),
+ commitments,
+ },
+ persisted_validation_data,
+ );
- // The provisioner waits on candidate-backing, which means
- // that we need to send unbounded messages to avoid cycles.
- //
- // Backed candidates are bounded by the number of validators,
- // parachains, and the block production rate of the relay chain.
- let message = ProvisionerMessage::ProvisionableData(
- self.parent,
- ProvisionableData::BackedCandidate(backed.receipt()),
- );
- ctx.send_unbounded_message(message);
+ // If we get an Error::RejectedByProspectiveParachains,
+ // then the statement has not been distributed or imported into
+ // the table.
+ let res = sign_import_and_distribute_statement(
+ ctx,
+ rp_state,
+ &mut state.per_candidate,
+ statement,
+ state.keystore.clone(),
+ metrics,
+ )
+ .await;
+
+ if let Err(Error::RejectedByProspectiveParachains) = res {
+ let candidate_hash = candidate.hash();
+ gum::debug!(
+ target: LOG_TARGET,
+ relay_parent = ?candidate.descriptor().relay_parent,
+ ?candidate_hash,
+ "Attempted to second candidate but was rejected by prospective parachains",
+ );
+
+ // Ensure the collator is reported.
+ ctx.send_message(CollatorProtocolMessage::Invalid(
+ candidate.descriptor().relay_parent,
+ candidate,
+ ))
+ .await;
- span.as_ref().map(|s| s.child("backed"));
- span
- } else {
- None
- }
- } else {
- None
- }
- } else {
- None
- };
+ return Ok(())
+ }
+
+ if let Some(stmt) = res? {
+ match state.per_candidate.get_mut(&candidate_hash) {
+ None => {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?candidate_hash,
+ "Missing `per_candidate` for seconded candidate.",
+ );
+ },
+ Some(p) => p.seconded_locally = true,
+ }
- self.issue_new_misbehaviors(ctx.sender());
+ // update seconded depths in active leaves.
+ for (leaf, depths) in fragment_tree_membership {
+ let leaf_data = match state.per_leaf.get_mut(&leaf) {
+ None => {
+ gum::warn!(
+ target: LOG_TARGET,
+ leaf_hash = ?leaf,
+ "Missing `per_leaf` for known active leaf."
+ );
+
+ continue
+ },
+ Some(d) => d,
+ };
+
+ for depth in depths {
+ leaf_data.seconded_at_depth.insert(depth, candidate_hash);
+ }
+ }
- // It is important that the child span is dropped before its parent span (`unbacked_span`)
- drop(import_statement_span);
- drop(unbacked_span);
+ rp_state.issued_statements.insert(candidate_hash);
- Ok(summary)
+ metrics.on_candidate_seconded();
+ ctx.send_message(CollatorProtocolMessage::Seconded(
+ rp_state.parent,
+ StatementWithPVD::drop_pvd_from_signed(stmt),
+ ))
+ .await;
+ }
+ },
+ Err(candidate) => {
+ ctx.send_message(CollatorProtocolMessage::Invalid(
+ rp_state.parent,
+ candidate,
+ ))
+ .await;
+ },
+ },
+ ValidatedCandidateCommand::Attest(res) => {
+ // We are done - avoid new validation spawns:
+ rp_state.fallbacks.remove(&candidate_hash);
+ // sanity check.
+ if !rp_state.issued_statements.contains(&candidate_hash) {
+ if res.is_ok() {
+ let statement = StatementWithPVD::Valid(candidate_hash);
+
+ sign_import_and_distribute_statement(
+ ctx,
+ rp_state,
+ &mut state.per_candidate,
+ statement,
+ state.keystore.clone(),
+ metrics,
+ )
+ .await?;
+ }
+ rp_state.issued_statements.insert(candidate_hash);
+ }
+ },
+ ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => {
+ if let Some(attesting) = rp_state.fallbacks.get_mut(&candidate_hash) {
+ if let Some(index) = attesting.backing.pop() {
+ attesting.from_validator = index;
+ let attesting = attesting.clone();
+
+ // The candidate state should be available because we've
+ // validated it before, the relay-parent is still around,
+ // and candidates are pruned on the basis of relay-parents.
+ //
+ // If it's not, then no point in validating it anyway.
+ if let Some(pvd) = state
+ .per_candidate
+ .get(&candidate_hash)
+ .map(|pc| pc.persisted_validation_data.clone())
+ {
+ kick_off_validation_work(
+ ctx,
+ rp_state,
+ pvd,
+ &state.background_validation_tx,
+ attesting,
+ )
+ .await?;
+ }
+ }
+ } else {
+ gum::warn!(
+ target: LOG_TARGET,
+ "AttestNoPoV was triggered without fallback being available."
+ );
+ debug_assert!(false);
+ }
+ },
+ }
+ },
+ None => {
+ // simple race condition; can be ignored - this relay-parent
+ // is no longer relevant.
+ },
}
- /// The dispute coordinator keeps track of all statements by validators about every recent
- /// candidate.
- ///
- /// When importing a statement, this should be called access the candidate receipt either
- /// from the statement itself or from the underlying statement table in order to craft
- /// and dispatch the notification to the dispute coordinator.
- ///
- /// This also does bounds-checking on the validator index and will return an error if the
- /// validator index is out of bounds for the current validator set. It's expected that
- /// this should never happen due to the interface of the candidate backing subsystem -
- /// the networking component responsible for feeding statements to the backing subsystem
- /// is meant to check the signature and provenance of all statements before submission.
- async fn dispatch_new_statement_to_dispute_coordinator(
- &self,
- sender: &mut impl overseer::CandidateBackingSenderTrait,
- candidate_hash: CandidateHash,
- statement: &SignedFullStatement,
- ) -> Result<(), ValidatorIndexOutOfBounds> {
- // Dispatch the statement to the dispute coordinator.
- let validator_index = statement.validator_index();
- let signing_context =
- SigningContext { parent_hash: self.parent, session_index: self.session_index };
-
- let validator_public = match self.table_context.validators.get(validator_index.0 as usize) {
- None => return Err(ValidatorIndexOutOfBounds),
- Some(v) => v,
- };
+ Ok(())
+}
- let maybe_candidate_receipt = match statement.payload() {
- Statement::Seconded(receipt) => Some(receipt.to_plain()),
- Statement::Valid(candidate_hash) => {
- // Valid statements are only supposed to be imported
- // once we've seen at least one `Seconded` statement.
- self.table.get_candidate(&candidate_hash).map(|c| c.to_plain())
- },
- };
+async fn sign_statement(
+ rp_state: &PerRelayParentState,
+ statement: StatementWithPVD,
+ keystore: SyncCryptoStorePtr,
+ metrics: &Metrics,
+) -> Option<SignedFullStatementWithPVD> {
+ let signed = rp_state
+ .table_context
+ .validator
+ .as_ref()?
+ .sign(keystore, statement)
+ .await
+ .ok()
+ .flatten()?;
+ metrics.on_statement_signed();
+ Some(signed)
+}
- let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement(
- statement.as_unchecked(),
- signing_context,
- validator_public.clone(),
- )
- .ok();
+/// The dispute coordinator keeps track of all statements by validators about every recent
+/// candidate.
+///
+/// When importing a statement, this should be called to access the candidate receipt either
+/// from the statement itself or from the underlying statement table in order to craft
+/// and dispatch the notification to the dispute coordinator.
+///
+/// This also does bounds-checking on the validator index and will return an error if the
+/// validator index is out of bounds for the current validator set. It's expected that
+/// this should never happen due to the interface of the candidate backing subsystem -
+/// the networking component responsible for feeding statements to the backing subsystem
+/// is meant to check the signature and provenance of all statements before submission.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn dispatch_new_statement_to_dispute_coordinator(
+ ctx: &mut Context,
+ rp_state: &PerRelayParentState,
+ candidate_hash: CandidateHash,
+ statement: &SignedFullStatementWithPVD,
+) -> Result<(), ValidatorIndexOutOfBounds> {
+ // Dispatch the statement to the dispute coordinator.
+ let validator_index = statement.validator_index();
+ let signing_context =
+ SigningContext { parent_hash: rp_state.parent, session_index: rp_state.session_index };
+
+ let validator_public = match rp_state.table_context.validators.get(validator_index.0 as usize) {
+ None => return Err(ValidatorIndexOutOfBounds),
+ Some(v) => v,
+ };
- if let (Some(candidate_receipt), Some(dispute_statement)) =
- (maybe_candidate_receipt, maybe_signed_dispute_statement)
- {
- sender
- .send_message(DisputeCoordinatorMessage::ImportStatements {
- candidate_hash,
- candidate_receipt,
- session: self.session_index,
- statements: vec![(dispute_statement, validator_index)],
- pending_confirmation: None,
- })
+ let maybe_candidate_receipt = match statement.payload() {
+ StatementWithPVD::Seconded(receipt, _) => Some(receipt.to_plain()),
+ StatementWithPVD::Valid(candidate_hash) => {
+ // Valid statements are only supposed to be imported
+ // once we've seen at least one `Seconded` statement.
+ rp_state.table.get_candidate(&candidate_hash).map(|c| c.to_plain())
+ },
+ };
+
+ let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement(
+ statement.as_unchecked(),
+ signing_context,
+ validator_public.clone(),
+ )
+ .ok();
+
+ if let (Some(candidate_receipt), Some(dispute_statement)) =
+ (maybe_candidate_receipt, maybe_signed_dispute_statement)
+ {
+ ctx.send_message(DisputeCoordinatorMessage::ImportStatements {
+ candidate_hash,
+ candidate_receipt,
+ session: rp_state.session_index,
+ statements: vec![(dispute_statement, validator_index)],
+ pending_confirmation: None,
+ })
+ .await;
+ }
+
+ Ok(())
+}
+
+/// Import a statement into the statement table and return the summary of the import.
+///
+/// This will fail with `Error::RejectedByProspectiveParachains` if the statement
+/// is `Seconded`, the candidate is fresh, and any of the following are true:
+/// 1. There is no `PersistedValidationData` attached.
+/// 2. Prospective parachains are enabled for the relay parent and the prospective
+/// parachains subsystem returned an empty `FragmentTreeMembership`, i.e. it did
+/// not recognize the candidate as being applicable to any of the active leaves.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn import_statement(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ per_candidate: &mut HashMap<CandidateHash, PerCandidateState>,
+ statement: &SignedFullStatementWithPVD,
+) -> Result<Option<TableSummary>, Error> {
+ gum::debug!(
+ target: LOG_TARGET,
+ statement = ?statement.payload().to_compact(),
+ validator_index = statement.validator_index().0,
+ "Importing statement",
+ );
+
+ let candidate_hash = statement.payload().candidate_hash();
+
+ // If this is a new candidate (statement is 'seconded' and candidate is unknown),
+ // we need to create an entry in the `PerCandidateState` map.
+ //
+ // If the relay parent supports prospective parachains, we also need
+ // to inform the prospective parachains subsystem of the seconded candidate.
+ // If `ProspectiveParachainsMessage::CandidateSeconded` fails, we return
+ // `Error::RejectedByProspectiveParachains`.
+ //
+ // Persisted Validation Data should be available - it may already be available
+ // if this is a candidate we are seconding.
+ //
+ // We should also not accept any candidates which have no valid depths under any of
+ // our active leaves.
+ if let StatementWithPVD::Seconded(candidate, pvd) = statement.payload() {
+ if !per_candidate.contains_key(&candidate_hash) {
+ if rp_state.prospective_parachains_mode.is_enabled() {
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded(
+ candidate.descriptor().para_id,
+ candidate.clone(),
+ pvd.clone(),
+ tx,
+ ))
.await;
+
+ match rx.await {
+ Err(oneshot::Canceled) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ "Could not reach the Prospective Parachains subsystem."
+ );
+
+ return Err(Error::RejectedByProspectiveParachains)
+ },
+ Ok(membership) =>
+ if membership.is_empty() {
+ return Err(Error::RejectedByProspectiveParachains)
+ },
+ }
+ }
+
+ // Only save the candidate if it was approved by prospective parachains.
+ per_candidate.insert(
+ candidate_hash,
+ PerCandidateState {
+ persisted_validation_data: pvd.clone(),
+ // This is set after importing when seconding locally.
+ seconded_locally: false,
+ para_id: candidate.descriptor().para_id,
+ relay_parent: candidate.descriptor().relay_parent,
+ },
+ );
}
+ }
+
+ if let Err(ValidatorIndexOutOfBounds) =
+ dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, statement)
+ .await
+ {
+ gum::warn!(
+ target: LOG_TARGET,
+ session_index = ?rp_state.session_index,
+ relay_parent = ?rp_state.parent,
+ validator_index = statement.validator_index().0,
+ "Supposedly 'Signed' statement has validator index out of bounds."
+ );
- Ok(())
+ return Ok(None)
}
- async fn handle_second_msg(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- candidate: CandidateReceipt,
- pov: PoV,
- ) -> Result<(), Error> {
- let _timer = self.metrics.time_process_second();
-
- let candidate_hash = candidate.hash();
- let span = root_span
- .child("second")
- .with_stage(jaeger::Stage::CandidateBacking)
- .with_pov(&pov)
- .with_candidate(candidate_hash)
- .with_relay_parent(self.parent);
-
- // Sanity check that candidate is from our assignment.
- if Some(candidate.descriptor().para_id) != self.assignment {
- gum::debug!(
- target: LOG_TARGET,
- our_assignment = ?self.assignment,
- collation = ?candidate.descriptor().para_id,
- "Subsystem asked to second for para outside of our assignment",
- );
+ let stmt = primitive_statement_to_table(statement);
- return Ok(())
- }
+ let summary = rp_state.table.import_statement(&rp_state.table_context, stmt);
- // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a
- // Seconded statement only if we have not seconded any other candidate and
- // have not signed a Valid statement for the requested candidate.
- if self.seconded.is_none() {
- // This job has not seconded a candidate yet.
+ if let Some(attested) = summary
+ .as_ref()
+ .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context))
+ {
+ // `HashSet::insert` returns true if the thing wasn't in there already.
+ if rp_state.backed.insert(candidate_hash) {
+ if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) {
+ let para_id = backed.candidate.descriptor.para_id;
+ gum::debug!(
+ target: LOG_TARGET,
+ candidate_hash = ?candidate_hash,
+ relay_parent = ?rp_state.parent,
+ %para_id,
+ "Candidate backed",
+ );
- if !self.issued_statements.contains(&candidate_hash) {
- let pov = Arc::new(pov);
- self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?;
+ // Inform the prospective parachains subsystem
+ // that the candidate is now backed.
+ if rp_state.prospective_parachains_mode.is_enabled() {
+ ctx.send_message(ProspectiveParachainsMessage::CandidateBacked(
+ para_id,
+ candidate_hash,
+ ))
+ .await;
+ }
+
+ // The provisioner waits on candidate-backing, which means
+ // that we need to send unbounded messages to avoid cycles.
+ //
+ // Backed candidates are bounded by the number of validators,
+ // parachains, and the block production rate of the relay chain.
+ let message = ProvisionerMessage::ProvisionableData(
+ rp_state.parent,
+ ProvisionableData::BackedCandidate(backed.receipt()),
+ );
+ ctx.send_unbounded_message(message);
}
}
+ }
+
+ issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table);
+
+ Ok(summary)
+}
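The `CandidateSeconded` round-trip in `import_statement` treats two distinct outcomes as a rejection: the subsystem dropping its end of the oneshot channel, and a reply with empty fragment-tree membership. A self-contained sketch of that decision using plain `futures` primitives, with `Vec<u32>` as a stand-in for the membership type:

```rust
use futures::channel::oneshot;
use futures::executor::block_on;

#[derive(Debug)]
enum Rejection {
    SubsystemUnreachable,
    NotInAnyFragmentTree,
}

// Await the subsystem's answer; a cancelled channel and an empty membership
// are both mapped to a rejection, mirroring the two arms above.
async fn await_membership(rx: oneshot::Receiver<Vec<u32>>) -> Result<Vec<u32>, Rejection> {
    match rx.await {
        Err(oneshot::Canceled) => Err(Rejection::SubsystemUnreachable),
        Ok(membership) if membership.is_empty() => Err(Rejection::NotInAnyFragmentTree),
        Ok(membership) => Ok(membership),
    }
}

fn main() {
    // Responder answers with non-empty membership: the candidate is accepted.
    let (tx, rx) = oneshot::channel();
    tx.send(vec![1]).unwrap();
    assert!(block_on(await_membership(rx)).is_ok());

    // Responder dropped without answering: treated as a rejection.
    let (tx, rx) = oneshot::channel::<Vec<u32>>();
    drop(tx);
    assert!(block_on(await_membership(rx)).is_err());
}
```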
- Ok(())
+/// Check if there have happened any new misbehaviors and issue necessary messages.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+fn issue_new_misbehaviors(
+ ctx: &mut Context,
+ relay_parent: Hash,
+ table: &mut Table<TableContext>,
+) {
+ // Collect the misbehaviors first to avoid a double mutable borrow of the table.
+ let misbehaviors: Vec<_> = table.drain_misbehaviors().collect();
+ for (validator_id, report) in misbehaviors {
+ // The provisioner waits on candidate-backing, which means
+ // that we need to send unbounded messages to avoid cycles.
+ //
+ // Misbehaviors are bounded by the number of validators and
+ // the block production protocol.
+ ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData(
+ relay_parent,
+ ProvisionableData::MisbehaviorReport(relay_parent, validator_id, report),
+ ));
}
+}
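The collect-before-iterating step in `issue_new_misbehaviors` is a borrow-checker workaround: the drain holds a mutable borrow of the table, so the reports are materialized into an owned `Vec` before the loop body runs. A reduced sketch with stand-in types:

```rust
struct Table {
    pending: Vec<String>,
}

impl Table {
    // Borrows the table mutably for as long as the iterator lives.
    fn drain_reports(&mut self) -> impl Iterator<Item = String> + '_ {
        self.pending.drain(..)
    }
}

fn dispatch(table: &mut Table, mut send: impl FnMut(&str)) {
    // Collecting ends the mutable borrow taken by `drain_reports`,
    // leaving `table` free to be touched again inside the loop.
    let reports: Vec<_> = table.drain_reports().collect();
    for report in reports {
        send(&report);
    }
}

fn main() {
    let mut table = Table { pending: vec!["report-a".into(), "report-b".into()] };
    let mut sent = Vec::new();
    dispatch(&mut table, |r| sent.push(r.to_owned()));
    assert_eq!(sent.len(), 2);
}
```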
- async fn handle_statement_message(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- statement: SignedFullStatement,
- ) -> Result<(), Error> {
- let _timer = self.metrics.time_process_statement();
- let _span = root_span
- .child("statement")
- .with_stage(jaeger::Stage::CandidateBacking)
- .with_candidate(statement.payload().candidate_hash())
- .with_relay_parent(self.parent);
-
- match self.maybe_validate_and_import(&root_span, ctx, statement).await {
- Err(Error::ValidationFailed(_)) => Ok(()),
- Err(e) => Err(e),
- Ok(()) => Ok(()),
- }
+/// Sign, import, and distribute a statement.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn sign_import_and_distribute_statement(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ per_candidate: &mut HashMap<CandidateHash, PerCandidateState>,
+ statement: StatementWithPVD,
+ keystore: SyncCryptoStorePtr,
+ metrics: &Metrics,
+) -> Result<Option<SignedFullStatementWithPVD>, Error> {
+ if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await {
+ import_statement(ctx, rp_state, per_candidate, &signed_statement).await?;
+
+ let smsg = StatementDistributionMessage::Share(
+ rp_state.parent,
+ StatementWithPVD::drop_pvd_from_signed(signed_statement.clone()),
+ );
+ ctx.send_unbounded_message(smsg);
+
+ Ok(Some(signed_statement))
+ } else {
+ Ok(None)
+ }
+}
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn background_validate_and_make_available(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ params: BackgroundValidationParams<
+ impl overseer::CandidateBackingSenderTrait,
+ impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync,
+ >,
+) -> Result<(), Error> {
+ let candidate_hash = params.candidate.hash();
+ if rp_state.awaiting_validation.insert(candidate_hash) {
+ // spawn background task.
+ let bg = async move {
+ if let Err(e) = validate_and_make_available(params).await {
+ if let Error::BackgroundValidationMpsc(error) = e {
+ gum::debug!(
+ target: LOG_TARGET,
+ ?error,
+ "Mpsc background validation mpsc died during validation- leaf no longer active?"
+ );
+ } else {
+ gum::error!(
+ target: LOG_TARGET,
+ "Failed to validate and make available: {:?}",
+ e
+ );
+ }
+ }
+ };
+
+ ctx.spawn("backing-validation", bg.boxed())
+ .map_err(|_| Error::FailedToSpawnBackgroundTask)?;
}
- fn handle_get_backed_candidates_message(
- &mut self,
- requested_candidates: Vec<CandidateHash>,
- tx: oneshot::Sender<Vec<BackedCandidate>>,
- ) -> Result<(), Error> {
- let _timer = self.metrics.time_get_backed_candidates();
+ Ok(())
+}
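`background_validate_and_make_available` relies on `HashSet::insert` returning `false` for a key that is already present, so a duplicate request can never spawn a second validation task for the same candidate. A reduced sketch, with `u64` standing in for `CandidateHash`:

```rust
use std::collections::HashSet;

// Spawn the task only if this candidate was not already awaiting validation.
fn spawn_once(awaiting: &mut HashSet<u64>, candidate: u64, mut spawn: impl FnMut(u64)) {
    if awaiting.insert(candidate) {
        spawn(candidate);
    }
}

fn main() {
    let mut awaiting = HashSet::new();
    let mut spawned = Vec::new();
    spawn_once(&mut awaiting, 7, |c| spawned.push(c));
    spawn_once(&mut awaiting, 7, |c| spawned.push(c)); // deduplicated
    assert_eq!(spawned, vec![7]);
}
```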
- let backed = requested_candidates
- .into_iter()
- .filter_map(|hash| {
- self.table
- .attested_candidate(&hash, &self.table_context)
- .and_then(|attested| table_attested_to_backed(attested, &self.table_context))
- })
- .collect();
-
- tx.send(backed).map_err(|data| Error::Send(data))?;
- Ok(())
+/// Kick off validation work and distribute the result as a signed statement.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn kick_off_validation_work(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ persisted_validation_data: PersistedValidationData,
+ background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+ attesting: AttestingData,
+) -> Result<(), Error> {
+ let candidate_hash = attesting.candidate.hash();
+ if rp_state.issued_statements.contains(&candidate_hash) {
+ return Ok(())
}
- /// Kick off validation work and distribute the result as a signed statement.
- async fn kick_off_validation_work(
- &mut self,
- ctx: &mut Context,
- attesting: AttestingData,
- span: Option<jaeger::Span>,
- ) -> Result<(), Error> {
- let candidate_hash = attesting.candidate.hash();
- if self.issued_statements.contains(&candidate_hash) {
+ gum::debug!(
+ target: LOG_TARGET,
+ candidate_hash = ?candidate_hash,
+ candidate_receipt = ?attesting.candidate,
+ "Kicking off validation",
+ );
+
+ let bg_sender = ctx.sender().clone();
+ let pov = PoVData::FetchFromValidator {
+ from_validator: attesting.from_validator,
+ candidate_hash,
+ pov_hash: attesting.pov_hash,
+ };
+
+ background_validate_and_make_available(
+ ctx,
+ rp_state,
+ BackgroundValidationParams {
+ sender: bg_sender,
+ tx_command: background_validation_tx.clone(),
+ candidate: attesting.candidate,
+ relay_parent: rp_state.parent,
+ persisted_validation_data,
+ pov,
+ n_validators: rp_state.table_context.validators.len(),
+ make_command: ValidatedCandidateCommand::Attest,
+ },
+ )
+ .await
+}
+
+/// Import the statement and kick off validation work if it is a part of our assignment.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn maybe_validate_and_import(
+ ctx: &mut Context,
+ state: &mut State,
+ relay_parent: Hash,
+ statement: SignedFullStatementWithPVD,
+) -> Result<(), Error> {
+ let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+ Some(r) => r,
+ None => {
+ gum::trace!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ "Received statement for unknown relay-parent"
+ );
+
return Ok(())
- }
+ },
+ };
- let descriptor = attesting.candidate.descriptor().clone();
+ let res = import_statement(ctx, rp_state, &mut state.per_candidate, &statement).await;
+ // If we get an `Error::RejectedByProspectiveParachains`,
+ // we log it and take no further action.
+ if let Err(Error::RejectedByProspectiveParachains) = res {
gum::debug!(
target: LOG_TARGET,
- candidate_hash = ?candidate_hash,
- candidate_receipt = ?attesting.candidate,
- "Kicking off validation",
+ ?relay_parent,
+ "Statement rejected by prospective parachains."
);
- // Check that candidate is collated by the right collator.
- if self.required_collator.as_ref().map_or(false, |c| c != &descriptor.collator) {
- // If not, we've got the statement in the table but we will
- // not issue validation work for it.
- //
- // Act as though we've issued a statement.
- self.issued_statements.insert(candidate_hash);
+ return Ok(())
+ }
+
+ if let Some(summary) = res? {
+ // import_statement already takes care of communicating with the
+ // prospective parachains subsystem. At this point, the candidate
+ // has already been accepted into the fragment trees.
+
+ let candidate_hash = summary.candidate;
+
+ if Some(summary.group_id) != rp_state.assignment {
return Ok(())
}
+ let attesting = match statement.payload() {
+ StatementWithPVD::Seconded(receipt, _) => {
+ let attesting = AttestingData {
+ candidate: rp_state
+ .table
+ .get_candidate(&candidate_hash)
+ .ok_or(Error::CandidateNotFound)?
+ .to_plain(),
+ pov_hash: receipt.descriptor.pov_hash,
+ from_validator: statement.validator_index(),
+ backing: Vec::new(),
+ };
+ rp_state.fallbacks.insert(summary.candidate, attesting.clone());
+ attesting
+ },
+ StatementWithPVD::Valid(candidate_hash) => {
+ if let Some(attesting) = rp_state.fallbacks.get_mut(candidate_hash) {
+ let our_index = rp_state.table_context.validator.as_ref().map(|v| v.index());
+ if our_index == Some(statement.validator_index()) {
+ return Ok(())
+ }
- let bg_sender = ctx.sender().clone();
- let pov = PoVData::FetchFromValidator {
- from_validator: attesting.from_validator,
- candidate_hash,
- pov_hash: attesting.pov_hash,
- };
- self.background_validate_and_make_available(
- ctx,
- BackgroundValidationParams {
- sender: bg_sender,
- tx_command: self.background_validation_tx.clone(),
- candidate: attesting.candidate,
- relay_parent: self.parent,
- pov,
- n_validators: self.table_context.validators.len(),
- span,
- make_command: ValidatedCandidateCommand::Attest,
+ if rp_state.awaiting_validation.contains(candidate_hash) {
+ // Job already running:
+ attesting.backing.push(statement.validator_index());
+ return Ok(())
+ } else {
+ // No job, so start another with current validator:
+ attesting.from_validator = statement.validator_index();
+ attesting.clone()
+ }
+ } else {
+ return Ok(())
+ }
},
- )
- .await
+ };
+
+ // After `import_statement` succeeds, the candidate entry is guaranteed
+ // to exist.
+ if let Some(pvd) = state
+ .per_candidate
+ .get(&candidate_hash)
+ .map(|pc| pc.persisted_validation_data.clone())
+ {
+ kick_off_validation_work(
+ ctx,
+ rp_state,
+ pvd,
+ &state.background_validation_tx,
+ attesting,
+ )
+ .await?;
+ }
}
+ Ok(())
+}
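The `fallbacks` bookkeeping above implements a retry scheme for attestation: a `Seconded` statement records whom to fetch the PoV from, and later `Valid` statements either queue their authors as backup sources while a fetch is in flight, or restart the fetch from the new author. A condensed sketch with stand-in types and field names:

```rust
use std::collections::{HashMap, HashSet};

#[derive(Clone, Debug, PartialEq)]
struct Attesting {
    from_validator: u32,
    backing: Vec<u32>,
}

// Returns `Some` when a new fetch should be kicked off from `validator`.
fn on_valid(
    fallbacks: &mut HashMap<u64, Attesting>,
    awaiting: &HashSet<u64>,
    candidate: u64,
    validator: u32,
) -> Option<Attesting> {
    let entry = fallbacks.get_mut(&candidate)?;
    if awaiting.contains(&candidate) {
        // A job is already running: remember this validator as a backup source.
        entry.backing.push(validator);
        None
    } else {
        // No job running: retry the fetch from the statement's author.
        entry.from_validator = validator;
        Some(entry.clone())
    }
}

fn main() {
    let mut fallbacks = HashMap::new();
    fallbacks.insert(1, Attesting { from_validator: 2, backing: vec![] });
    let mut awaiting = HashSet::new();

    awaiting.insert(1);
    assert_eq!(on_valid(&mut fallbacks, &awaiting, 1, 5), None); // queued as backup

    awaiting.clear();
    assert!(on_valid(&mut fallbacks, &awaiting, 1, 6).is_some()); // restarts the fetch
    assert_eq!(fallbacks[&1].backing, vec![5]);
}
```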
- /// Import the statement and kick off validation work if it is a part of our assignment.
- async fn maybe_validate_and_import(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- statement: SignedFullStatement,
- ) -> Result<(), Error> {
- if let Some(summary) = self.import_statement(ctx, &statement, root_span).await? {
- if Some(summary.group_id) != self.assignment {
- return Ok(())
- }
- let (attesting, span) = match statement.payload() {
- Statement::Seconded(receipt) => {
- let candidate_hash = summary.candidate;
-
- let span = self.get_unbacked_validation_child(
- root_span,
- summary.candidate,
- summary.group_id,
- );
+/// Kick off background validation with intent to second.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn validate_and_second(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ persisted_validation_data: PersistedValidationData,
+ candidate: &CandidateReceipt,
+ pov: Arc<PoV>,
+ background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+) -> Result<(), Error> {
+ let candidate_hash = candidate.hash();
+
+ gum::debug!(
+ target: LOG_TARGET,
+ candidate_hash = ?candidate_hash,
+ candidate_receipt = ?candidate,
+ "Validate and second candidate",
+ );
+
+ let bg_sender = ctx.sender().clone();
+ background_validate_and_make_available(
+ ctx,
+ rp_state,
+ BackgroundValidationParams {
+ sender: bg_sender,
+ tx_command: background_validation_tx.clone(),
+ candidate: candidate.clone(),
+ relay_parent: rp_state.parent,
+ persisted_validation_data,
+ pov: PoVData::Ready(pov),
+ n_validators: rp_state.table_context.validators.len(),
+ make_command: ValidatedCandidateCommand::Second,
+ },
+ )
+ .await?;
- let attesting = AttestingData {
- candidate: self
- .table
- .get_candidate(&candidate_hash)
- .ok_or(Error::CandidateNotFound)?
- .to_plain(),
- pov_hash: receipt.descriptor.pov_hash,
- from_validator: statement.validator_index(),
- backing: Vec::new(),
- };
- let child = span.as_ref().map(|s| s.child("try"));
- self.fallbacks.insert(summary.candidate, (attesting.clone(), span));
- (attesting, child)
- },
- Statement::Valid(candidate_hash) => {
- if let Some((attesting, span)) = self.fallbacks.get_mut(candidate_hash) {
- let our_index = self.table_context.validator.as_ref().map(|v| v.index());
- if our_index == Some(statement.validator_index()) {
- return Ok(())
- }
+ Ok(())
+}
- if self.awaiting_validation.contains(candidate_hash) {
- // Job already running:
- attesting.backing.push(statement.validator_index());
- return Ok(())
- } else {
- // No job, so start another with current validator:
- attesting.from_validator = statement.validator_index();
- (attesting.clone(), span.as_ref().map(|s| s.child("try")))
- }
- } else {
- return Ok(())
- }
- },
- };
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_second_message(
+ ctx: &mut Context,
+ state: &mut State,
+ candidate: CandidateReceipt,
+ persisted_validation_data: PersistedValidationData,
+ pov: PoV,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ let _timer = metrics.time_process_second();
- self.kick_off_validation_work(ctx, attesting, span).await?;
- }
- Ok(())
+ let candidate_hash = candidate.hash();
+ let relay_parent = candidate.descriptor().relay_parent;
+
+ if candidate.descriptor().persisted_validation_data_hash != persisted_validation_data.hash() {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?candidate_hash,
+ "Candidate backing was asked to second candidate with wrong PVD",
+ );
+
+ return Ok(())
}
- async fn sign_statement(&mut self, statement: Statement) -> Option<SignedFullStatement> {
- let signed = self
- .table_context
- .validator
- .as_ref()?
- .sign(self.keystore.clone(), statement)
- .await
- .ok()
- .flatten()?;
- self.metrics.on_statement_signed();
- Some(signed)
+ let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+ None => {
+ gum::trace!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ ?candidate_hash,
+ "We were asked to second a candidate outside of our view."
+ );
+
+ return Ok(())
+ },
+ Some(r) => r,
+ };
+
+ // Sanity check that candidate is from our assignment.
+ if Some(candidate.descriptor().para_id) != rp_state.assignment {
+ gum::debug!(
+ target: LOG_TARGET,
+ our_assignment = ?rp_state.assignment,
+ collation = ?candidate.descriptor().para_id,
+ "Subsystem asked to second for para outside of our assignment",
+ );
+
+ return Ok(())
}
- /// Insert or get the unbacked-span for the given candidate hash.
- fn insert_or_get_unbacked_span(
- &mut self,
- parent_span: &jaeger::Span,
- hash: CandidateHash,
- para_id: Option<ParaId>,
- ) -> Option<&jaeger::Span> {
- if !self.backed.contains(&hash) {
- // only add if we don't consider this backed.
- let span = self.unbacked_candidates.entry(hash).or_insert_with(|| {
- let s = parent_span.child("unbacked-candidate").with_candidate(hash);
- if let Some(para_id) = para_id {
- s.with_para_id(para_id)
- } else {
- s
- }
- });
- Some(span)
- } else {
- None
- }
+ // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a
+ // Seconded statement only if we have not signed a Valid statement for the requested candidate.
+ //
+ // The actual logic of issuing the signed statement checks that this isn't
+ // conflicting with other seconded candidates. Not doing that check here
+ // gives other subsystems the ability to get us to execute arbitrary candidates,
+ // but no more.
+ if !rp_state.issued_statements.contains(&candidate_hash) {
+ let pov = Arc::new(pov);
+
+ validate_and_second(
+ ctx,
+ rp_state,
+ persisted_validation_data,
+ &candidate,
+ pov,
+ &state.background_validation_tx,
+ )
+ .await?;
}
- fn get_unbacked_validation_child(
- &mut self,
- parent_span: &jaeger::Span,
- hash: CandidateHash,
- para_id: ParaId,
- ) -> Option<jaeger::Span> {
- self.insert_or_get_unbacked_span(parent_span, hash, Some(para_id)).map(|span| {
- span.child("validation")
- .with_candidate(hash)
- .with_stage(Stage::CandidateBacking)
- })
+ Ok(())
+}
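The early return for a mismatched PVD works because the candidate descriptor commits to `persisted_validation_data.hash()`, so a caller supplying different data is caught before any validation work is spawned. A toy sketch of the same commitment check, with `DefaultHasher` standing in for the real 256-bit hash over SCALE-encoded data:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn hash_of(data: &impl Hash) -> u64 {
    let mut h = DefaultHasher::new();
    data.hash(&mut h);
    h.finish()
}

// Accept the request only if the supplied data matches the descriptor's commitment.
fn accept_second(pvd_hash_in_descriptor: u64, supplied_pvd: &str) -> bool {
    hash_of(&supplied_pvd) == pvd_hash_in_descriptor
}

fn main() {
    let pvd = "persisted-validation-data";
    let committed = hash_of(&pvd);
    assert!(accept_second(committed, pvd));
    assert!(!accept_second(committed, "tampered-data"));
}
```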
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_statement_message(
+ ctx: &mut Context,
+ state: &mut State,
+ relay_parent: Hash,
+ statement: SignedFullStatementWithPVD,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ let _timer = metrics.time_process_statement();
+
+ match maybe_validate_and_import(ctx, state, relay_parent, statement).await {
+ Err(Error::ValidationFailed(_)) => Ok(()),
+ Err(e) => Err(e),
+ Ok(()) => Ok(()),
}
+}
- fn get_unbacked_statement_child(
- &mut self,
- parent_span: &jaeger::Span,
- hash: CandidateHash,
- validator: ValidatorIndex,
- ) -> Option<jaeger::Span> {
- self.insert_or_get_unbacked_span(parent_span, hash, None).map(|span| {
- span.child("import-statement")
- .with_candidate(hash)
- .with_validator_index(validator)
+fn handle_get_backed_candidates_message(
+ rp_state: &PerRelayParentState,
+ requested_candidates: Vec<CandidateHash>,
+ tx: oneshot::Sender<Vec<BackedCandidate>>,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ let _timer = metrics.time_get_backed_candidates();
+
+ let backed = requested_candidates
+ .into_iter()
+ .filter_map(|hash| {
+ rp_state
+ .table
+ .attested_candidate(&hash, &rp_state.table_context)
+ .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context))
})
- }
+ .collect();
- fn remove_unbacked_span(&mut self, hash: &CandidateHash) -> Option<jaeger::Span> {
- self.unbacked_candidates.remove(hash)
- }
+ tx.send(backed).map_err(|data| Error::Send(data))?;
+ Ok(())
}
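`handle_get_backed_candidates_message` is deliberately best-effort: requested hashes that are unknown, not yet attested, or not convertible into a `BackedCandidate` are filtered out rather than failing the whole request. A reduced sketch of that pipeline with stand-in types:

```rust
use std::collections::HashMap;

// `None` models a candidate that is known but cannot (yet) be backed.
fn backed_subset(
    table: &HashMap<u64, Option<&'static str>>,
    requested: Vec<u64>,
) -> Vec<&'static str> {
    requested
        .into_iter()
        .filter_map(|hash| table.get(&hash).copied().flatten())
        .collect()
}

fn main() {
    let mut table = HashMap::new();
    table.insert(1, Some("backed-1")); // attested and backable
    table.insert(2, None);             // known but not attested
    assert_eq!(backed_subset(&table, vec![1, 2, 3]), vec!["backed-1"]);
}
```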
diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests/mod.rs
similarity index 71%
rename from node/core/backing/src/tests.rs
rename to node/core/backing/src/tests/mod.rs
index 0243c68c7c4c..402462913749 100644
--- a/node/core/backing/src/tests.rs
+++ b/node/core/backing/src/tests/mod.rs
@@ -17,17 +17,18 @@
use super::*;
use ::test_helpers::{
dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature,
- dummy_committed_candidate_receipt, dummy_hash, dummy_validation_code,
+ dummy_committed_candidate_receipt, dummy_hash,
};
use assert_matches::assert_matches;
use futures::{future, Future};
-use polkadot_node_primitives::{BlockData, InvalidCandidate};
+use polkadot_node_primitives::{BlockData, InvalidCandidate, SignedFullStatement, Statement};
use polkadot_node_subsystem::{
+ jaeger,
messages::{
AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest,
ValidationFailed,
},
- ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal,
+ ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal, TimeoutExt,
};
use polkadot_node_subsystem_test_helpers as test_helpers;
use polkadot_primitives::v2::{
@@ -41,6 +42,10 @@ use sp_tracing as _;
use statement_table::v2::Misbehavior;
use std::collections::HashMap;
+mod prospective_parachains;
+
+const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2;
+
fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
val_ids.iter().map(|v| v.public().into()).collect()
}
@@ -53,6 +58,15 @@ fn table_statement_to_primitive(statement: TableStatement) -> Statement {
}
}
+fn dummy_pvd() -> PersistedValidationData {
+ PersistedValidationData {
+ parent_head: HeadData(vec![7, 8, 9]),
+ relay_parent_number: 0_u32.into(),
+ max_pov_size: 1024,
+ relay_parent_storage_root: dummy_hash(),
+ }
+}
+
struct TestState {
chain_ids: Vec<ParaId>,
keystore: SyncCryptoStorePtr,
@@ -175,21 +189,22 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
));
}
-fn make_erasure_root(test: &TestState, pov: PoV) -> Hash {
- let available_data =
- AvailableData { validation_data: test.validation_data.clone(), pov: Arc::new(pov) };
+fn make_erasure_root(test: &TestState, pov: PoV, validation_data: PersistedValidationData) -> Hash {
+ let available_data = AvailableData { validation_data, pov: Arc::new(pov) };
let chunks = erasure_coding::obtain_chunks_v1(test.validators.len(), &available_data).unwrap();
erasure_coding::branches(&chunks).root()
}
-#[derive(Default)]
+#[derive(Default, Clone)]
struct TestCandidateBuilder {
para_id: ParaId,
head_data: HeadData,
pov_hash: Hash,
relay_parent: Hash,
erasure_root: Hash,
+ persisted_validation_data_hash: Hash,
+ validation_code: Vec<u8>,
}
impl TestCandidateBuilder {
@@ -203,8 +218,8 @@ impl TestCandidateBuilder {
collator: dummy_collator(),
signature: dummy_collator_signature(),
para_head: dummy_hash(),
- validation_code_hash: dummy_validation_code().hash(),
- persisted_validation_data_hash: dummy_hash(),
+ validation_code_hash: ValidationCode(self.validation_code).hash(),
+ persisted_validation_data_hash: self.persisted_validation_data_hash,
},
commitments: CandidateCommitments {
head_data: self.head_data,
@@ -232,6 +247,17 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS
))))
.await;
+ // Prospective parachains mode is temporarily defined by the Runtime API version.
+ // Disable it for the test leaf.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx))
+ ) if parent == test_state.relay_parent => {
+ tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap();
+ }
+ );
+
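The test stubs the `RuntimeApiRequest::Version` request because, as the comment above notes, prospective parachains mode is temporarily derived from the runtime API version. A hypothetical sketch of such a gate; the threshold mirrors the test's `API_VERSION_PROSPECTIVE_DISABLED` constant, while the real mapping lives in the subsystem:

```rust
#[derive(Debug, PartialEq)]
enum ProspectiveParachainsMode {
    Disabled,
    Enabled,
}

// Assumed mapping: anything at or below the "disabled" version keeps the
// legacy behaviour; newer runtimes enable prospective parachains.
fn mode_from_api_version(version: u32) -> ProspectiveParachainsMode {
    const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2;
    if version > API_VERSION_PROSPECTIVE_DISABLED {
        ProspectiveParachainsMode::Enabled
    } else {
        ProspectiveParachainsMode::Disabled
    }
}

fn main() {
    assert_eq!(mode_from_api_version(2), ProspectiveParachainsMode::Disabled);
    assert_eq!(mode_from_api_version(3), ProspectiveParachainsMode::Enabled);
}
```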
// Check that subsystem job issues a request for a validator set.
assert_matches!(
virtual_overseer.recv().await,
@@ -310,6 +336,8 @@ fn backing_second_works() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap();
@@ -319,7 +347,9 @@ fn backing_second_works() {
relay_parent: test_state.relay_parent,
pov_hash,
head_data: expected_head_data.clone(),
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -327,31 +357,50 @@ fn backing_second_works() {
let second = CandidateBackingMessage::Second(
test_state.relay_parent,
candidate.to_plain(),
+ pvd.clone(),
pov.clone(),
);
virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
candidate_receipt,
- pov,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate.commitments.hash() == candidate_receipt.commitments_hash => {
- tx.send(Ok(
- ValidationResult::Valid(CandidateCommitments {
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate.commitments.hash() == candidate_receipt.commitments_hash =>
+ {
+ tx.send(Ok(ValidationResult::Valid(
+ CandidateCommitments {
head_data: expected_head_data.clone(),
horizontal_messages: Vec::new(),
upward_messages: Vec::new(),
new_validation_code: None,
processed_downward_messages: 0,
hrmp_watermark: 0,
- }, test_state.validation_data.clone()),
- )).unwrap();
+ },
+ test_state.validation_data.clone(),
+ )))
+ .unwrap();
}
);
@@ -407,6 +456,8 @@ fn backing_works() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let pov_hash = pov.hash();
@@ -417,7 +468,8 @@ fn backing_works() {
relay_parent: test_state.relay_parent,
pov_hash,
head_data: expected_head_data.clone(),
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -440,9 +492,9 @@ fn backing_works() {
.await
.expect("Insert key into keystore");
- let signed_a = SignedFullStatement::sign(
+ let signed_a = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate_a.clone()),
+ StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -452,9 +504,9 @@ fn backing_works() {
.flatten()
.expect("should be signed");
- let signed_b = SignedFullStatement::sign(
+ let signed_b = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate_a_hash),
+ StatementWithPVD::Valid(candidate_a_hash),
&test_state.signing_context,
ValidatorIndex(5),
&public1.into(),
@@ -477,6 +529,15 @@ fn backing_works() {
)
.await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
// Sending a `Statement::Seconded` for our assignment will start
// validation process. The first thing requested is the PoV.
assert_matches!(
@@ -497,13 +558,20 @@ fn backing_works() {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate_a_commitments_hash=> {
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate_a_commitments_hash == candidate_receipt.commitments_hash =>
+ {
tx.send(Ok(
ValidationResult::Valid(CandidateCommitments {
head_data: expected_head_data.clone(),
@@ -584,6 +652,8 @@ fn backing_works_while_validation_ongoing() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let pov_hash = pov.hash();
@@ -594,7 +664,8 @@ fn backing_works_while_validation_ongoing() {
relay_parent: test_state.relay_parent,
pov_hash,
head_data: expected_head_data.clone(),
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -624,9 +695,9 @@ fn backing_works_while_validation_ongoing() {
.await
.expect("Insert key into keystore");
- let signed_a = SignedFullStatement::sign(
+ let signed_a = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate_a.clone()),
+ StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -636,9 +707,9 @@ fn backing_works_while_validation_ongoing() {
.flatten()
.expect("should be signed");
- let signed_b = SignedFullStatement::sign(
+ let signed_b = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate_a_hash),
+ StatementWithPVD::Valid(candidate_a_hash),
&test_state.signing_context,
ValidatorIndex(5),
&public1.into(),
@@ -648,9 +719,9 @@ fn backing_works_while_validation_ongoing() {
.flatten()
.expect("should be signed");
- let signed_c = SignedFullStatement::sign(
+ let signed_c = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate_a_hash),
+ StatementWithPVD::Valid(candidate_a_hash),
&test_state.signing_context,
ValidatorIndex(3),
&public3.into(),
@@ -672,6 +743,15 @@ fn backing_works_while_validation_ongoing() {
)
.await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
// Sending a `Statement::Seconded` for our assignment will start
// validation process. The first thing requested is PoV from the
// `PoVDistribution`.
@@ -693,13 +773,20 @@ fn backing_works_while_validation_ongoing() {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate_a_commitments_hash == c.commitments_hash => {
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate_a_commitments_hash == candidate_receipt.commitments_hash =>
+ {
// we never validate the candidate. our local node
// shouldn't issue any statements.
std::mem::forget(tx);
@@ -793,6 +880,8 @@ fn backing_misbehavior_works() {
let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
let pov_hash = pov.hash();
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap();
@@ -800,8 +889,9 @@ fn backing_misbehavior_works() {
para_id: test_state.chain_ids[0],
relay_parent: test_state.relay_parent,
pov_hash,
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
head_data: expected_head_data.clone(),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -816,9 +906,9 @@ fn backing_misbehavior_works() {
)
.await
.expect("Insert key into keystore");
- let seconded_2 = SignedFullStatement::sign(
+ let seconded_2 = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate_a.clone()),
+ StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -828,9 +918,9 @@ fn backing_misbehavior_works() {
.flatten()
.expect("should be signed");
- let valid_2 = SignedFullStatement::sign(
+ let valid_2 = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate_a_hash),
+ StatementWithPVD::Valid(candidate_a_hash),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -853,6 +943,15 @@ fn backing_misbehavior_works() {
)
.await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityDistribution(
@@ -869,13 +968,20 @@ fn backing_misbehavior_works() {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate_a_commitments_hash == c.commitments_hash => {
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate_a_commitments_hash == candidate_receipt.commitments_hash =>
+ {
tx.send(Ok(
ValidationResult::Valid(CandidateCommitments {
head_data: expected_head_data.clone(),
@@ -991,8 +1097,17 @@ fn backing_dont_second_invalid() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov_block_a = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd_a = dummy_pvd();
+ let validation_code_a = ValidationCode(vec![1, 2, 3]);
let pov_block_b = PoV { block_data: BlockData(vec![45, 46, 47]) };
+ let pvd_b = {
+ let mut pvd_b = pvd_a.clone();
+ pvd_b.parent_head = HeadData(vec![14, 15, 16]);
+ pvd_b.max_pov_size = pvd_a.max_pov_size / 2;
+ pvd_b
+ };
+ let validation_code_b = ValidationCode(vec![4, 5, 6]);
let pov_hash_a = pov_block_a.hash();
let pov_hash_b = pov_block_b.hash();
@@ -1003,7 +1118,9 @@ fn backing_dont_second_invalid() {
para_id: test_state.chain_ids[0],
relay_parent: test_state.relay_parent,
pov_hash: pov_hash_a,
- erasure_root: make_erasure_root(&test_state, pov_block_a.clone()),
+ erasure_root: make_erasure_root(&test_state, pov_block_a.clone(), pvd_a.clone()),
+ persisted_validation_data_hash: pvd_a.hash(),
+ validation_code: validation_code_a.0.clone(),
..Default::default()
}
.build();
@@ -1012,8 +1129,10 @@ fn backing_dont_second_invalid() {
para_id: test_state.chain_ids[0],
relay_parent: test_state.relay_parent,
pov_hash: pov_hash_b,
- erasure_root: make_erasure_root(&test_state, pov_block_b.clone()),
+ erasure_root: make_erasure_root(&test_state, pov_block_b.clone(), pvd_b.clone()),
head_data: expected_head_data.clone(),
+ persisted_validation_data_hash: pvd_b.hash(),
+ validation_code: validation_code_b.0.clone(),
..Default::default()
}
.build();
@@ -1021,21 +1140,38 @@ fn backing_dont_second_invalid() {
let second = CandidateBackingMessage::Second(
test_state.relay_parent,
candidate_a.to_plain(),
+ pvd_a.clone(),
pov_block_a.clone(),
);
virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code_a.hash() => {
+ tx.send(Ok(Some(validation_code_a.clone()))).unwrap();
+ }
+ );
+
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => {
+ ),
+ ) if _pvd == pvd_a &&
+ _validation_code == validation_code_a &&
+ *_pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate_a.commitments.hash() == candidate_receipt.commitments_hash =>
+ {
tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap();
}
);
@@ -1050,21 +1186,38 @@ fn backing_dont_second_invalid() {
let second = CandidateBackingMessage::Second(
test_state.relay_parent,
candidate_b.to_plain(),
+ pvd_b.clone(),
pov_block_b.clone(),
);
virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code_b.hash() => {
+ tx.send(Ok(Some(validation_code_b.clone()))).unwrap();
+ }
+ );
+
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && c.descriptor() == candidate_b.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => {
+ ),
+ ) if _pvd == pvd_b &&
+ _validation_code == validation_code_b &&
+ *_pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate_b.commitments.hash() == candidate_receipt.commitments_hash =>
+ {
tx.send(Ok(
ValidationResult::Valid(CandidateCommitments {
head_data: expected_head_data.clone(),
@@ -1073,7 +1226,7 @@ fn backing_dont_second_invalid() {
new_validation_code: None,
processed_downward_messages: 0,
hrmp_watermark: 0,
- }, test_state.validation_data.clone()),
+ }, pvd_b.clone()),
)).unwrap();
}
);
@@ -1125,6 +1278,8 @@ fn backing_second_after_first_fails_works() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let pov_hash = pov.hash();
@@ -1132,7 +1287,9 @@ fn backing_second_after_first_fails_works() {
para_id: test_state.chain_ids[0],
relay_parent: test_state.relay_parent,
pov_hash,
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -1145,9 +1302,9 @@ fn backing_second_after_first_fails_works() {
.await
.expect("Insert key into keystore");
- let signed_a = SignedFullStatement::sign(
+ let signed_a = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate.clone()),
+ StatementWithPVD::Seconded(candidate.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(2),
&validator2.into(),
@@ -1171,6 +1328,15 @@ fn backing_second_after_first_fails_works() {
)
.await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
// Subsystem requests PoV and requests validation.
assert_matches!(
virtual_overseer.recv().await,
@@ -1189,13 +1355,20 @@ fn backing_second_after_first_fails_works() {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() => {
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate.commitments.hash() == candidate_receipt.commitments_hash =>
+ {
tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap();
}
);
@@ -1205,12 +1378,15 @@ fn backing_second_after_first_fails_works() {
let second = CandidateBackingMessage::Second(
test_state.relay_parent,
candidate.to_plain(),
+ pvd.clone(),
pov.clone(),
);
virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
let pov_to_second = PoV { block_data: BlockData(vec![3, 2, 1]) };
+ let pvd_to_second = dummy_pvd();
+ let validation_code_to_second = ValidationCode(vec![5, 6, 7]);
let pov_hash = pov_to_second.hash();
@@ -1218,7 +1394,13 @@ fn backing_second_after_first_fails_works() {
para_id: test_state.chain_ids[0],
relay_parent: test_state.relay_parent,
pov_hash,
- erasure_root: make_erasure_root(&test_state, pov_to_second.clone()),
+ erasure_root: make_erasure_root(
+ &test_state,
+ pov_to_second.clone(),
+ pvd_to_second.clone(),
+ ),
+ persisted_validation_data_hash: pvd_to_second.hash(),
+ validation_code: validation_code_to_second.0.clone(),
..Default::default()
}
.build();
@@ -1226,6 +1408,7 @@ fn backing_second_after_first_fails_works() {
let second = CandidateBackingMessage::Second(
test_state.relay_parent,
candidate_to_second.to_plain(),
+ pvd_to_second.clone(),
pov_to_second.clone(),
);
@@ -1234,15 +1417,19 @@ fn backing_second_after_first_fails_works() {
// triggered on the prev step.
virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code_to_second.hash() => {
+ tx.send(Ok(Some(validation_code_to_second.clone()))).unwrap();
+ }
+ );
+
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- _,
- pov,
- _,
- _,
- )
+ CandidateValidationMessage::ValidateFromExhaustive(_, _, _, pov, ..),
) => {
assert_eq!(&*pov, &pov_to_second);
}
@@ -1260,6 +1447,8 @@ fn backing_works_after_failed_validation() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let pov_hash = pov.hash();
@@ -1267,7 +1456,8 @@ fn backing_works_after_failed_validation() {
para_id: test_state.chain_ids[0],
relay_parent: test_state.relay_parent,
pov_hash,
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -1279,9 +1469,9 @@ fn backing_works_after_failed_validation() {
)
.await
.expect("Insert key into keystore");
- let signed_a = SignedFullStatement::sign(
+ let signed_a = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate.clone()),
+ StatementWithPVD::Seconded(candidate.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -1305,6 +1495,15 @@ fn backing_works_after_failed_validation() {
)
.await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
// Subsystem requests PoV and requests validation.
assert_matches!(
virtual_overseer.recv().await,
@@ -1323,13 +1522,20 @@ fn backing_works_after_failed_validation() {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
tx,
- )
- ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() => {
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate.commitments.hash() == candidate_receipt.commitments_hash =>
+ {
tx.send(Err(ValidationFailed("Internal test error".into()))).unwrap();
}
);
@@ -1352,6 +1558,7 @@ fn backing_works_after_failed_validation() {
// Test that a `CandidateBackingMessage::Second` issues validation work
// and in case validation is successful issues a `StatementDistributionMessage`.
#[test]
+#[ignore] // `required_collator` is disabled.
fn backing_doesnt_second_wrong_collator() {
let mut test_state = TestState::default();
test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore {
@@ -1363,6 +1570,8 @@ fn backing_doesnt_second_wrong_collator() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap();
@@ -1372,7 +1581,9 @@ fn backing_doesnt_second_wrong_collator() {
relay_parent: test_state.relay_parent,
pov_hash,
head_data: expected_head_data.clone(),
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -1380,6 +1591,7 @@ fn backing_doesnt_second_wrong_collator() {
let second = CandidateBackingMessage::Second(
test_state.relay_parent,
candidate.to_plain(),
+ pvd.clone(),
pov.clone(),
);
@@ -1403,6 +1615,7 @@ fn backing_doesnt_second_wrong_collator() {
}
#[test]
+#[ignore] // `required_collator` is disabled.
fn validation_work_ignores_wrong_collator() {
let mut test_state = TestState::default();
test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore {
@@ -1414,6 +1627,8 @@ fn validation_work_ignores_wrong_collator() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let pov_hash = pov.hash();
@@ -1424,7 +1639,9 @@ fn validation_work_ignores_wrong_collator() {
relay_parent: test_state.relay_parent,
pov_hash,
head_data: expected_head_data.clone(),
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -1436,9 +1653,9 @@ fn validation_work_ignores_wrong_collator() {
)
.await
.expect("Insert key into keystore");
- let seconding = SignedFullStatement::sign(
+ let seconding = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate_a.clone()),
+ StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -1543,6 +1760,8 @@ fn retry_works() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let pov_hash = pov.hash();
@@ -1550,7 +1769,9 @@ fn retry_works() {
para_id: test_state.chain_ids[0],
relay_parent: test_state.relay_parent,
pov_hash,
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -1576,9 +1797,9 @@ fn retry_works() {
)
.await
.expect("Insert key into keystore");
- let signed_a = SignedFullStatement::sign(
+ let signed_a = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate.clone()),
+ StatementWithPVD::Seconded(candidate.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -1587,9 +1808,9 @@ fn retry_works() {
.ok()
.flatten()
.expect("should be signed");
- let signed_b = SignedFullStatement::sign(
+ let signed_b = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate.hash()),
+ StatementWithPVD::Valid(candidate.hash()),
&test_state.signing_context,
ValidatorIndex(3),
&public3.into(),
@@ -1598,9 +1819,9 @@ fn retry_works() {
.ok()
.flatten()
.expect("should be signed");
- let signed_c = SignedFullStatement::sign(
+ let signed_c = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate.hash()),
+ StatementWithPVD::Valid(candidate.hash()),
&test_state.signing_context,
ValidatorIndex(5),
&public5.into(),
@@ -1623,6 +1844,15 @@ fn retry_works() {
)
.await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
// Subsystem requests PoV and requests validation.
// We cancel - should mean retry on next backing statement.
assert_matches!(
@@ -1651,7 +1881,7 @@ fn retry_works() {
.await;
// Not deterministic which message comes first:
- for _ in 0u32..2 {
+ for _ in 0u32..3 {
match virtual_overseer.recv().await {
AllMessages::Provisioner(ProvisionerMessage::ProvisionableData(
_,
@@ -1664,6 +1894,12 @@ fn retry_works() {
) if relay_parent == test_state.relay_parent => {
std::mem::drop(tx);
},
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ _,
+ RuntimeApiRequest::ValidationCodeByHash(hash, tx),
+ )) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ },
msg => {
assert!(false, "Unexpected message: {:?}", msg);
},
@@ -1682,6 +1918,15 @@ fn retry_works() {
)
.await;
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityDistribution(
@@ -1700,13 +1945,19 @@ fn retry_works() {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromChainState(
- c,
- pov,
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
timeout,
- _tx,
- )
- ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash()
+ ..
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate.commitments.hash() == candidate_receipt.commitments_hash
);
virtual_overseer
});
@@ -1720,6 +1971,8 @@ fn observes_backing_even_if_not_validator() {
test_startup(&mut virtual_overseer, &test_state).await;
let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
let pov_hash = pov.hash();
@@ -1730,7 +1983,9 @@ fn observes_backing_even_if_not_validator() {
relay_parent: test_state.relay_parent,
pov_hash,
head_data: expected_head_data.clone(),
- erasure_root: make_erasure_root(&test_state, pov.clone()),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
..Default::default()
}
.build();
@@ -1760,9 +2015,9 @@ fn observes_backing_even_if_not_validator() {
// Produce a 3-of-5 quorum on the candidate.
- let signed_a = SignedFullStatement::sign(
+ let signed_a = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Seconded(candidate_a.clone()),
+ StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()),
&test_state.signing_context,
ValidatorIndex(0),
&public0.into(),
@@ -1772,9 +2027,9 @@ fn observes_backing_even_if_not_validator() {
.flatten()
.expect("should be signed");
- let signed_b = SignedFullStatement::sign(
+ let signed_b = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate_a_hash),
+ StatementWithPVD::Valid(candidate_a_hash),
&test_state.signing_context,
ValidatorIndex(5),
&public1.into(),
@@ -1784,9 +2039,9 @@ fn observes_backing_even_if_not_validator() {
.flatten()
.expect("should be signed");
- let signed_c = SignedFullStatement::sign(
+ let signed_c = SignedFullStatementWithPVD::sign(
&test_state.keystore,
- Statement::Valid(candidate_a_hash),
+ StatementWithPVD::Valid(candidate_a_hash),
&test_state.signing_context,
ValidatorIndex(2),
&public2.into(),
@@ -1847,3 +2102,183 @@ fn observes_backing_even_if_not_validator() {
virtual_overseer
});
}
+
+// Tests that it's impossible to second multiple candidates per relay parent
+// without prospective parachains.
+#[test]
+fn cannot_second_multiple_candidates_per_parent() {
+ let test_state = TestState::default();
+ test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+ test_startup(&mut virtual_overseer, &test_state).await;
+
+ let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
+
+ let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap();
+
+ let pov_hash = pov.hash();
+ let candidate_builder = TestCandidateBuilder {
+ para_id: test_state.chain_ids[0],
+ relay_parent: test_state.relay_parent,
+ pov_hash,
+ head_data: expected_head_data.clone(),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ };
+ let candidate = candidate_builder.clone().build();
+
+ let second = CandidateBackingMessage::Second(
+ test_state.relay_parent,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CandidateValidation(
+ CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
+ timeout,
+ tx,
+ ),
+ ) if _pvd == pvd &&
+ _validation_code == validation_code &&
+ *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate.commitments.hash() == candidate_receipt.commitments_hash =>
+ {
+ tx.send(Ok(ValidationResult::Valid(
+ CandidateCommitments {
+ head_data: expected_head_data.clone(),
+ horizontal_messages: Vec::new(),
+ upward_messages: Vec::new(),
+ new_validation_code: None,
+ processed_downward_messages: 0,
+ hrmp_watermark: 0,
+ },
+ test_state.validation_data.clone(),
+ )))
+ .unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::AvailabilityStore(
+ AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. }
+ ) if candidate_hash == candidate.hash() => {
+ tx.send(Ok(())).unwrap();
+ }
+ );
+
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate.hash(),
+ test_state.session(),
+ vec![ValidatorIndex(0)],
+ )
+ .await;
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::StatementDistribution(
+ StatementDistributionMessage::Share(
+ parent_hash,
+ _signed_statement,
+ )
+ ) if parent_hash == test_state.relay_parent => {}
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => {
+ assert_eq!(test_state.relay_parent, hash);
+ assert_matches!(statement.payload(), Statement::Seconded(_));
+ }
+ );
+
+ // Try to second candidate with the same relay parent again.
+
+ // Make sure the candidate hash is different.
+ let validation_code = ValidationCode(vec![4, 5, 6]);
+ let mut candidate_builder = candidate_builder;
+ candidate_builder.validation_code = validation_code.0.clone();
+ let candidate = candidate_builder.build();
+
+ let second = CandidateBackingMessage::Second(
+ test_state.relay_parent,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ // The validation is still requested.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CandidateValidation(
+ CandidateValidationMessage::ValidateFromExhaustive(.., tx),
+ ) => {
+ tx.send(Ok(ValidationResult::Valid(
+ CandidateCommitments {
+ head_data: expected_head_data.clone(),
+ horizontal_messages: Vec::new(),
+ upward_messages: Vec::new(),
+ new_validation_code: None,
+ processed_downward_messages: 0,
+ hrmp_watermark: 0,
+ },
+ test_state.validation_data.clone(),
+ )))
+ .unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::AvailabilityStore(
+ AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. }
+ ) if candidate_hash == candidate.hash() => {
+ tx.send(Ok(())).unwrap();
+ }
+ );
+
+		// Validation is done, but the candidate is rejected because depth 0 is already occupied.
+
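+		// Assert that no further messages are sent within a short timeout.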
+ assert!(virtual_overseer
+ .recv()
+ .timeout(std::time::Duration::from_millis(50))
+ .await
+ .is_none());
+
+ virtual_overseer
+ });
+}
diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs
new file mode 100644
index 000000000000..5be62b344980
--- /dev/null
+++ b/node/core/backing/src/tests/prospective_parachains.rs
@@ -0,0 +1,1352 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! Tests for the backing subsystem with prospective parachains enabled.
+
+use polkadot_node_subsystem::{messages::ChainApiMessage, TimeoutExt};
+use polkadot_primitives::v2::{BlockNumber, Header};
+
+use super::*;
+
+const API_VERSION_PROSPECTIVE_ENABLED: u32 = 3;
+
+struct TestLeaf {
+ activated: ActivatedLeaf,
+ min_relay_parents: Vec<(ParaId, u32)>,
+}
+
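+// In these tests, the parent of a block is derived by incrementing the low
+// u64 of its hash, giving a deterministic ancestry chain.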
+fn get_parent_hash(hash: Hash) -> Hash {
+ Hash::from_low_u64_be(hash.to_low_u64_be() + 1)
+}
+
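+// Activate a leaf and answer the requests the subsystem issues on startup:
+// the Runtime API version probe, minimum relay parents, header fetches for
+// the implicit ancestry, and per-relay-parent validator, group, session and
+// core queries.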
+async fn activate_leaf(
+ virtual_overseer: &mut VirtualOverseer,
+ leaf: TestLeaf,
+ test_state: &TestState,
+ seconded_in_view: usize,
+) {
+ let TestLeaf { activated, min_relay_parents } = leaf;
+ let leaf_hash = activated.hash;
+ let leaf_number = activated.number;
+ // Start work on some new parent.
+ virtual_overseer
+ .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(
+ activated,
+ ))))
+ .await;
+
+ // Prospective parachains mode is temporarily defined by the Runtime API version.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx))
+ ) if parent == leaf_hash => {
+ tx.send(Ok(API_VERSION_PROSPECTIVE_ENABLED)).unwrap();
+ }
+ );
+
+ let min_min = *min_relay_parents
+ .iter()
+ .map(|(_, block_num)| block_num)
+ .min()
+ .unwrap_or(&leaf_number);
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx)
+ ) if parent == leaf_hash => {
+ tx.send(min_relay_parents).unwrap();
+ }
+ );
+
+ let ancestry_len = leaf_number + 1 - min_min;
+
+ let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h)))
+ .take(ancestry_len as usize);
+ let ancestry_numbers = (min_min..=leaf_number).rev();
+ let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable();
+
+ let mut next_overseer_message = None;
+ // How many blocks were actually requested.
+ let mut requested_len = 0;
+ loop {
+ let (hash, number) = match ancestry_iter.next() {
+ Some((hash, number)) => (hash, number),
+ None => break,
+ };
+
+ // May be `None` for the last element.
+ let parent_hash =
+ ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash));
+
+ let msg = virtual_overseer.recv().await;
+		// Some blocks may have been cached by the implicit view;
+		// in that case no header request arrives, so stash the message for reuse.
+ if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) {
+ next_overseer_message.replace(msg);
+ break
+ }
+
+ assert_matches!(
+ msg,
+ AllMessages::ChainApi(
+ ChainApiMessage::BlockHeader(_hash, tx)
+ ) if _hash == hash => {
+ let header = Header {
+ parent_hash,
+ number,
+ state_root: Hash::zero(),
+ extrinsics_root: Hash::zero(),
+ digest: Default::default(),
+ };
+
+ tx.send(Ok(Some(header))).unwrap();
+ }
+ );
+ requested_len += 1;
+ }
+
+ for _ in 0..seconded_in_view {
+ let msg = match next_overseer_message.take() {
+ Some(msg) => msg,
+ None => virtual_overseer.recv().await,
+ };
+ assert_matches!(
+ msg,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::GetTreeMembership(.., tx),
+ ) => {
+ tx.send(Vec::new()).unwrap();
+ }
+ );
+ }
+
+ for hash in ancestry_hashes.take(requested_len) {
+ // Check that subsystem job issues a request for a validator set.
+ let msg = match next_overseer_message.take() {
+ Some(msg) => msg,
+ None => virtual_overseer.recv().await,
+ };
+ assert_matches!(
+ msg,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
+ ) if parent == hash => {
+ tx.send(Ok(test_state.validator_public.clone())).unwrap();
+ }
+ );
+
+ // Check that subsystem job issues a request for the validator groups.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx))
+ ) if parent == hash => {
+ tx.send(Ok(test_state.validator_groups.clone())).unwrap();
+ }
+ );
+
+ // Check that subsystem job issues a request for the session index for child.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
+ ) if parent == hash => {
+ tx.send(Ok(test_state.signing_context.session_index)).unwrap();
+ }
+ );
+
+ // Check that subsystem job issues a request for the availability cores.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx))
+ ) if parent == hash => {
+ tx.send(Ok(test_state.availability_cores.clone())).unwrap();
+ }
+ );
+ }
+}
+
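+// Assert the message sequence expected while validating a seconded candidate:
+// a validation code fetch, an optional PoV fetch, exhaustive validation and
+// storing the available data.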
+async fn assert_validate_seconded_candidate(
+ virtual_overseer: &mut VirtualOverseer,
+ relay_parent: Hash,
+ candidate: &CommittedCandidateReceipt,
+ pov: &PoV,
+ pvd: &PersistedValidationData,
+ validation_code: &ValidationCode,
+ expected_head_data: &HeadData,
+ fetch_pov: bool,
+) {
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCodeByHash(hash, tx))
+ ) if parent == relay_parent && hash == validation_code.hash() => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ }
+ );
+
+ if fetch_pov {
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::AvailabilityDistribution(
+ AvailabilityDistributionMessage::FetchPoV {
+ relay_parent: hash,
+ tx,
+ ..
+ }
+ ) if hash == relay_parent => {
+ tx.send(pov.clone()).unwrap();
+ }
+ );
+ }
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive(
+ _pvd,
+ _validation_code,
+ candidate_receipt,
+ _pov,
+ timeout,
+ tx,
+ )) if &_pvd == pvd &&
+ &_validation_code == validation_code &&
+ &*_pov == pov &&
+ &candidate_receipt.descriptor == candidate.descriptor() &&
+ timeout == BACKING_EXECUTION_TIMEOUT &&
+ candidate.commitments.hash() == candidate_receipt.commitments_hash =>
+ {
+ tx.send(Ok(ValidationResult::Valid(
+ CandidateCommitments {
+ head_data: expected_head_data.clone(),
+ horizontal_messages: Vec::new(),
+ upward_messages: Vec::new(),
+ new_validation_code: None,
+ processed_downward_messages: 0,
+ hrmp_watermark: 0,
+ },
+ pvd.clone(),
+ )))
+ .unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::AvailabilityStore(
+ AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. }
+ ) if candidate_hash == candidate.hash() => {
+ tx.send(Ok(())).unwrap();
+ }
+ );
+}
+
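+// Answer the expected hypothetical depth requests, matching them in any order.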
+async fn assert_hypothetical_depth_requests(
+ virtual_overseer: &mut VirtualOverseer,
+	mut expected_requests: Vec<(HypotheticalDepthRequest, Vec<usize>)>,
+) {
+	// Requests may arrive in any order.
+ let requests_num = expected_requests.len();
+
+ for _ in 0..requests_num {
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx),
+ ) => {
+ let idx = match expected_requests.iter().position(|r| r.0 == request) {
+ Some(idx) => idx,
+ None => panic!(
+ "unexpected hypothetical depth request, no match found for {:?}",
+ request
+ ),
+ };
+ let resp = std::mem::take(&mut expected_requests[idx].1);
+ tx.send(resp).unwrap();
+
+ expected_requests.remove(idx);
+ }
+ );
+ }
+}
+
+// Test that `seconding_sanity_check` works when a candidate is allowed
+// for all leaves.
+#[test]
+fn seconding_sanity_check_allowed() {
+ let test_state = TestState::default();
+ test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+ // Candidate is seconded in a parent of the activated `leaf_a`.
+ const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
+ const LEAF_A_DEPTH: BlockNumber = 3;
+ let para_id = test_state.chain_ids[0];
+
+ let leaf_b_hash = Hash::from_low_u64_be(128);
+		// `a` is the grandparent of `b`.
+ let leaf_a_hash = Hash::from_low_u64_be(130);
+ let leaf_a_parent = get_parent_hash(leaf_a_hash);
+ let activated = ActivatedLeaf {
+ hash: leaf_a_hash,
+ number: LEAF_A_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)];
+ let test_leaf_a = TestLeaf { activated, min_relay_parents };
+
+ const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2;
+ const LEAF_B_DEPTH: BlockNumber = 4;
+
+ let activated = ActivatedLeaf {
+ hash: leaf_b_hash,
+ number: LEAF_B_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)];
+ let test_leaf_b = TestLeaf { activated, min_relay_parents };
+
+ activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+ activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 0).await;
+
+ let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
+
+		let expected_head_data = test_state.head_data.get(&para_id).unwrap();
+
+ let pov_hash = pov.hash();
+ let candidate = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_a_parent,
+ pov_hash,
+ head_data: expected_head_data.clone(),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ }
+ .build();
+
+ let second = CandidateBackingMessage::Second(
+ leaf_a_hash,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ assert_validate_seconded_candidate(
+ &mut virtual_overseer,
+ leaf_a_parent,
+ &candidate,
+ &pov,
+ &pvd,
+ &validation_code,
+ expected_head_data,
+ false,
+ )
+ .await;
+
+ // `seconding_sanity_check`
+ let expected_request_a = HypotheticalDepthRequest {
+ candidate_hash: candidate.hash(),
+ candidate_para: para_id,
+ parent_head_data_hash: pvd.parent_head.hash(),
+ candidate_relay_parent: leaf_a_parent,
+ fragment_tree_relay_parent: leaf_a_hash,
+ };
+ let expected_request_b = HypotheticalDepthRequest {
+ candidate_hash: candidate.hash(),
+ candidate_para: para_id,
+ parent_head_data_hash: pvd.parent_head.hash(),
+ candidate_relay_parent: leaf_a_parent,
+ fragment_tree_relay_parent: leaf_b_hash,
+ };
+ assert_hypothetical_depth_requests(
+ &mut virtual_overseer,
+ vec![(expected_request_a, vec![0, 1, 2, 3]), (expected_request_b, vec![3])],
+ )
+ .await;
+ // Prospective parachains are notified.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateSeconded(
+ candidate_para,
+ candidate_receipt,
+ _pvd,
+ tx,
+ ),
+ ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => {
+ // Any non-empty response will do.
+ tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap();
+ }
+ );
+
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate.hash(),
+ test_state.session(),
+ vec![ValidatorIndex(0)],
+ )
+ .await;
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::StatementDistribution(
+ StatementDistributionMessage::Share(
+ parent_hash,
+ _signed_statement,
+ )
+ ) if parent_hash == leaf_a_parent => {}
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => {
+ assert_eq!(leaf_a_parent, hash);
+ assert_matches!(statement.payload(), Statement::Seconded(_));
+ }
+ );
+
+ virtual_overseer
+ });
+}
+
+// Test that `seconding_sanity_check` works when a candidate is disallowed
+// for at least one leaf.
+#[test]
+fn seconding_sanity_check_disallowed() {
+ let test_state = TestState::default();
+ test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+ // Candidate is seconded in a parent of the activated `leaf_a`.
+ const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
+ const LEAF_A_DEPTH: BlockNumber = 3;
+ let para_id = test_state.chain_ids[0];
+
+ let leaf_b_hash = Hash::from_low_u64_be(128);
+		// `a` is the grandparent of `b`.
+ let leaf_a_hash = Hash::from_low_u64_be(130);
+ let leaf_a_parent = get_parent_hash(leaf_a_hash);
+ let activated = ActivatedLeaf {
+ hash: leaf_a_hash,
+ number: LEAF_A_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)];
+ let test_leaf_a = TestLeaf { activated, min_relay_parents };
+
+ const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2;
+ const LEAF_B_DEPTH: BlockNumber = 4;
+
+ let activated = ActivatedLeaf {
+ hash: leaf_b_hash,
+ number: LEAF_B_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)];
+ let test_leaf_b = TestLeaf { activated, min_relay_parents };
+
+ activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+
+ let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
+
+		let expected_head_data = test_state.head_data.get(&para_id).unwrap();
+
+ let pov_hash = pov.hash();
+ let candidate = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_a_parent,
+ pov_hash,
+ head_data: expected_head_data.clone(),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ }
+ .build();
+
+ let second = CandidateBackingMessage::Second(
+ leaf_a_hash,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ assert_validate_seconded_candidate(
+ &mut virtual_overseer,
+ leaf_a_parent,
+ &candidate,
+ &pov,
+ &pvd,
+ &validation_code,
+ expected_head_data,
+ false,
+ )
+ .await;
+
+ // `seconding_sanity_check`
+ let expected_request_a = HypotheticalDepthRequest {
+ candidate_hash: candidate.hash(),
+ candidate_para: para_id,
+ parent_head_data_hash: pvd.parent_head.hash(),
+ candidate_relay_parent: leaf_a_parent,
+ fragment_tree_relay_parent: leaf_a_hash,
+ };
+ assert_hypothetical_depth_requests(
+ &mut virtual_overseer,
+ vec![(expected_request_a, vec![0, 1, 2, 3])],
+ )
+ .await;
+ // Prospective parachains are notified.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateSeconded(
+ candidate_para,
+ candidate_receipt,
+ _pvd,
+ tx,
+ ),
+ ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => {
+ // Any non-empty response will do.
+ tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap();
+ }
+ );
+
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate.hash(),
+ test_state.session(),
+ vec![ValidatorIndex(0)],
+ )
+ .await;
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::StatementDistribution(
+ StatementDistributionMessage::Share(
+ parent_hash,
+ _signed_statement,
+ )
+ ) if parent_hash == leaf_a_parent => {}
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => {
+ assert_eq!(leaf_a_parent, hash);
+ assert_matches!(statement.payload(), Statement::Seconded(_));
+ }
+ );
+
+		// A seconded candidate occupies a depth; try to second another one.
+		// It is allowed in the new leaf but not in the old one.
+		// Expect it to be rejected.
+ activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 1).await;
+ let leaf_a_grandparent = get_parent_hash(leaf_a_parent);
+ let candidate = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_a_grandparent,
+ pov_hash,
+ head_data: expected_head_data.clone(),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ }
+ .build();
+
+ let second = CandidateBackingMessage::Second(
+ leaf_a_hash,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ assert_validate_seconded_candidate(
+ &mut virtual_overseer,
+ leaf_a_grandparent,
+ &candidate,
+ &pov,
+ &pvd,
+ &validation_code,
+ expected_head_data,
+ false,
+ )
+ .await;
+
+ // `seconding_sanity_check`
+ let expected_request_a = HypotheticalDepthRequest {
+ candidate_hash: candidate.hash(),
+ candidate_para: para_id,
+ parent_head_data_hash: pvd.parent_head.hash(),
+ candidate_relay_parent: leaf_a_grandparent,
+ fragment_tree_relay_parent: leaf_a_hash,
+ };
+ let expected_request_b = HypotheticalDepthRequest {
+ candidate_hash: candidate.hash(),
+ candidate_para: para_id,
+ parent_head_data_hash: pvd.parent_head.hash(),
+ candidate_relay_parent: leaf_a_grandparent,
+ fragment_tree_relay_parent: leaf_b_hash,
+ };
+ assert_hypothetical_depth_requests(
+ &mut virtual_overseer,
+ vec![
+				(expected_request_a, vec![3]), // The only returned depth is already occupied.
+ (expected_request_b, vec![1]),
+ ],
+ )
+ .await;
+
+ assert!(virtual_overseer
+ .recv()
+ .timeout(std::time::Duration::from_millis(50))
+ .await
+ .is_none());
+
+ virtual_overseer
+ });
+}
+
+// Test that a seconded candidate which is not approved by the prospective
+// parachains subsystem doesn't change the view.
+#[test]
+fn prospective_parachains_reject_candidate() {
+ let test_state = TestState::default();
+ test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+ // Candidate is seconded in a parent of the activated `leaf_a`.
+ const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
+ const LEAF_A_DEPTH: BlockNumber = 3;
+ let para_id = test_state.chain_ids[0];
+
+ let leaf_a_hash = Hash::from_low_u64_be(130);
+ let leaf_a_parent = get_parent_hash(leaf_a_hash);
+ let activated = ActivatedLeaf {
+ hash: leaf_a_hash,
+ number: LEAF_A_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)];
+ let test_leaf_a = TestLeaf { activated, min_relay_parents };
+
+ activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+
+ let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
+
+		let expected_head_data = test_state.head_data.get(&para_id).unwrap();
+
+ let pov_hash = pov.hash();
+ let candidate = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_a_parent,
+ pov_hash,
+ head_data: expected_head_data.clone(),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ }
+ .build();
+
+ let second = CandidateBackingMessage::Second(
+ leaf_a_hash,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ assert_validate_seconded_candidate(
+ &mut virtual_overseer,
+ leaf_a_parent,
+ &candidate,
+ &pov,
+ &pvd,
+ &validation_code,
+ expected_head_data,
+ false,
+ )
+ .await;
+
+ // `seconding_sanity_check`
+ let expected_request_a = vec![(
+ HypotheticalDepthRequest {
+ candidate_hash: candidate.hash(),
+ candidate_para: para_id,
+ parent_head_data_hash: pvd.parent_head.hash(),
+ candidate_relay_parent: leaf_a_parent,
+ fragment_tree_relay_parent: leaf_a_hash,
+ },
+ vec![0, 1, 2, 3],
+ )];
+ assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()).await;
+
+ // Prospective parachains are notified.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateSeconded(
+ candidate_para,
+ candidate_receipt,
+ _pvd,
+ tx,
+ ),
+ ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => {
+ // Reject it.
+ tx.send(Vec::new()).unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CollatorProtocol(CollatorProtocolMessage::Invalid(
+ relay_parent,
+ candidate_receipt,
+ )) if candidate_receipt.descriptor() == candidate.descriptor() &&
+ candidate_receipt.commitments_hash == candidate.commitments.hash() &&
+ relay_parent == leaf_a_parent
+ );
+
+ // Try seconding the same candidate.
+
+ let second = CandidateBackingMessage::Second(
+ leaf_a_hash,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ assert_validate_seconded_candidate(
+ &mut virtual_overseer,
+ leaf_a_parent,
+ &candidate,
+ &pov,
+ &pvd,
+ &validation_code,
+ expected_head_data,
+ false,
+ )
+ .await;
+
+ // `seconding_sanity_check`
+ assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a).await;
+ // Prospective parachains are notified.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateSeconded(
+ candidate_para,
+ candidate_receipt,
+ _pvd,
+ tx,
+ ),
+ ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => {
+ // Any non-empty response will do.
+ tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap();
+ }
+ );
+
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate.hash(),
+ test_state.session(),
+ vec![ValidatorIndex(0)],
+ )
+ .await;
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::StatementDistribution(
+ StatementDistributionMessage::Share(
+ parent_hash,
+ _signed_statement,
+ )
+ ) if parent_hash == leaf_a_parent => {}
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => {
+ assert_eq!(leaf_a_parent, hash);
+ assert_matches!(statement.payload(), Statement::Seconded(_));
+ }
+ );
+
+ virtual_overseer
+ });
+}
+
+// Test that a validator can second multiple candidates per single relay parent.
+#[test]
+fn second_multiple_candidates_per_relay_parent() {
+ let test_state = TestState::default();
+ test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+ // Candidate `a` is seconded in a parent of the activated `leaf`.
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100;
+ const LEAF_DEPTH: BlockNumber = 3;
+ let para_id = test_state.chain_ids[0];
+
+ let leaf_hash = Hash::from_low_u64_be(130);
+ let leaf_parent = get_parent_hash(leaf_hash);
+ let leaf_grandparent = get_parent_hash(leaf_parent);
+ let activated = ActivatedLeaf {
+ hash: leaf_hash,
+ number: LEAF_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)];
+ let test_leaf_a = TestLeaf { activated, min_relay_parents };
+
+ activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+
+ let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
+
+		let expected_head_data = test_state.head_data.get(&para_id).unwrap();
+
+ let pov_hash = pov.hash();
+ let candidate_a = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_parent,
+ pov_hash,
+ head_data: expected_head_data.clone(),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ persisted_validation_data_hash: pvd.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ };
+ let mut candidate_b = candidate_a.clone();
+ candidate_b.relay_parent = leaf_grandparent;
+
+ // With depths.
+ let candidate_a = (candidate_a.build(), 1);
+ let candidate_b = (candidate_b.build(), 2);
+
+ for candidate in &[candidate_a, candidate_b] {
+ let (candidate, depth) = candidate;
+ let second = CandidateBackingMessage::Second(
+ leaf_hash,
+ candidate.to_plain(),
+ pvd.clone(),
+ pov.clone(),
+ );
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+ assert_validate_seconded_candidate(
+ &mut virtual_overseer,
+ candidate.descriptor().relay_parent,
+ &candidate,
+ &pov,
+ &pvd,
+ &validation_code,
+ expected_head_data,
+ false,
+ )
+ .await;
+
+ // `seconding_sanity_check`
+ let expected_request_a = vec![(
+ HypotheticalDepthRequest {
+ candidate_hash: candidate.hash(),
+ candidate_para: para_id,
+ parent_head_data_hash: pvd.parent_head.hash(),
+ candidate_relay_parent: candidate.descriptor().relay_parent,
+ fragment_tree_relay_parent: leaf_hash,
+ },
+ vec![*depth],
+ )];
+ assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone())
+ .await;
+
+ // Prospective parachains are notified.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateSeconded(
+ candidate_para,
+ candidate_receipt,
+ _pvd,
+ tx,
+ ),
+ ) if &candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => {
+ // Any non-empty response will do.
+ tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap();
+ }
+ );
+
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate.hash(),
+ test_state.session(),
+ vec![ValidatorIndex(0)],
+ )
+ .await;
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::StatementDistribution(
+ StatementDistributionMessage::Share(
+ parent_hash,
+ _signed_statement,
+ )
+ ) if parent_hash == candidate.descriptor().relay_parent => {}
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => {
+ assert_eq!(candidate.descriptor().relay_parent, hash);
+ assert_matches!(statement.payload(), Statement::Seconded(_));
+ }
+ );
+ }
+
+ virtual_overseer
+ });
+}
+
+// Test that the candidate reaches quorum successfully.
+#[test]
+fn backing_works() {
+ let test_state = TestState::default();
+ test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+ // Candidate `a` is seconded in a parent of the activated `leaf`.
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100;
+ const LEAF_DEPTH: BlockNumber = 3;
+ let para_id = test_state.chain_ids[0];
+
+ let leaf_hash = Hash::from_low_u64_be(130);
+ let leaf_parent = get_parent_hash(leaf_hash);
+ let activated = ActivatedLeaf {
+ hash: leaf_hash,
+ number: LEAF_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)];
+ let test_leaf_a = TestLeaf { activated, min_relay_parents };
+
+ activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+
+ let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd = dummy_pvd();
+ let validation_code = ValidationCode(vec![1, 2, 3]);
+
+		let expected_head_data = test_state.head_data.get(&para_id).unwrap();
+
+ let pov_hash = pov.hash();
+
+ let candidate_a = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_parent,
+ pov_hash,
+ head_data: expected_head_data.clone(),
+ erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+ validation_code: validation_code.0.clone(),
+ persisted_validation_data_hash: pvd.hash(),
+ ..Default::default()
+ }
+ .build();
+
+ let candidate_a_hash = candidate_a.hash();
+
+ let public1 = CryptoStore::sr25519_generate_new(
+ &*test_state.keystore,
+ ValidatorId::ID,
+ Some(&test_state.validators[5].to_seed()),
+ )
+ .await
+ .expect("Insert key into keystore");
+ let public2 = CryptoStore::sr25519_generate_new(
+ &*test_state.keystore,
+ ValidatorId::ID,
+ Some(&test_state.validators[2].to_seed()),
+ )
+ .await
+ .expect("Insert key into keystore");
+
+		// The signing context should have the parent hash the candidate is based on.
+ let signing_context =
+ SigningContext { parent_hash: leaf_parent, session_index: test_state.session() };
+ let signed_a = SignedFullStatementWithPVD::sign(
+ &test_state.keystore,
+ StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()),
+ &signing_context,
+ ValidatorIndex(2),
+ &public2.into(),
+ )
+ .await
+ .ok()
+ .flatten()
+ .expect("should be signed");
+
+ let signed_b = SignedFullStatementWithPVD::sign(
+ &test_state.keystore,
+ StatementWithPVD::Valid(candidate_a_hash),
+ &signing_context,
+ ValidatorIndex(5),
+ &public1.into(),
+ )
+ .await
+ .ok()
+ .flatten()
+ .expect("should be signed");
+
+ let statement = CandidateBackingMessage::Statement(leaf_parent, signed_a.clone());
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await;
+
+ // Prospective parachains are notified about candidate seconded first.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateSeconded(
+ candidate_para,
+ candidate_receipt,
+ _pvd,
+ tx,
+ ),
+ ) if candidate_receipt == candidate_a && candidate_para == para_id && pvd == _pvd => {
+ // Any non-empty response will do.
+ tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap();
+ }
+ );
+
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate_a_hash,
+ test_state.session(),
+ vec![ValidatorIndex(2)],
+ )
+ .await;
+
+ assert_validate_seconded_candidate(
+ &mut virtual_overseer,
+ candidate_a.descriptor().relay_parent,
+ &candidate_a,
+ &pov,
+ &pvd,
+ &validation_code,
+ expected_head_data,
+ true,
+ )
+ .await;
+
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate_a_hash,
+ test_state.session(),
+ vec![ValidatorIndex(0)],
+ )
+ .await;
+ // Prospective parachains are notified about candidate backed.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateBacked(
+ candidate_para_id, candidate_hash
+ ),
+ ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::Provisioner(
+ ProvisionerMessage::ProvisionableData(
+ _,
+ ProvisionableData::BackedCandidate(candidate_receipt)
+ )
+ ) => {
+ assert_eq!(candidate_receipt, candidate_a.to_plain());
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::StatementDistribution(
+ StatementDistributionMessage::Share(hash, _stmt)
+ ) => {
+ assert_eq!(leaf_parent, hash);
+ }
+ );
+
+ let statement = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone());
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await;
+ test_dispute_coordinator_notifications(
+ &mut virtual_overseer,
+ candidate_a_hash,
+ test_state.session(),
+ vec![ValidatorIndex(5)],
+ )
+ .await;
+ virtual_overseer
+ });
+}
+
+// Tests that validators start work on consecutive prospective parachain blocks.
+#[test]
+fn concurrent_dependent_candidates() {
+ let test_state = TestState::default();
+ test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+		// Candidate `a` is seconded in a grandparent of the activated `leaf`,
+		// and candidate `b` in its parent.
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100;
+ const LEAF_DEPTH: BlockNumber = 3;
+ let para_id = test_state.chain_ids[0];
+
+ let leaf_hash = Hash::from_low_u64_be(130);
+ let leaf_parent = get_parent_hash(leaf_hash);
+ let leaf_grandparent = get_parent_hash(leaf_parent);
+ let activated = ActivatedLeaf {
+ hash: leaf_hash,
+ number: LEAF_BLOCK_NUMBER,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+ let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)];
+ let test_leaf_a = TestLeaf { activated, min_relay_parents };
+
+ activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+
+ let head_data = &[
+ HeadData(vec![10, 20, 30]), // Before `a`.
+ HeadData(vec![11, 21, 31]), // After `a`.
+ HeadData(vec![12, 22]), // After `b`.
+ ];
+
+ let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) };
+ let pvd_a = PersistedValidationData {
+ parent_head: head_data[0].clone(),
+ relay_parent_number: LEAF_BLOCK_NUMBER - 2,
+ relay_parent_storage_root: Hash::zero(),
+ max_pov_size: 1024,
+ };
+
+ let pov_b = PoV { block_data: BlockData(vec![22, 14, 100]) };
+ let pvd_b = PersistedValidationData {
+ parent_head: head_data[1].clone(),
+ relay_parent_number: LEAF_BLOCK_NUMBER - 1,
+ relay_parent_storage_root: Hash::zero(),
+ max_pov_size: 1024,
+ };
+ let validation_code = ValidationCode(vec![1, 2, 3]);
+
+ let candidate_a = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_grandparent,
+ pov_hash: pov_a.hash(),
+ head_data: head_data[1].clone(),
+ erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()),
+ persisted_validation_data_hash: pvd_a.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ }
+ .build();
+ let candidate_b = TestCandidateBuilder {
+ para_id,
+ relay_parent: leaf_parent,
+ pov_hash: pov_b.hash(),
+ head_data: head_data[2].clone(),
+ erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()),
+ persisted_validation_data_hash: pvd_b.hash(),
+ validation_code: validation_code.0.clone(),
+ ..Default::default()
+ }
+ .build();
+ let candidate_a_hash = candidate_a.hash();
+ let candidate_b_hash = candidate_b.hash();
+
+ let public1 = CryptoStore::sr25519_generate_new(
+ &*test_state.keystore,
+ ValidatorId::ID,
+ Some(&test_state.validators[5].to_seed()),
+ )
+ .await
+ .expect("Insert key into keystore");
+ let public2 = CryptoStore::sr25519_generate_new(
+ &*test_state.keystore,
+ ValidatorId::ID,
+ Some(&test_state.validators[2].to_seed()),
+ )
+ .await
+ .expect("Insert key into keystore");
+
+		// The signing context should have the parent hash the candidate is based on.
+ let signing_context =
+ SigningContext { parent_hash: leaf_grandparent, session_index: test_state.session() };
+ let signed_a = SignedFullStatementWithPVD::sign(
+ &test_state.keystore,
+ StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()),
+ &signing_context,
+ ValidatorIndex(2),
+ &public2.into(),
+ )
+ .await
+ .ok()
+ .flatten()
+ .expect("should be signed");
+
+ let signing_context =
+ SigningContext { parent_hash: leaf_parent, session_index: test_state.session() };
+ let signed_b = SignedFullStatementWithPVD::sign(
+ &test_state.keystore,
+ StatementWithPVD::Seconded(candidate_b.clone(), pvd_b.clone()),
+ &signing_context,
+ ValidatorIndex(5),
+ &public1.into(),
+ )
+ .await
+ .ok()
+ .flatten()
+ .expect("should be signed");
+
+ let statement_a = CandidateBackingMessage::Statement(leaf_grandparent, signed_a.clone());
+ let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone());
+
+ virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await;
+		// At this point the subsystem is waiting for responses to the first
+		// message, so send the second one without blocking.
+ let _ = virtual_overseer
+ .tx
+ .start_send_unpin(FromOrchestra::Communication { msg: statement_b });
+
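+		// Track the candidate hashes for which `Valid` statements were shared.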
+ let mut valid_statements = HashSet::new();
+
+ loop {
+ let msg = virtual_overseer
+ .recv()
+ .timeout(std::time::Duration::from_secs(1))
+ .await
+ .expect("overseer recv timed out");
+
+ // Order is not guaranteed since we have 2 statements being handled concurrently.
+ match msg {
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateSeconded(.., tx),
+ ) => {
+ tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap();
+ },
+ AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::ImportStatements {
+ ..
+ }) => {},
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ _,
+ RuntimeApiRequest::ValidationCodeByHash(_, tx),
+ )) => {
+ tx.send(Ok(Some(validation_code.clone()))).unwrap();
+ },
+ AllMessages::AvailabilityDistribution(
+ AvailabilityDistributionMessage::FetchPoV { candidate_hash, tx, .. },
+ ) => {
+ let pov = if candidate_hash == candidate_a_hash {
+ &pov_a
+ } else if candidate_hash == candidate_b_hash {
+ &pov_b
+ } else {
+ panic!("unknown candidate hash")
+ };
+ tx.send(pov.clone()).unwrap();
+ },
+ AllMessages::CandidateValidation(
+ CandidateValidationMessage::ValidateFromExhaustive(.., candidate, _, _, tx),
+ ) => {
+ let candidate_hash = candidate.hash();
+ let (head_data, pvd) = if candidate_hash == candidate_a_hash {
+ (&head_data[1], &pvd_a)
+ } else if candidate_hash == candidate_b_hash {
+ (&head_data[2], &pvd_b)
+ } else {
+ panic!("unknown candidate hash")
+ };
+ tx.send(Ok(ValidationResult::Valid(
+ CandidateCommitments {
+ head_data: head_data.clone(),
+ horizontal_messages: Vec::new(),
+ upward_messages: Vec::new(),
+ new_validation_code: None,
+ processed_downward_messages: 0,
+ hrmp_watermark: 0,
+ },
+ pvd.clone(),
+ )))
+ .unwrap();
+ },
+ AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData {
+ tx,
+ ..
+ }) => {
+ tx.send(Ok(())).unwrap();
+ },
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::CandidateBacked(..),
+ ) => {},
+ AllMessages::Provisioner(ProvisionerMessage::ProvisionableData(..)) => {},
+ AllMessages::StatementDistribution(StatementDistributionMessage::Share(
+ _,
+ statement,
+ )) => {
+ assert_eq!(statement.validator_index(), ValidatorIndex(0));
+ let payload = statement.payload();
+ assert_matches!(
+ payload.clone(),
+ Statement::Valid(hash)
+ if hash == candidate_a_hash || hash == candidate_b_hash =>
+ {
+ assert!(valid_statements.insert(hash));
+ }
+ );
+
+ if valid_statements.len() == 2 {
+ break
+ }
+ },
+ _ => panic!("unexpected message received from overseer: {:?}", msg),
+ }
+ }
+
+ assert!(
+ valid_statements.contains(&candidate_a_hash) &&
+ valid_statements.contains(&candidate_b_hash)
+ );
+
+ virtual_overseer
+ });
+}
diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs
index 9972b60490a1..ab9d678f77b0 100644
--- a/node/core/prospective-parachains/src/fragment_tree.rs
+++ b/node/core/prospective-parachains/src/fragment_tree.rs
@@ -62,7 +62,7 @@ use polkadot_node_subsystem_util::inclusion_emulator::staging::{
ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo,
};
use polkadot_primitives::vstaging::{
- BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId,
+ BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId,
PersistedValidationData,
};
@@ -158,6 +158,17 @@ impl CandidateStorage {
})
}
+ /// Get head-data by hash.
+ pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> {
+		// Find some candidate whose parent head data has the requested hash.
+ let a_candidate_hash = self.by_parent_head.get(hash).and_then(|m| m.iter().next())?;
+
+ // Extract the full parent head from that candidate's `PersistedValidationData`.
+ self.by_candidate_hash
+ .get(a_candidate_hash)
+ .map(|e| &e.candidate.persisted_validation_data.parent_head)
+ }
+
fn iter_para_children<'a>(
&'a self,
parent_head_hash: &Hash,
@@ -271,13 +282,19 @@ impl Scope {
.unwrap_or_else(|| self.relay_parent.clone())
}
-	fn ancestor_by_hash(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
+	/// Get the ancestor of the fragment tree by hash.
+	pub fn ancestor_by_hash(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
if hash == &self.relay_parent.hash {
return Some(self.relay_parent.clone())
}
self.ancestors_by_hash.get(hash).map(|info| info.clone())
}
+
+ /// Get the base constraints of the scope
+ pub fn base_constraints(&self) -> &Constraints {
+ &self.base_constraints
+ }
}
// We use indices into a flat vector to refer to nodes in the tree.
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs
index 963c99e0a743..f90d23e92ad7 100644
--- a/node/core/prospective-parachains/src/lib.rs
+++ b/node/core/prospective-parachains/src/lib.rs
@@ -34,7 +34,8 @@ use futures::{channel::oneshot, prelude::*};
use polkadot_node_subsystem::{
messages::{
ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest,
- ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest,
+ ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage,
+ RuntimeApiRequest,
},
overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
};
@@ -137,8 +138,10 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<()
answer_hypothetical_depths_request(&view, request, tx),
ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) =>
answer_tree_membership_request(&view, para, candidate, tx),
- ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) =>
- answer_minimum_relay_parent_request(&view, para, relay_parent, tx),
+ ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) =>
+ answer_minimum_relay_parents_request(&view, relay_parent, tx),
+ ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) =>
+ answer_prospective_validation_data_request(&view, request, tx),
},
}
}
@@ -160,6 +163,9 @@ async fn handle_active_leaves_update(
}
for activated in update.activated.into_iter() {
+ // TODO [now]: skip leaves which don't have prospective parachains
+ // enabled. This should be a runtime API version check.
+
let hash = activated.hash;
let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?;
@@ -331,7 +337,7 @@ async fn handle_candidate_backed(
target: LOG_TARGET,
para_id = ?para,
?candidate_hash,
- "Received instructio to back candidate",
+ "Received instruction to back candidate",
);
return Ok(())
@@ -461,19 +467,75 @@ fn answer_tree_membership_request(
let _ = tx.send(membership);
}
-fn answer_minimum_relay_parent_request(
+fn answer_minimum_relay_parents_request(
view: &View,
- para: ParaId,
relay_parent: Hash,
-	tx: oneshot::Sender<Option<BlockNumber>>,
+	tx: oneshot::Sender<Vec<(ParaId, BlockNumber)>>,
+) {
+ let mut v = Vec::new();
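+	// For each para at this leaf, report the earliest relay parent covered
+	// by its fragment tree.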
+ if let Some(leaf_data) = view.active_leaves.get(&relay_parent) {
+ for (para_id, fragment_tree) in &leaf_data.fragment_trees {
+ v.push((*para_id, fragment_tree.scope().earliest_relay_parent().number));
+ }
+ }
+
+ let _ = tx.send(v);
+}
+
+fn answer_prospective_validation_data_request(
+ view: &View,
+ request: ProspectiveValidationDataRequest,
+	tx: oneshot::Sender<Option<PersistedValidationData>>,
) {
- let res = view
+ // 1. Try to get the head-data from the candidate store if known.
+ // 2. Otherwise, it might exist as the base in some relay-parent and we can find it by
+ // iterating fragment trees.
+ // 3. Otherwise, it is unknown.
+ // 4. Also try to find the relay parent block info by scanning
+ // fragment trees.
+	// 5. If both the head data and the relay parent block info are found, succeed; otherwise, fail.
+
+ let storage = match view.candidate_storage.get(&request.para_id) {
+ None => {
+ let _ = tx.send(None);
+ return
+ },
+ Some(s) => s,
+ };
+
+ let mut head_data =
+ storage.head_data_by_hash(&request.parent_head_data_hash).map(|x| x.clone());
+ let mut relay_parent_info = None;
+
+ for fragment_tree in view
.active_leaves
- .get(&relay_parent)
- .and_then(|data| data.fragment_trees.get(¶))
- .map(|tree| tree.scope().earliest_relay_parent().number);
+ .values()
+ .filter_map(|x| x.fragment_trees.get(&request.para_id))
+ {
+ if head_data.is_some() && relay_parent_info.is_some() {
+ break
+ }
+ if relay_parent_info.is_none() {
+ relay_parent_info =
+ fragment_tree.scope().ancestor_by_hash(&request.candidate_relay_parent);
+ }
+ if head_data.is_none() {
+ let required_parent = &fragment_tree.scope().base_constraints().required_parent;
+ if required_parent.hash() == request.parent_head_data_hash {
+ head_data = Some(required_parent.clone());
+ }
+ }
+ }
- let _ = tx.send(res);
+ let _ = tx.send(match (head_data, relay_parent_info) {
+ (Some(h), Some(i)) => Some(PersistedValidationData {
+ parent_head: h,
+ relay_parent_number: i.number,
+ relay_parent_storage_root: i.storage_root,
+ max_pov_size: request.max_pov_size,
+ }),
+ _ => None,
+ });
}
#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs
index b8aaaa18c10d..ddcf6b3c98b0 100644
--- a/node/malus/src/variants/suggest_garbage_candidate.rs
+++ b/node/malus/src/variants/suggest_garbage_candidate.rs
@@ -30,7 +30,6 @@ use polkadot_cli::{
ProvideRuntimeApi,
},
};
-use polkadot_node_core_candidate_validation::find_validation_data;
use polkadot_node_primitives::{AvailableData, BlockData, PoV};
use polkadot_primitives::v2::{CandidateDescriptor, CandidateHash};
@@ -88,7 +87,13 @@ where
) -> Option<FromOrchestra<CandidateBackingMessage>> {
match msg {
FromOrchestra::Communication {
- msg: CandidateBackingMessage::Second(relay_parent, candidate, _pov),
+ msg:
+ CandidateBackingMessage::Second(
+ relay_parent,
+ candidate,
+ persisted_validation_data,
+ _pov,
+ ),
} => {
gum::debug!(
target: MALUS,
@@ -103,7 +108,7 @@ where
let mut new_sender = subsystem_sender.clone();
let _candidate = candidate.clone();
self.spawner.spawn_blocking(
- "malus-get-validation-data",
+ "malus-get-n-validators",
Some("malus"),
Box::pin(async move {
gum::trace!(target: MALUS, "Requesting validators");
@@ -114,25 +119,16 @@ where
.unwrap()
.len();
gum::trace!(target: MALUS, "Validators {}", n_validators);
- match find_validation_data(&mut new_sender, &_candidate.descriptor()).await
- {
- Ok(Some((validation_data, validation_code))) => {
- sender
- .send((validation_data, validation_code, n_validators))
- .expect("channel is still open");
- },
- _ => {
- panic!("Unable to fetch validation data");
- },
- }
+ sender.send(n_validators).expect("channel is still open");
}),
);
- let (validation_data, validation_code, n_validators) = receiver.recv().unwrap();
+ let n_validators = receiver.recv().unwrap();
- let validation_data_hash = validation_data.hash();
- let validation_code_hash = validation_code.hash();
- let validation_data_relay_parent_number = validation_data.relay_parent_number;
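+					// The persisted validation data now arrives with the `Second` message,
+					// so the relevant hashes are taken from it directly.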
+ let validation_data_hash = persisted_validation_data.hash();
+ let validation_code_hash = candidate.descriptor.validation_code_hash;
+ let validation_data_relay_parent_number =
+ persisted_validation_data.relay_parent_number;
gum::trace!(
target: MALUS,
@@ -142,11 +138,13 @@ where
?validation_data_hash,
?validation_code_hash,
?validation_data_relay_parent_number,
- "Fetched validation data."
+ "Fetched current validators set"
);
- let malicious_available_data =
- AvailableData { pov: Arc::new(pov.clone()), validation_data };
+ let malicious_available_data = AvailableData {
+ pov: Arc::new(pov.clone()),
+ validation_data: persisted_validation_data.clone(),
+ };
let pov_hash = pov.hash();
let erasure_root = {
@@ -209,7 +207,12 @@ where
.insert(malicious_candidate_hash, candidate.hash());
let message = FromOrchestra::Communication {
- msg: CandidateBackingMessage::Second(relay_parent, malicious_candidate, pov),
+ msg: CandidateBackingMessage::Second(
+ relay_parent,
+ malicious_candidate,
+ persisted_validation_data,
+ pov,
+ ),
};
Some(message)
diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs
index 592feaf9124a..30ff333b40fb 100644
--- a/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/node/network/collator-protocol/src/validator_side/mod.rs
@@ -53,7 +53,10 @@ use polkadot_node_subsystem::{
overseer, FromOrchestra, OverseerSignal, PerLeafSpan, SubsystemSender,
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
-use polkadot_primitives::v2::{CandidateReceipt, CollatorId, Hash, Id as ParaId};
+use polkadot_primitives::v2::{
+ CandidateReceipt, CollatorId, Hash, Id as ParaId, OccupiedCoreAssumption,
+ PersistedValidationData,
+};
use crate::error::Result;
@@ -1307,6 +1310,39 @@ async fn dequeue_next_collation_and_fetch(
}
}
+#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
+async fn request_persisted_validation_data(
+ ctx: &mut Context,
+ relay_parent: Hash,
+ para_id: ParaId,
+) -> Option<PersistedValidationData> {
+ // TODO [https://github.com/paritytech/polkadot/issues/5054]
+ //
+ // As of https://github.com/paritytech/polkadot/pull/5557 the
+ // `Second` message requires the `PersistedValidationData` to be
+ // supplied.
+ //
+ // Without asynchronous backing, this can be easily fetched from the
+ // chain state.
+ //
+ // This assumes the core is _scheduled_, in keeping with the effective
+ // current behavior. If the core is occupied, we simply don't return
+ // anything. Likewise with runtime API errors, which are rare.
+ let res = polkadot_node_subsystem_util::request_persisted_validation_data(
+ relay_parent,
+ para_id,
+ OccupiedCoreAssumption::Free,
+ ctx.sender(),
+ )
+ .await
+ .await;
+
+ match res {
+ Ok(Ok(Some(pvd))) => Some(pvd),
+ _ => None,
+ }
+}
+
/// Handle a fetched collation result.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_collation_fetched_result<Context>(
@@ -1351,13 +1387,31 @@ async fn handle_collation_fetched_result<Context>(
if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) {
collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash);
- ctx.sender()
- .send_message(CandidateBackingMessage::Second(
+
+ if let Some(pvd) = request_persisted_validation_data(
+ ctx,
+ candidate_receipt.descriptor().relay_parent,
+ candidate_receipt.descriptor().para_id,
+ )
+ .await
+ {
+ // TODO [https://github.com/paritytech/polkadot/issues/5054]
+ //
+ // If PVD isn't available (core occupied) then we'll silently
+ // just not second this. But prior to asynchronous backing
+ // we wouldn't second anyway because the core is occupied.
+ //
+ // The proper refactoring would be to accept declares from collators
+ // but not even fetch from them if the core is occupied. Given 5054,
+ // there's no reason to do this right now.
+ ctx.send_message(CandidateBackingMessage::Second(
relay_parent.clone(),
candidate_receipt,
+ pvd,
pov,
))
.await;
+ }
entry.insert(collation_event);
} else {
diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs
index 77c209361422..3c45a675aa48 100644
--- a/node/network/collator-protocol/src/validator_side/tests.rs
+++ b/node/network/collator-protocol/src/validator_side/tests.rs
@@ -32,8 +32,8 @@ use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeA
use polkadot_node_subsystem_test_helpers as test_helpers;
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_primitives::v2::{
- CollatorPair, CoreState, GroupIndex, GroupRotationInfo, OccupiedCore, ScheduledCore,
- ValidatorId, ValidatorIndex,
+ CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore,
+ PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex,
};
use polkadot_primitives_test_helpers::{
dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash,
@@ -245,15 +245,45 @@ async fn assert_candidate_backing_second(
expected_para_id: ParaId,
expected_pov: &PoV,
) -> CandidateReceipt {
+ // TODO [https://github.com/paritytech/polkadot/issues/5054]
+ //
+ // While collator protocol isn't updated, it's expected to receive
+ // a Runtime API request for persisted validation data.
+ let pvd = PersistedValidationData {
+ parent_head: HeadData(vec![7, 8, 9]),
+ relay_parent_number: 5,
+ max_pov_size: 1024,
+ relay_parent_storage_root: Default::default(),
+ };
+
assert_matches!(
overseer_recv(virtual_overseer).await,
- AllMessages::CandidateBacking(CandidateBackingMessage::Second(relay_parent, candidate_receipt, incoming_pov)
- ) => {
- assert_eq!(expected_relay_parent, relay_parent);
- assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id);
- assert_eq!(*expected_pov, incoming_pov);
- candidate_receipt
- })
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ hash,
+ RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx),
+ )) => {
+ assert_eq!(expected_relay_parent, hash);
+ assert_eq!(expected_para_id, para_id);
+ assert_eq!(OccupiedCoreAssumption::Free, assumption);
+ tx.send(Ok(Some(pvd.clone()))).unwrap();
+ }
+ );
+
+ assert_matches!(
+ overseer_recv(virtual_overseer).await,
+ AllMessages::CandidateBacking(CandidateBackingMessage::Second(
+ relay_parent,
+ candidate_receipt,
+ received_pvd,
+ incoming_pov,
+ )) => {
+ assert_eq!(expected_relay_parent, relay_parent);
+ assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id);
+ assert_eq!(*expected_pov, incoming_pov);
+ assert_eq!(pvd, received_pvd);
+ candidate_receipt
+ }
+ )
}
/// Assert that a collator got disconnected.
diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs
index 01b2efd53b86..f91b0980c966 100644
--- a/node/network/statement-distribution/src/error.rs
+++ b/node/network/statement-distribution/src/error.rs
@@ -18,9 +18,11 @@
//! Error handling related code and Error/Result definitions.
use polkadot_node_network_protocol::PeerId;
-use polkadot_node_subsystem::SubsystemError;
+use polkadot_node_subsystem::{RuntimeApiError, SubsystemError};
use polkadot_node_subsystem_util::runtime;
-use polkadot_primitives::v2::{CandidateHash, Hash};
+use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId};
+
+use futures::channel::oneshot;
use crate::LOG_TARGET;
@@ -56,6 +58,12 @@ pub enum Error {
#[error("Error while accessing runtime information")]
Runtime(#[from] runtime::Error),
+ #[error("RuntimeAPISubsystem channel closed before receipt")]
+ RuntimeApiUnavailable(#[source] oneshot::Canceled),
+
+ #[error("Fetching persisted validation data for para {0:?}, {1:?}")]
+ FetchPersistedValidationData(ParaId, RuntimeApiError),
+
#[error("Relay parent could not be found in active heads")]
NoSuchHead(Hash),
diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs
index 2abb765f392b..ee5f2869e8a0 100644
--- a/node/network/statement-distribution/src/lib.rs
+++ b/node/network/statement-distribution/src/lib.rs
@@ -33,7 +33,9 @@ use polkadot_node_network_protocol::{
v1::{self as protocol_v1, StatementMetadata},
IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View,
};
-use polkadot_node_primitives::{SignedFullStatement, Statement, UncheckedSignedFullStatement};
+use polkadot_node_primitives::{
+ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement,
+};
use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS};
use polkadot_node_subsystem::{
@@ -43,12 +45,12 @@ use polkadot_node_subsystem::{
StatementDistributionMessage,
},
overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem,
- SubsystemError,
+ StatementDistributionSenderTrait, SubsystemError,
};
use polkadot_primitives::v2::{
AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash,
- SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex,
- ValidatorSignature,
+ Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, SignedStatement, SigningContext,
+ UncheckedSignedStatement, ValidatorId, ValidatorIndex, ValidatorSignature,
};
use futures::{
@@ -657,6 +659,8 @@ enum DeniedStatement {
struct ActiveHeadData {
/// All candidates we are aware of for this head, keyed by hash.
candidates: HashSet<CandidateHash>,
+ /// Persisted validation data cache.
+ cached_validation_data: HashMap<ParaId, PersistedValidationData>,
/// Stored statements for circulation to peers.
///
/// These are iterable in insertion order, and `Seconded` statements are always
@@ -682,6 +686,7 @@ impl ActiveHeadData {
) -> Self {
ActiveHeadData {
candidates: Default::default(),
+ cached_validation_data: Default::default(),
statements: Default::default(),
waiting_large_statements: Default::default(),
validators,
@@ -691,6 +696,37 @@ impl ActiveHeadData {
}
}
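+ /// Fetch the persisted validation data for the given para, consulting the
+ /// cache before making a Runtime API request. Returns `Ok(None)` when the
+ /// runtime reports no validation data, e.g. because the para's core is
+ /// occupied.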
+ async fn fetch_persisted_validation_data<Sender>(
+ &mut self,
+ sender: &mut Sender,
+ relay_parent: Hash,
+ para_id: ParaId,
+ ) -> Result<Option<&PersistedValidationData>>
+ where
+ Sender: StatementDistributionSenderTrait,
+ {
+ if let Entry::Vacant(entry) = self.cached_validation_data.entry(para_id) {
+ let persisted_validation_data =
+ polkadot_node_subsystem_util::request_persisted_validation_data(
+ relay_parent,
+ para_id,
+ OccupiedCoreAssumption::Free,
+ sender,
+ )
+ .await
+ .await
+ .map_err(Error::RuntimeApiUnavailable)?
+ .map_err(|err| Error::FetchPersistedValidationData(para_id, err))?;
+
+ match persisted_validation_data {
+ Some(pvd) => entry.insert(pvd),
+ None => return Ok(None),
+ };
+ }
+
+ Ok(self.cached_validation_data.get(&para_id))
+ }
+
/// Note the given statement.
///
/// If it was not already known and can be accepted, returns `NotedStatement::Fresh`,
@@ -1554,6 +1590,45 @@ async fn handle_incoming_message<'a, Context>(
Ok(false) => {},
}
+ // TODO [https://github.com/paritytech/polkadot/issues/5055]
+ //
+ // For `Seconded` statements `None` or `Err` means we couldn't fetch the PVD, which
+ // means the statement shouldn't be accepted.
+ //
+ // In case of `Valid`, the data should already be cached, so this performs
+ // no Runtime API calls and always returns `Ok(Some(_))`.
+ if let Statement::Seconded(receipt) = statement.payload() {
+ let para_id = receipt.descriptor.para_id;
+ // Either call the Runtime API or check that validation data is cached.
+ let result = active_head
+ .fetch_persisted_validation_data(ctx.sender(), relay_parent, para_id)
+ .await;
+ if !matches!(result, Ok(Some(_))) {
+ return None
+ }
+ }
+
+ // Extend the payload with persisted validation data required by the backing
+ // subsystem.
+ //
+ // Do this in advance of noting the statement, since we cannot borrow the
+ // active head mutably while also reading from its cache.
+ let statement_with_pvd = statement
+ .clone()
+ .convert_to_superpayload_with(|statement| match statement {
+ Statement::Seconded(receipt) => {
+ let para_id = &receipt.descriptor.para_id;
+ let persisted_validation_data = active_head
+ .cached_validation_data
+ .get(para_id)
+ .cloned()
+ .expect("pvd is ensured to be cached above; qed");
+ StatementWithPVD::Seconded(receipt, persisted_validation_data)
+ },
+ Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash),
+ })
+ .expect("payload was checked with conversion from compact; qed");
+
// Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation
// or unpinned to a seconded candidate. So it is safe to place it into the storage.
match active_head.note_statement(statement) {
@@ -1567,11 +1642,8 @@ async fn handle_incoming_message<'a, Context>(
// When we receive a new message from a peer, we forward it to the
// candidate backing subsystem.
- ctx.send_message(CandidateBackingMessage::Statement(
- relay_parent,
- statement.statement.clone(),
- ))
- .await;
+ ctx.send_message(CandidateBackingMessage::Statement(relay_parent, statement_with_pvd))
+ .await;
Some((relay_parent, statement))
},
diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs
index 9f5b4f6de326..8635dcec3a17 100644
--- a/node/network/statement-distribution/src/tests.rs
+++ b/node/network/statement-distribution/src/tests.rs
@@ -26,14 +26,16 @@ use polkadot_node_network_protocol::{
},
view, ObservedRole,
};
-use polkadot_node_primitives::{Statement, UncheckedSignedFullStatement};
+use polkadot_node_primitives::{
+ SignedFullStatementWithPVD, Statement, UncheckedSignedFullStatement,
+};
use polkadot_node_subsystem::{
jaeger,
messages::{network_bridge_event, AllMessages, RuntimeApiMessage, RuntimeApiRequest},
ActivatedLeaf, LeafStatus,
};
use polkadot_node_subsystem_test_helpers::mock::make_ferdie_keystore;
-use polkadot_primitives::v2::{Hash, SessionInfo, ValidationCode};
+use polkadot_primitives::v2::{Hash, HeadData, SessionInfo, ValidationCode};
use polkadot_primitives_test_helpers::{
dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng,
};
@@ -44,6 +46,27 @@ use sp_keyring::Sr25519Keyring;
use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr};
use std::{iter::FromIterator as _, sync::Arc, time::Duration};
+fn dummy_pvd() -> PersistedValidationData {
+ PersistedValidationData {
+ parent_head: HeadData(vec![7, 8, 9]),
+ relay_parent_number: 5,
+ max_pov_size: 1024,
+ relay_parent_storage_root: Default::default(),
+ }
+}
+
+fn extend_statement_with_pvd(
+ statement: SignedFullStatement,
+ pvd: PersistedValidationData,
+) -> SignedFullStatementWithPVD {
+ statement
+ .convert_to_superpayload_with(|statement| match statement {
+ Statement::Seconded(receipt) => StatementWithPVD::Seconded(receipt, pvd),
+ Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash),
+ })
+ .unwrap()
+}
+
#[test]
fn active_head_accepts_only_2_seconded_per_validator() {
let validators = vec![
@@ -699,12 +722,14 @@ fn circulated_statement_goes_to_all_peers_with_view() {
#[test]
fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
+ const PARA_ID: ParaId = ParaId::new(1);
let hash_a = Hash::repeat_byte(1);
+ let pvd = dummy_pvd();
let candidate = {
let mut c = dummy_committed_candidate_receipt(dummy_hash());
c.descriptor.relay_parent = hash_a;
- c.descriptor.para_id = 1.into();
+ c.descriptor.para_id = PARA_ID;
c
};
@@ -845,18 +870,32 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
})
.await;
+ let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
+
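+ // The subsystem is expected to fetch the persisted validation data for the
+ // seconded candidate before forwarding the statement to candidate backing.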
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ hash,
+ RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx),
+ )) if para_id == PARA_ID &&
+ assumption == OccupiedCoreAssumption::Free &&
+ hash == hash_a =>
+ {
+ tx.send(Ok(Some(pvd))).unwrap();
+ }
+ );
+
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(p, r)
) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => {}
);
-
assert_matches!(
handle.recv().await,
AllMessages::CandidateBacking(
CandidateBackingMessage::Statement(r, s)
- ) if r == hash_a && s == statement => {}
+ ) if r == hash_a && s == statement_with_pvd => {}
);
assert_matches!(
@@ -885,6 +924,9 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
#[test]
fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing() {
+ const PARA_ID: ParaId = ParaId::new(1);
+ let pvd = dummy_pvd();
+
sp_tracing::try_init_simple();
let hash_a = Hash::repeat_byte(1);
let hash_b = Hash::repeat_byte(2);
@@ -892,7 +934,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
let candidate = {
let mut c = dummy_committed_candidate_receipt(dummy_hash());
c.descriptor.relay_parent = hash_a;
- c.descriptor.para_id = 1.into();
+ c.descriptor.para_id = PARA_ID;
c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
c
};
@@ -1274,6 +1316,20 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
) if p == peer_c && r == BENEFIT_VALID_RESPONSE => {}
);
+ let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ hash,
+ RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx),
+ )) if para_id == PARA_ID &&
+ assumption == OccupiedCoreAssumption::Free &&
+ hash == hash_a =>
+ {
+ tx.send(Ok(Some(pvd))).unwrap();
+ }
+ );
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
@@ -1285,7 +1341,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
handle.recv().await,
AllMessages::CandidateBacking(
CandidateBackingMessage::Statement(r, s)
- ) if r == hash_a && s == statement => {}
+ ) if r == hash_a && s == statement_with_pvd => {}
);
// Now messages should go out:
@@ -1887,6 +1943,7 @@ fn peer_cant_flood_with_large_statements() {
#[test]
fn handle_multiple_seconded_statements() {
let relay_parent_hash = Hash::repeat_byte(1);
+ let pvd = dummy_pvd();
let candidate = dummy_committed_candidate_receipt(relay_parent_hash);
let candidate_hash = candidate.hash();
@@ -2086,6 +2143,18 @@ fn handle_multiple_seconded_statements() {
})
.await;
+ let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ _,
+ RuntimeApiRequest::PersistedValidationData(_, assumption, tx),
+ )) if assumption == OccupiedCoreAssumption::Free => {
+ tx.send(Ok(Some(pvd.clone()))).unwrap();
+ }
+ );
+
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
@@ -2103,7 +2172,7 @@ fn handle_multiple_seconded_statements() {
CandidateBackingMessage::Statement(r, s)
) => {
assert_eq!(r, relay_parent_hash);
- assert_eq!(s, statement);
+ assert_eq!(s, statement_with_pvd);
}
);
@@ -2189,6 +2258,10 @@ fn handle_multiple_seconded_statements() {
})
.await;
+ let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
+
+ // Persisted validation data is cached.
+
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
@@ -2205,7 +2278,7 @@ fn handle_multiple_seconded_statements() {
CandidateBackingMessage::Statement(r, s)
) => {
assert_eq!(r, relay_parent_hash);
- assert_eq!(s, statement);
+ assert_eq!(s, statement_with_pvd);
}
);
diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs
index 28c4e6c03fbe..9e95ca8af03e 100644
--- a/node/overseer/src/lib.rs
+++ b/node/overseer/src/lib.rs
@@ -467,12 +467,14 @@ pub struct Overseer<SupportsParachains> {
#[subsystem(CandidateBackingMessage, sends: [
CandidateValidationMessage,
CollatorProtocolMessage,
+ ChainApiMessage,
AvailabilityDistributionMessage,
AvailabilityStoreMessage,
StatementDistributionMessage,
ProvisionerMessage,
RuntimeApiMessage,
DisputeCoordinatorMessage,
+ ProspectiveParachainsMessage,
])]
candidate_backing: CandidateBacking,
diff --git a/node/primitives/src/disputes/mod.rs b/node/primitives/src/disputes/mod.rs
index 4b2d636dc10e..01293d2b64f0 100644
--- a/node/primitives/src/disputes/mod.rs
+++ b/node/primitives/src/disputes/mod.rs
@@ -19,10 +19,10 @@ use parity_scale_codec::{Decode, Encode};
use sp_application_crypto::AppKey;
use sp_keystore::{CryptoStore, Error as KeystoreError, SyncCryptoStorePtr};
-use super::{Statement, UncheckedSignedFullStatement};
use polkadot_primitives::v2::{
- CandidateHash, CandidateReceipt, DisputeStatement, InvalidDisputeStatementKind, SessionIndex,
- SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature,
+ CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, EncodeAs,
+ InvalidDisputeStatementKind, SessionIndex, SigningContext, UncheckedSigned,
+ ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature,
};
/// `DisputeMessage` and related types.
@@ -174,19 +174,23 @@ impl SignedDisputeStatement {
/// along with the signing context.
///
/// This does signature checks again with the data provided.
- pub fn from_backing_statement(
- backing_statement: &UncheckedSignedFullStatement,
+ pub fn from_backing_statement<T>(
+ backing_statement: &UncheckedSigned<T, CompactStatement>,
signing_context: SigningContext,
validator_public: ValidatorId,
- ) -> Result<Self, ()> {
- let (statement_kind, candidate_hash) = match backing_statement.unchecked_payload() {
- Statement::Seconded(candidate) => (
+ ) -> Result<Self, ()>
+ where
+ for<'a> &'a T: Into<CompactStatement>,
+ T: EncodeAs<CompactStatement>,
+ {
+ let (statement_kind, candidate_hash) = match backing_statement.unchecked_payload().into() {
+ CompactStatement::Seconded(candidate_hash) => (
ValidDisputeStatementKind::BackingSeconded(signing_context.parent_hash),
- candidate.hash(),
+ candidate_hash,
),
- Statement::Valid(candidate_hash) => (
+ CompactStatement::Valid(candidate_hash) => (
ValidDisputeStatementKind::BackingValid(signing_context.parent_hash),
- *candidate_hash,
+ candidate_hash,
),
};
diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs
index 882b75a0e81f..6f76732e35c4 100644
--- a/node/primitives/src/lib.rs
+++ b/node/primitives/src/lib.rs
@@ -194,6 +194,76 @@ impl EncodeAs<CompactStatement> for Statement {
}
}
+/// A statement, exactly the same as [`Statement`] but where seconded messages carry
+/// the [`PersistedValidationData`].
+#[derive(Clone, PartialEq, Eq)]
+pub enum StatementWithPVD {
+ /// A statement that a validator seconds a candidate.
+ Seconded(CommittedCandidateReceipt, PersistedValidationData),
+ /// A statement that a validator has deemed a candidate valid.
+ Valid(CandidateHash),
+}
+
+impl std::fmt::Debug for StatementWithPVD {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ StatementWithPVD::Seconded(seconded, _) =>
+ write!(f, "Seconded: {:?}", seconded.descriptor),
+ StatementWithPVD::Valid(hash) => write!(f, "Valid: {:?}", hash),
+ }
+ }
+}
+
+impl StatementWithPVD {
+ /// Get the candidate hash referenced by this statement.
+ ///
+ /// If this is a `Seconded` statement, this does hash the candidate receipt, which may be expensive
+ /// for large candidates.
+ pub fn candidate_hash(&self) -> CandidateHash {
+ match *self {
+ StatementWithPVD::Valid(ref h) => *h,
+ StatementWithPVD::Seconded(ref c, _) => c.hash(),
+ }
+ }
+
+ /// Transform this statement into its compact version, which references only the hash
+ /// of the candidate.
+ pub fn to_compact(&self) -> CompactStatement {
+ match *self {
+ StatementWithPVD::Seconded(ref c, _) => CompactStatement::Seconded(c.hash()),
+ StatementWithPVD::Valid(hash) => CompactStatement::Valid(hash),
+ }
+ }
+
+ /// Drop the [`PersistedValidationData`] from the statement.
+ pub fn drop_pvd(self) -> Statement {
+ match self {
+ StatementWithPVD::Seconded(c, _) => Statement::Seconded(c),
+ StatementWithPVD::Valid(c_h) => Statement::Valid(c_h),
+ }
+ }
+
+ /// Drop the [`PersistedValidationData`] from the statement in a signed
+ /// variant.
+ pub fn drop_pvd_from_signed(signed: SignedFullStatementWithPVD) -> SignedFullStatement {
+ signed
+ .convert_to_superpayload_with(|s| s.drop_pvd())
+ .expect("persisted_validation_data doesn't affect encoded_as; qed")
+ }
+}
+
+impl From<&'_ StatementWithPVD> for CompactStatement {
+ fn from(stmt: &StatementWithPVD) -> Self {
+ stmt.to_compact()
+ }
+}
+
+impl EncodeAs<CompactStatement> for StatementWithPVD {
+ fn encode_as(&self) -> Vec<u8> {
+ self.to_compact().encode()
+ }
+}
+
/// A statement, the corresponding signature, and the index of the sender.
///
/// Signing context and validator set should be apparent from context.
@@ -205,6 +275,13 @@ pub type SignedFullStatement = Signed<Statement, CompactStatement>;
/// Variant of `SignedFullStatement` where the signature has not yet been verified.
pub type UncheckedSignedFullStatement = UncheckedSigned<Statement, CompactStatement>;
+/// A statement, the corresponding signature, and the index of the sender.
+///
+/// Seconded statements are accompanied by the [`PersistedValidationData`]
+///
+/// Signing context and validator set should be apparent from context.
+pub type SignedFullStatementWithPVD = Signed<StatementWithPVD, CompactStatement>;
+
/// Candidate invalidity details
#[derive(Debug)]
pub enum InvalidCandidate {
diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs
index db2bd89286b7..9652cff20ba7 100644
--- a/node/subsystem-types/src/messages.rs
+++ b/node/subsystem-types/src/messages.rs
@@ -36,7 +36,7 @@ use polkadot_node_primitives::{
approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote},
AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig,
CollationSecondedSignal, DisputeMessage, ErasureChunk, PoV, SignedDisputeStatement,
- SignedFullStatement, ValidationResult,
+ SignedFullStatement, SignedFullStatementWithPVD, ValidationResult,
};
use polkadot_primitives::{
v2::{
@@ -75,17 +75,17 @@ pub enum CandidateBackingMessage {
GetBackedCandidates(Hash, Vec<CandidateHash>, oneshot::Sender<Vec<BackedCandidate>>),
/// Note that the Candidate Backing subsystem should second the given candidate in the context of the
/// given relay-parent (ref. by hash). This candidate must be validated.
- Second(Hash, CandidateReceipt, PoV),
- /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated
- /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached.
- Statement(Hash, SignedFullStatement),
+ Second(Hash, CandidateReceipt, PersistedValidationData, PoV),
+ /// Note a validator's statement about a particular candidate.
+ /// Agreements are simply tallied until a quorum is reached.
+ Statement(Hash, SignedFullStatementWithPVD),
}
impl BoundToRelayParent for CandidateBackingMessage {
fn relay_parent(&self) -> Hash {
match self {
Self::GetBackedCandidates(hash, _, _) => *hash,
- Self::Second(hash, _, _) => *hash,
+ Self::Second(hash, _, _, _) => *hash,
Self::Statement(hash, _) => *hash,
}
}
@@ -942,7 +942,7 @@ pub enum PvfCheckerMessage {}
/// A request for the depths a hypothetical candidate would occupy within
/// some fragment tree.
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct HypotheticalDepthRequest {
/// The hash of the potential candidate.
pub candidate_hash: CandidateHash,
@@ -956,6 +956,21 @@ pub struct HypotheticalDepthRequest {
pub fragment_tree_relay_parent: Hash,
}
+/// A request for the persisted validation data stored in the prospective
+/// parachains subsystem.
+#[derive(Debug)]
+pub struct ProspectiveValidationDataRequest {
+ /// The para-id of the candidate.
+ pub para_id: ParaId,
+ /// The relay-parent of the candidate.
+ pub candidate_relay_parent: Hash,
+ /// The parent head-data hash.
+ pub parent_head_data_hash: Hash,
+ /// The maximum POV size expected of this candidate. This should be
+ /// the maximum as configured during the session.
+ pub max_pov_size: u32,
+}
+
/// Indicates the relay-parents whose fragment tree a candidate
/// is present in and the depths of that tree the candidate is present in.
pub type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>;
@@ -992,11 +1007,25 @@ pub enum ProspectiveParachainsMessage {
GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender<Vec<usize>>),
/// Get the membership of the candidate in all fragment trees.
GetTreeMembership(ParaId, CandidateHash, oneshot::Sender<FragmentTreeMembership>),
- /// Get the minimum accepted relay-parent number in the fragment tree
- /// for the given relay-parent and para-id.
+ /// Get the minimum accepted relay-parent number for each para in the fragment tree
+ /// for the given relay-chain block hash.
+ ///
+ /// That is, if the block hash is known and is an active leaf, this returns the
+ /// minimum relay-parent block number in the same branch of the relay chain which
+ /// is accepted in the fragment tree for each para-id.
+ ///
+ /// If the block hash is not an active leaf, this will return an empty vector.
///
- /// That is, if the relay-parent is known and there's a fragment tree for it,
- /// in this para-id, this returns the minimum relay-parent block number in the
- /// same chain which is accepted in the fragment tree for the para-id.
- GetMinimumRelayParent(ParaId, Hash, oneshot::Sender<Option<BlockNumber>>),
+ /// Para-IDs which are omitted from this list can be assumed to have no
+ /// valid candidate relay-parents under the given relay-chain block hash.
+ ///
+ /// Para-IDs are returned in no particular order.
+ GetMinimumRelayParents(Hash, oneshot::Sender<Vec<(ParaId, BlockNumber)>>),
+ /// Get the validation data of some prospective candidate. The candidate doesn't need
+ /// to be part of any fragment tree, but this only succeeds if the parent head-data and
+ /// relay-parent are part of some fragment tree.
+ GetProspectiveValidationData(
+ ProspectiveValidationDataRequest,
+ oneshot::Sender<Option<PersistedValidationData>>,
+ ),
}
diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs
new file mode 100644
index 000000000000..dc10efe519fe
--- /dev/null
+++ b/node/subsystem-util/src/backing_implicit_view.rs
@@ -0,0 +1,687 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use futures::channel::oneshot;
+use polkadot_node_subsystem::{
+ errors::ChainApiError,
+ messages::{ChainApiMessage, ProspectiveParachainsMessage},
+ SubsystemSender,
+};
+use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId};
+
+use std::collections::HashMap;
+
+// Always aim to retain 1 block before the active leaves.
+const MINIMUM_RETAIN_LENGTH: BlockNumber = 2;
+
+/// Handles the implicit view of the relay chain derived from the immediate view, which
+/// is composed of active leaves, and the minimum relay-parents allowed for
+/// candidates of various parachains at those leaves.
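+///
+/// A rough usage sketch, with `sender` standing in for any sender satisfying
+/// the bounds on [`View::activate_leaf`]:
+///
+/// ```ignore
+/// let mut view = View::default();
+/// let relevant_paras = view.activate_leaf(&mut sender, leaf_hash).await?;
+/// if let Some(allowed) = view.known_allowed_relay_parents_under(&leaf_hash, Some(para_id)) {
+///     // `allowed` is a contiguous span of relay-parents usable for `para_id` candidates.
+/// }
+/// view.deactivate_leaf(leaf_hash);
+/// ```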
+#[derive(Default, Clone)]
+pub struct View {
+ leaves: HashMap<Hash, ActiveLeafPruningInfo>,
+ block_info_storage: HashMap<Hash, BlockInfo>,
+}
+
+// Minimum relay parents implicitly relative to a particular block.
+#[derive(Debug, Clone)]
+struct AllowedRelayParents {
+ // minimum relay parents can only be fetched for active leaves,
+ // so this will be empty for all blocks that haven't ever been
+ // witnessed as active leaves.
+ minimum_relay_parents: HashMap<ParaId, BlockNumber>,
+ // Ancestry, in descending order, starting from the block hash itself down
+ // to and including the minimum of `minimum_relay_parents`.
+ allowed_relay_parents_contiguous: Vec<Hash>,
+}
+
+impl AllowedRelayParents {
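+ // Return the ancestry usable as relay-parents for the given para (or for
+ // any para, when `para_id` is `None`), measured from `base_number` downward.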
+ fn allowed_relay_parents_for(
+ &self,
+ para_id: Option,
+ base_number: BlockNumber,
+ ) -> &[Hash] {
+ let para_id = match para_id {
+ None => return &self.allowed_relay_parents_contiguous[..],
+ Some(p) => p,
+ };
+
+ let para_min = match self.minimum_relay_parents.get(¶_id) {
+ Some(p) => *p,
+ None => return &[],
+ };
+
+ if base_number < para_min {
+ return &[]
+ }
+
+ let diff = base_number - para_min;
+
+ // difference of 0 should lead to slice len of 1
+ let slice_len = ((diff + 1) as usize).min(self.allowed_relay_parents_contiguous.len());
+ &self.allowed_relay_parents_contiguous[..slice_len]
+ }
+}
+
+#[derive(Debug, Clone)]
+struct ActiveLeafPruningInfo {
+ // The minimum block in the same branch of the relay-chain that should be
+ // preserved.
+ retain_minimum: BlockNumber,
+}
+
+#[derive(Debug, Clone)]
+struct BlockInfo {
+ block_number: BlockNumber,
+ // If this was previously an active leaf, this will be `Some`
+ // and is useful for understanding the views of peers in the network
+ // which may not be in perfect synchrony with our own view.
+ //
+ // If they are ahead of us in getting a new leaf, there's nothing we
+ // can do as it's an unrecognized block hash. But if they're behind us,
+ // it's useful for us to retain some information about previous leaves'
+ // implicit views so we can continue to send relevant messages to them
+ // until they catch up.
+ maybe_allowed_relay_parents: Option,
+ parent_hash: Hash,
+}
+
+impl View {
+ /// Activate a leaf in the view.
+ /// This will request the minimum relay parents from the
+ /// Prospective Parachains subsystem for the leaf and will load headers in the
+ /// ancestry of the leaf as needed. These are the 'implicit ancestors' of the leaf.
+ ///
+ /// To maximize reuse of outdated leaves, it's best to activate new leaves before
+ /// deactivating old ones.
+ ///
+ /// This returns a list of para-ids which are relevant to the leaf,
+ /// and the allowed relay parents for these paras under this leaf can be
+ /// queried with [`known_allowed_relay_parents_under`].
+ ///
+ /// Returns [`FetchError::AlreadyKnown`] if the leaf is already active in the view.
+ pub async fn activate_leaf<Sender>(
+ &mut self,
+ sender: &mut Sender,
+ leaf_hash: Hash,
+ ) -> Result<Vec<ParaId>, FetchError>
+ where
+ Sender: SubsystemSender<ProspectiveParachainsMessage>,
+ Sender: SubsystemSender<ChainApiMessage>,
+ {
+ if self.leaves.contains_key(&leaf_hash) {
+ return Err(FetchError::AlreadyKnown)
+ }
+
+ let res = fetch_fresh_leaf_and_insert_ancestry(
+ leaf_hash,
+ &mut self.block_info_storage,
+ &mut *sender,
+ )
+ .await;
+
+ match res {
+ Ok(fetched) => {
+ // Retain at least `MINIMUM_RETAIN_LENGTH` blocks in storage.
+ // This helps to avoid Chain API calls when activating leaves in the
+ // same chain.
+ let retain_minimum = std::cmp::min(
+ fetched.minimum_ancestor_number,
+ fetched.leaf_number.saturating_sub(MINIMUM_RETAIN_LENGTH),
+ );
+
+ self.leaves.insert(leaf_hash, ActiveLeafPruningInfo { retain_minimum });
+
+ Ok(fetched.relevant_paras)
+ },
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Deactivate a leaf in the view. This prunes any outdated implicit ancestors as well.
+ pub fn deactivate_leaf(&mut self, leaf_hash: Hash) {
+ if self.leaves.remove(&leaf_hash).is_none() {
+ return
+ }
+
+ // Prune everything before the minimum out of all leaves,
+ // pruning absolutely everything if there are no leaves (empty view)
+ //
+ // Pruning by block number does leave behind orphaned forks slightly longer
+ // but the memory overhead is negligible.
+ {
+ let minimum = self.leaves.values().map(|l| l.retain_minimum).min();
+
+ self.block_info_storage
+ .retain(|_, i| minimum.map_or(false, |m| i.block_number >= m));
+ }
+ }
+
+ /// Get an iterator over all allowed relay-parents in the view, in no particular order.
+ ///
+ /// **Important**: not every block is guaranteed to be an allowed relay-parent for some
+ /// leaf; a block may be kept in the view storage only because of the retention rule.
+ ///
+ /// For getting relay-parents that are valid for parachain candidates use
+ /// [`View::known_allowed_relay_parents_under`].
+ pub fn all_allowed_relay_parents<'a>(&'a self) -> impl Iterator<Item = &'a Hash> + 'a {
+ self.block_info_storage.keys()
+ }
+
+ /// Get the known, allowed relay-parents that are valid for parachain candidates
+ /// which could be backed in a child of a given block for a given para ID.
+ ///
+ /// This is expressed as a contiguous slice of relay-chain block hashes which may
+ /// include the provided block hash itself.
+ ///
+ /// If `para_id` is `None`, this returns all valid relay-parents across all paras
+ /// for the leaf.
+ ///
+ /// `None` indicates that the block hash isn't part of the implicit view or that
+ /// there are no known allowed relay parents.
+ ///
+ /// This always returns `Some` for active leaves or for blocks that previously
+ /// were active leaves.
+ ///
+ /// This can return the empty slice, which indicates that no relay-parents are allowed
+ /// for the para, e.g. if the para is not scheduled at the given block hash.
+ pub fn known_allowed_relay_parents_under(
+ &self,
+ block_hash: &Hash,
+ para_id: Option<ParaId>,
+ ) -> Option<&[Hash]> {
+ let block_info = self.block_info_storage.get(block_hash)?;
+ block_info
+ .maybe_allowed_relay_parents
+ .as_ref()
+ .map(|mins| mins.allowed_relay_parents_for(para_id, block_info.block_number))
+ }
+}
+
+/// Errors when fetching a leaf and associated ancestry.
+#[derive(Debug)]
+pub enum FetchError {
+ /// Leaf was already known.
+ AlreadyKnown,
+ /// The prospective parachains subsystem was unavailable.
+ ProspectiveParachainsUnavailable,
+ /// A block header was unavailable.
+ BlockHeaderUnavailable(Hash, BlockHeaderUnavailableReason),
+ /// A block header was unavailable due to a chain API error.
+ ChainApiError(Hash, ChainApiError),
+ /// The chain API subsystem was unavailable.
+ ChainApiUnavailable,
+}
+
+/// Reasons a block header might have been unavailable.
+#[derive(Debug)]
+pub enum BlockHeaderUnavailableReason {
+ /// Block header simply unknown.
+ Unknown,
+ /// Internal Chain API error.
+ Internal(ChainApiError),
+ /// The subsystem was unavailable.
+ SubsystemUnavailable,
+}
+
+struct FetchSummary {
+ minimum_ancestor_number: BlockNumber,
+ leaf_number: BlockNumber,
+ relevant_paras: Vec,
+}
+
+async fn fetch_fresh_leaf_and_insert_ancestry<Sender>(
+ leaf_hash: Hash,
+ block_info_storage: &mut HashMap<Hash, BlockInfo>,
+ sender: &mut Sender,
+) -> Result<FetchSummary, FetchError>
+where
+ Sender: SubsystemSender<ProspectiveParachainsMessage>,
+ Sender: SubsystemSender<ChainApiMessage>,
+{
+ let min_relay_parents_raw = {
+ let (tx, rx) = oneshot::channel();
+ sender
+ .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx))
+ .await;
+
+ match rx.await {
+ Ok(m) => m,
+ Err(_) => return Err(FetchError::ProspectiveParachainsUnavailable),
+ }
+ };
+
+ let leaf_header = {
+ let (tx, rx) = oneshot::channel();
+ sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await;
+
+ match rx.await {
+ Ok(Ok(Some(header))) => header,
+ Ok(Ok(None)) =>
+ return Err(FetchError::BlockHeaderUnavailable(
+ leaf_hash,
+ BlockHeaderUnavailableReason::Unknown,
+ )),
+ Ok(Err(e)) =>
+ return Err(FetchError::BlockHeaderUnavailable(
+ leaf_hash,
+ BlockHeaderUnavailableReason::Internal(e),
+ )),
+ Err(_) =>
+ return Err(FetchError::BlockHeaderUnavailable(
+ leaf_hash,
+ BlockHeaderUnavailableReason::SubsystemUnavailable,
+ )),
+ }
+ };
+
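+ // `min_min` is the lowest minimum relay-parent number across all paras at
+ // this leaf; the stored ancestry must reach down to it.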
+ let min_min = min_relay_parents_raw.iter().map(|x| x.1).min().unwrap_or(leaf_header.number);
+ let relevant_paras = min_relay_parents_raw.iter().map(|x| x.0).collect();
+ let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1;
+
+ let ancestry = if leaf_header.number > 0 {
+ let mut next_ancestor_number = leaf_header.number - 1;
+ let mut next_ancestor_hash = leaf_header.parent_hash;
+
+ let mut ancestry = Vec::with_capacity(expected_ancestry_len);
+ ancestry.push(leaf_hash);
+
+ // Ensure all ancestors up to and including `min_min` are in the
+ // block storage. When views advance incrementally, everything
+ // should already be present.
+ while next_ancestor_number >= min_min {
+ let parent_hash = if let Some(info) = block_info_storage.get(&next_ancestor_hash) {
+ info.parent_hash
+ } else {
+ // load the header and insert into block storage.
+ let (tx, rx) = oneshot::channel();
+ sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await;
+
+ let header = match rx.await {
+ Ok(Ok(Some(header))) => header,
+ Ok(Ok(None)) =>
+ return Err(FetchError::BlockHeaderUnavailable(
+ next_ancestor_hash,
+ BlockHeaderUnavailableReason::Unknown,
+ )),
+ Ok(Err(e)) =>
+ return Err(FetchError::BlockHeaderUnavailable(
+ next_ancestor_hash,
+ BlockHeaderUnavailableReason::Internal(e),
+ )),
+ Err(_) =>
+ return Err(FetchError::BlockHeaderUnavailable(
+ next_ancestor_hash,
+ BlockHeaderUnavailableReason::SubsystemUnavailable,
+ )),
+ };
+
+ block_info_storage.insert(
+ next_ancestor_hash,
+ BlockInfo {
+ block_number: next_ancestor_number,
+ parent_hash: header.parent_hash,
+ maybe_allowed_relay_parents: None,
+ },
+ );
+
+ header.parent_hash
+ };
+
+ ancestry.push(next_ancestor_hash);
+ if next_ancestor_number == 0 {
+ break
+ }
+
+ next_ancestor_number -= 1;
+ next_ancestor_hash = parent_hash;
+ }
+
+ ancestry
+ } else {
+ Vec::new()
+ };
+
+ let fetched_ancestry = FetchSummary {
+ minimum_ancestor_number: min_min,
+ leaf_number: leaf_header.number,
+ relevant_paras,
+ };
+
+ let allowed_relay_parents = AllowedRelayParents {
+ minimum_relay_parents: min_relay_parents_raw.iter().cloned().collect(),
+ allowed_relay_parents_contiguous: ancestry,
+ };
+
+ let leaf_block_info = BlockInfo {
+ parent_hash: leaf_header.parent_hash,
+ block_number: leaf_header.number,
+ maybe_allowed_relay_parents: Some(allowed_relay_parents),
+ };
+
+ block_info_storage.insert(leaf_hash, leaf_block_info);
+
+ Ok(fetched_ancestry)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::TimeoutExt;
+ use assert_matches::assert_matches;
+ use futures::future::{join, FutureExt};
+ use polkadot_node_subsystem::AllMessages;
+ use polkadot_node_subsystem_test_helpers::{
+ make_subsystem_context, TestSubsystemContextHandle,
+ };
+ use polkadot_overseer::SubsystemContext;
+ use polkadot_primitives::v2::Header;
+ use sp_core::testing::TaskExecutor;
+ use std::time::Duration;
+
+ const PARA_A: ParaId = ParaId::new(0);
+ const PARA_B: ParaId = ParaId::new(1);
+ const PARA_C: ParaId = ParaId::new(2);
+
+ const GENESIS_HASH: Hash = Hash::repeat_byte(0xFF);
+ const GENESIS_NUMBER: BlockNumber = 0;
+
+ // Chains A and B are forks of genesis.
+
+ const CHAIN_A: &[Hash] =
+ &[Hash::repeat_byte(0x01), Hash::repeat_byte(0x02), Hash::repeat_byte(0x03)];
+
+ const CHAIN_B: &[Hash] = &[
+ Hash::repeat_byte(0x04),
+ Hash::repeat_byte(0x05),
+ Hash::repeat_byte(0x06),
+ Hash::repeat_byte(0x07),
+ Hash::repeat_byte(0x08),
+ Hash::repeat_byte(0x09),
+ ];
+
+ type VirtualOverseer = TestSubsystemContextHandle<AllMessages>;
+
+ const TIMEOUT: Duration = Duration::from_secs(2);
+
+ async fn overseer_recv(virtual_overseer: &mut VirtualOverseer) -> AllMessages {
+ virtual_overseer
+ .recv()
+ .timeout(TIMEOUT)
+ .await
+ .expect("overseer `recv` timed out")
+ }
+
+ fn default_header() -> Header {
+ Header {
+ parent_hash: Hash::zero(),
+ number: 0,
+ state_root: Hash::zero(),
+ extrinsics_root: Hash::zero(),
+ digest: Default::default(),
+ }
+ }
+
+ fn get_block_header(chain: &[Hash], hash: &Hash) -> Option<Header> {
+ let idx = chain.iter().position(|h| h == hash)?;
+ let parent_hash = idx.checked_sub(1).map(|i| chain[i]).unwrap_or(GENESIS_HASH);
+ let number =
+ if *hash == GENESIS_HASH { GENESIS_NUMBER } else { GENESIS_NUMBER + idx as u32 + 1 };
+ Some(Header { parent_hash, number, ..default_header() })
+ }
+
+ async fn assert_block_header_requests(
+ virtual_overseer: &mut VirtualOverseer,
+ chain: &[Hash],
+ blocks: &[Hash],
+ ) {
+ for block in blocks.iter().rev() {
+ assert_matches!(
+ overseer_recv(virtual_overseer).await,
+ AllMessages::ChainApi(
+ ChainApiMessage::BlockHeader(hash, tx)
+ ) => {
+ assert_eq!(*block, hash, "unexpected block header request");
+ let header = if block == &GENESIS_HASH {
+ Header {
+ number: GENESIS_NUMBER,
+ ..default_header()
+ }
+ } else {
+ get_block_header(chain, block).expect("unknown block")
+ };
+
+ tx.send(Ok(Some(header))).unwrap();
+ }
+ );
+ }
+ }
+
+ async fn assert_min_relay_parents_request(
+ virtual_overseer: &mut VirtualOverseer,
+ leaf: &Hash,
+ response: Vec<(ParaId, u32)>,
+ ) {
+ assert_matches!(
+ overseer_recv(virtual_overseer).await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::GetMinimumRelayParents(
+ leaf_hash,
+ tx
+ )
+ ) => {
+ assert_eq!(*leaf, leaf_hash, "received unexpected leaf hash");
+ tx.send(response).unwrap();
+ }
+ );
+ }
+
+ #[test]
+ fn construct_fresh_view() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
+
+ let mut view = View::default();
+
+ // Chain B.
+ const PARA_A_MIN_PARENT: u32 = 4;
+ const PARA_B_MIN_PARENT: u32 = 3;
+
+ let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT), (PARA_B, PARA_B_MIN_PARENT)];
+
+ let leaf = CHAIN_B.last().unwrap();
+ let min_min_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize;
+
+ let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| {
+ let paras = res.expect("`activate_leaf` timed out").unwrap();
+ assert_eq!(paras, vec![PARA_A, PARA_B]);
+ });
+ let overseer_fut = async {
+ assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await;
+ assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..]).await;
+ };
+ futures::executor::block_on(join(fut, overseer_fut));
+
+ for i in min_min_idx..(CHAIN_B.len() - 1) {
+ // No allowed relay parents constructed for ancestry.
+ assert!(view.known_allowed_relay_parents_under(&CHAIN_B[i], None).is_none());
+ }
+
+ let leaf_info =
+ view.block_info_storage.get(leaf).expect("block must be present in storage");
+ assert_matches!(
+ leaf_info.maybe_allowed_relay_parents,
+ Some(ref allowed_relay_parents) => {
+ assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT);
+ assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_B], PARA_B_MIN_PARENT);
+ let expected_ancestry: Vec<Hash> =
+ CHAIN_B[min_min_idx..].iter().rev().copied().collect();
+ assert_eq!(
+ allowed_relay_parents.allowed_relay_parents_contiguous,
+ expected_ancestry
+ );
+ }
+ );
+
+ // Suppose the whole test chain A is allowed up to genesis for para C.
+ const PARA_C_MIN_PARENT: u32 = 0;
+ let prospective_response = vec![(PARA_C, PARA_C_MIN_PARENT)];
+ let leaf = CHAIN_A.last().unwrap();
+ let blocks = [&[GENESIS_HASH], CHAIN_A].concat();
+
+ let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| {
+ let paras = res.expect("`activate_leaf` timed out").unwrap();
+ assert_eq!(paras, vec![PARA_C]);
+ });
+ let overseer_fut = async {
+ assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await;
+ assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks).await;
+ };
+ futures::executor::block_on(join(fut, overseer_fut));
+
+ assert_eq!(view.leaves.len(), 2);
+ }
+
+ #[test]
+ fn reuse_block_info_storage() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
+
+ let mut view = View::default();
+
+ const PARA_A_MIN_PARENT: u32 = 1;
+ let leaf_a_number = 3;
+ let leaf_a = CHAIN_B[leaf_a_number - 1];
+ let min_min_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize;
+
+ let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)];
+
+ let fut = view.activate_leaf(ctx.sender(), leaf_a).timeout(TIMEOUT).map(|res| {
+ let paras = res.expect("`activate_leaf` timed out").unwrap();
+ assert_eq!(paras, vec![PARA_A]);
+ });
+ let overseer_fut = async {
+ assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await;
+ assert_block_header_requests(
+ &mut ctx_handle,
+ CHAIN_B,
+ &CHAIN_B[min_min_idx..leaf_a_number],
+ )
+ .await;
+ };
+ futures::executor::block_on(join(fut, overseer_fut));
+
+ // Blocks up to the 3rd are present in storage.
+ const PARA_B_MIN_PARENT: u32 = 2;
+ let leaf_b_number = 5;
+ let leaf_b = CHAIN_B[leaf_b_number - 1];
+
+ let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)];
+
+ let fut = view.activate_leaf(ctx.sender(), leaf_b).timeout(TIMEOUT).map(|res| {
+ let paras = res.expect("`activate_leaf` timed out").unwrap();
+ assert_eq!(paras, vec![PARA_B]);
+ });
+ let overseer_fut = async {
+ assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await;
+ assert_block_header_requests(
+ &mut ctx_handle,
+ CHAIN_B,
+ &CHAIN_B[leaf_a_number..leaf_b_number], // Note the expected range.
+ )
+ .await;
+ };
+ futures::executor::block_on(join(fut, overseer_fut));
+
+ // Allowed relay parents for leaf A are preserved.
+ let leaf_a_info =
+ view.block_info_storage.get(&leaf_a).expect("block must be present in storage");
+ assert_matches!(
+ leaf_a_info.maybe_allowed_relay_parents,
+ Some(ref allowed_relay_parents) => {
+ assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT);
+ let expected_ancestry: Vec<Hash> =
+ CHAIN_B[min_min_idx..leaf_a_number].iter().rev().copied().collect();
+ let ancestry = view.known_allowed_relay_parents_under(&leaf_a, Some(PARA_A)).unwrap().to_vec();
+ assert_eq!(ancestry, expected_ancestry);
+ }
+ );
+ }
+
+ #[test]
+ fn pruning() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
+
+ let mut view = View::default();
+
+ const PARA_A_MIN_PARENT: u32 = 3;
+ let leaf_a = CHAIN_B.iter().rev().nth(1).unwrap();
+ let leaf_a_idx = CHAIN_B.len() - 2;
+ let min_a_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize;
+
+ let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)];
+
+ let fut = view
+ .activate_leaf(ctx.sender(), *leaf_a)
+ .timeout(TIMEOUT)
+ .map(|res| res.unwrap().unwrap());
+ let overseer_fut = async {
+ assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await;
+ assert_block_header_requests(
+ &mut ctx_handle,
+ CHAIN_B,
+ &CHAIN_B[min_a_idx..=leaf_a_idx],
+ )
+ .await;
+ };
+ futures::executor::block_on(join(fut, overseer_fut));
+
+ // Also activate a leaf with a lesser minimum relay parent.
+ const PARA_B_MIN_PARENT: u32 = 2;
+ let leaf_b = CHAIN_B.last().unwrap();
+ let min_b_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize;
+
+ let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)];
+ // Headers will be requested for the minimum block and the leaf.
+ let blocks = &[CHAIN_B[min_b_idx], *leaf_b];
+
+ let fut = view
+ .activate_leaf(ctx.sender(), *leaf_b)
+ .timeout(TIMEOUT)
+ .map(|res| res.expect("`activate_leaf` timed out").unwrap());
+ let overseer_fut = async {
+ assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await;
+ assert_block_header_requests(&mut ctx_handle, CHAIN_B, blocks).await;
+ };
+ futures::executor::block_on(join(fut, overseer_fut));
+
+ // Prune implicit ancestor (no-op).
+ let block_info_len = view.block_info_storage.len();
+ view.deactivate_leaf(CHAIN_B[leaf_a_idx - 1]);
+ assert_eq!(block_info_len, view.block_info_storage.len());
+
+ // Prune a leaf with a greater minimum relay parent.
+ view.deactivate_leaf(*leaf_b);
+ for hash in CHAIN_B.iter().take(PARA_B_MIN_PARENT as usize) {
+ assert!(!view.block_info_storage.contains_key(hash));
+ }
+
+ // Prune the last leaf.
+ view.deactivate_leaf(*leaf_a);
+ assert!(view.block_info_storage.is_empty());
+ }
+}
diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs
index ef61400eb0f9..b30742e78aba 100644
--- a/node/subsystem-util/src/lib.rs
+++ b/node/subsystem-util/src/lib.rs
@@ -64,6 +64,10 @@ pub mod reexports {
pub use polkadot_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext};
}
+/// A utility for managing the implicit view of the relay-chain derived from active
+/// leaves and the minimum allowed relay-parents that parachain candidates can have
+/// and be backed in those leaves' children.
+pub mod backing_implicit_view;
/// An emulator for node-side code to predict the results of on-chain parachain inclusion
/// and predict future constraints.
pub mod inclusion_emulator;
diff --git a/primitives/src/v2/signed.rs b/primitives/src/v2/signed.rs
index 28c3b790039f..bebc2c0208c9 100644
--- a/primitives/src/v2/signed.rs
+++ b/primitives/src/v2/signed.rs
@@ -157,7 +157,6 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> Signed<Payload, RealPayload>
) -> Result<Signed<SuperPayload, RealPayload>, (Self, SuperPayload)>
where
SuperPayload: EncodeAs<RealPayload>,
- Payload: Encode,
{
if claimed.encode_as() == self.0.payload.encode_as() {
Ok(Signed(UncheckedSigned {
@@ -170,6 +169,34 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> Signed<Payload, RealPayload>
+ pub fn convert_to_superpayload_with<F, SuperPayload>(
+ self,
+ convert: F,
+ ) -> Result<Signed<SuperPayload, RealPayload>, SuperPayload>
+ where
+ F: FnOnce(Payload) -> SuperPayload,
+ SuperPayload: EncodeAs<RealPayload>,
+ {
+ let expected_encode_as = self.0.payload.encode_as();
+ let converted = convert(self.0.payload);
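+ // The signature covers the encoding as `RealPayload`, so the conversion is
+ // only accepted if that encoding is left unchanged.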
+ if converted.encode_as() == expected_encode_as {
+ Ok(Signed(UncheckedSigned {
+ payload: converted,
+ validator_index: self.0.validator_index,
+ signature: self.0.signature,
+ real_payload: sp_std::marker::PhantomData,
+ }))
+ } else {
+ Err(converted)
+ }
+ }
}
// We can't bound this on `Payload: Into<SuperPayload>` because that conversion consumes
diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs
index 8715cdc53121..edde48f4d984 100644
--- a/runtime/parachains/src/runtime_api_impl/vstaging.rs
+++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -25,3 +25,7 @@ pub fn get_session_disputes<T: disputes::Config>(
) -> Vec<(SessionIndex, CandidateHash, DisputeState<T::BlockNumber>)> {
<disputes::Pallet<T>>::disputes()
}
+
+// TODO [now]: implicit `validity_constraints`. Ensure that `min_relay_parent`
+// never goes lower than the point at which asynchronous backing was enabled.
+// Also, never cross session boundaries.
diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs
index d899c54d1d53..eb5def0cef44 100644
--- a/statement-table/src/generic.rs
+++ b/statement-table/src/generic.rs
@@ -61,6 +61,14 @@ pub trait Context {
fn requisite_votes(&self, group: &Self::GroupId) -> usize;
}
+/// Table configuration.
+pub struct Config {
+ /// When this is true, the table will allow multiple seconded candidates
+ /// per authority. This flag means that higher-level code is responsible for
+ /// bounding the number of candidates.
+ pub allow_multiple_seconded: bool,
+}
+
/// Statements circulated among peers.
#[derive(PartialEq, Eq, Debug, Clone, Encode, Decode)]
pub enum Statement {
@@ -270,12 +278,12 @@ impl<Ctx: Context> CandidateData<Ctx> {
// authority metadata
struct AuthorityData<Ctx: Context> {
- proposal: Option<(Ctx::Digest, Ctx::Signature)>,
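+ // Candidate proposals (digest and signature) seconded by this authority,
+ // limited to one entry unless `Config::allow_multiple_seconded` is set.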
+ proposals: Vec<(Ctx::Digest, Ctx::Signature)>,
}
impl<Ctx: Context> Default for AuthorityData<Ctx> {
fn default() -> Self {
- AuthorityData { proposal: None }
+ AuthorityData { proposals: Vec::new() }
}
}
@@ -290,19 +298,20 @@ pub struct Table<Ctx: Context> {
authority_data: HashMap<Ctx::AuthorityId, AuthorityData<Ctx>>,
detected_misbehavior: HashMap<Ctx::AuthorityId, Vec<MisbehaviorFor<Ctx>>>,
candidate_votes: HashMap<Ctx::Digest, CandidateData<Ctx>>,
+ config: Config,
}
-impl<Ctx: Context> Default for Table<Ctx> {
- fn default() -> Self {
+impl<Ctx: Context> Table<Ctx> {
+ /// Create a new `Table` from a `Config`.
+ pub fn new(config: Config) -> Self {
Table {
- authority_data: HashMap::new(),
- detected_misbehavior: HashMap::new(),
- candidate_votes: HashMap::new(),
+ authority_data: HashMap::default(),
+ detected_misbehavior: HashMap::default(),
+ candidate_votes: HashMap::default(),
+ config,
}
}
-}
-impl<Ctx: Context> Table<Ctx> {
/// Get the attested candidate for `digest`.
///
/// Returns `Some(_)` if the candidate exists and is includable.
@@ -393,7 +402,9 @@ impl<Ctx: Context> Table<Ctx> {
// note misbehavior.
let existing = occ.get_mut();
- if let Some((ref old_digest, ref old_sig)) = existing.proposal {
+ if !self.config.allow_multiple_seconded && existing.proposals.len() == 1 {
+ let &(ref old_digest, ref old_sig) = &existing.proposals[0];
+
if old_digest != &digest {
const EXISTENCE_PROOF: &str =
"when proposal first received from authority, candidate \
@@ -413,15 +424,19 @@ impl Table {
}))
}
+ false
+ } else if self.config.allow_multiple_seconded &&
+ existing.proposals.iter().find(|(ref od, _)| od == &digest).is_some()
+ {
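+ // The same proposal was already noted for this authority; importing it
+ // again is not misbehavior, but there is nothing new to note.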
false
} else {
- existing.proposal = Some((digest.clone(), signature.clone()));
+ existing.proposals.push((digest.clone(), signature.clone()));
true
}
},
Entry::Vacant(vacant) => {
vacant
- .insert(AuthorityData { proposal: Some((digest.clone(), signature.clone())) });
+ .insert(AuthorityData { proposals: vec![(digest.clone(), signature.clone())] });
true
},
};
@@ -571,8 +586,12 @@ mod tests {
use super::*;
use std::collections::HashMap;
- fn create() -> Table<TestContext> {
- Table::default()
+ fn create_single_seconded() -> Table<TestContext> {
+ Table::new(Config { allow_multiple_seconded: false })
+ }
+
+ fn create_many_seconded() -> Table<TestContext> {
+ Table::new(Config { allow_multiple_seconded: true })
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
@@ -630,7 +649,7 @@ mod tests {
}
#[test]
- fn submitting_two_candidates_is_misbehavior() {
+ fn submitting_two_candidates_can_be_misbehavior() {
let context = TestContext {
authorities: {
let mut map = HashMap::new();
@@ -639,7 +658,7 @@ mod tests {
},
};
- let mut table = create();
+ let mut table = create_single_seconded();
let statement_a = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
signature: Signature(1),
@@ -665,6 +684,36 @@ mod tests {
);
}
+ #[test]
+ fn submitting_two_candidates_can_be_allowed() {
+ let context = TestContext {
+ authorities: {
+ let mut map = HashMap::new();
+ map.insert(AuthorityId(1), GroupId(2));
+ map
+ },
+ };
+
+ let mut table = create_many_seconded();
+ let statement_a = SignedStatement {
+ statement: Statement::Seconded(Candidate(2, 100)),
+ signature: Signature(1),
+ sender: AuthorityId(1),
+ };
+
+ let statement_b = SignedStatement {
+ statement: Statement::Seconded(Candidate(2, 999)),
+ signature: Signature(1),
+ sender: AuthorityId(1),
+ };
+
+ table.import_statement(&context, statement_a);
+ assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
+
+ table.import_statement(&context, statement_b);
+ assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
+ }
+
#[test]
fn submitting_candidate_from_wrong_group_is_misbehavior() {
let context = TestContext {
@@ -675,7 +724,7 @@ mod tests {
},
};
- let mut table = create();
+ let mut table = create_single_seconded();
let statement = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
signature: Signature(1),
@@ -707,7 +756,7 @@ mod tests {
},
};
- let mut table = create();
+ let mut table = create_single_seconded();
let candidate_a = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
@@ -751,7 +800,7 @@ mod tests {
},
};
- let mut table = create();
+ let mut table = create_single_seconded();
let statement = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
signature: Signature(1),
@@ -781,7 +830,7 @@ mod tests {
},
};
- let mut table = create();
+ let mut table = create_single_seconded();
let statement = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
signature: Signature(1),
@@ -849,7 +898,7 @@ mod tests {
};
// have 2/3 validity guarantors note validity.
- let mut table = create();
+ let mut table = create_single_seconded();
let statement = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
signature: Signature(1),
@@ -883,7 +932,7 @@ mod tests {
},
};
- let mut table = create();
+ let mut table = create_single_seconded();
let statement = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
signature: Signature(1),
@@ -910,7 +959,7 @@ mod tests {
},
};
- let mut table = create();
+ let mut table = create_single_seconded();
let statement = SignedStatement {
statement: Statement::Seconded(Candidate(2, 100)),
signature: Signature(1),
diff --git a/statement-table/src/lib.rs b/statement-table/src/lib.rs
index a3fbbb1fdaaa..3bd586f09da9 100644
--- a/statement-table/src/lib.rs
+++ b/statement-table/src/lib.rs
@@ -16,7 +16,7 @@
pub mod generic;
-pub use generic::{Context, Table};
+pub use generic::{Config, Context, Table};
/// Concrete instantiations suitable for v2 primitives.
pub mod v2 {