parachain-system: ignore go ahead signal once upgrade is processed (#2594)

* handle goahead signal for unincluded segment

* doc comment

* add test
slumber authored May 22, 2023
1 parent dcf62e5 commit da07585
Showing 3 changed files with 242 additions and 25 deletions.
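Conceptually, the commit makes the pallet act on a relay-chain `GoAhead` upgrade signal at most once per unincluded segment: if a not-yet-included ancestor already consumed the signal, a later block built on top of it ignores the repeated signal from the relay state proof. The sketch below is a minimal, self-contained model of that rule; the types are simplified stand-ins for the pallet's `Ancestor` and `SegmentTracker` and are not its actual API.

```rust
// Simplified model: a `GoAhead` signal must be acted upon at most once per
// unincluded segment, because a block that already processed it may still be
// waiting for inclusion when the signal reappears in a later state proof.
// All items below are illustrative stand-ins, not the pallet's real API.

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum UpgradeGoAhead {
    Abort,
    GoAhead,
}

/// One not-yet-included ancestor block and the signal (if any) it consumed.
struct Ancestor {
    consumed_go_ahead_signal: Option<UpgradeGoAhead>,
}

/// Aggregated view over the whole unincluded segment.
#[derive(Default)]
struct SegmentTracker {
    consumed_go_ahead_signal: Option<UpgradeGoAhead>,
}

impl SegmentTracker {
    /// Record a new ancestor; at most one ancestor may consume the signal.
    fn append(&mut self, ancestor: &Ancestor) {
        if let Some(signal) = ancestor.consumed_go_ahead_signal {
            assert!(
                self.consumed_go_ahead_signal.is_none(),
                "go-ahead signal already consumed within this segment"
            );
            self.consumed_go_ahead_signal = Some(signal);
        }
    }
}

/// Decide which signal (if any) the block being built should act on.
fn signal_to_process(
    segment: &SegmentTracker,
    relay_signal: Option<UpgradeGoAhead>,
) -> Option<UpgradeGoAhead> {
    if segment.consumed_go_ahead_signal.is_some() {
        // An unincluded ancestor already handled it; ignore the repeat.
        None
    } else {
        relay_signal
    }
}

fn main() {
    let mut segment = SegmentTracker::default();

    // Block N observes `GoAhead` for the first time and consumes it.
    let first = signal_to_process(&segment, Some(UpgradeGoAhead::GoAhead));
    assert_eq!(first, Some(UpgradeGoAhead::GoAhead));
    segment.append(&Ancestor { consumed_go_ahead_signal: first });

    // Block N + 1 is authored before block N is included; the relay chain
    // still reports `GoAhead`, but the segment says it was already handled.
    let second = signal_to_process(&segment, Some(UpgradeGoAhead::GoAhead));
    assert_eq!(second, None);
}
```

The diff below implements this in the pallet: `on_finalize` takes the mirrored signal and attaches it to the new `Ancestor` only if no earlier ancestor in the segment consumed it, and `set_validation_data` skips the processing logic when the segment already records a consumed signal.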
71 changes: 48 additions & 23 deletions pallets/parachain-system/src/lib.rs
@@ -225,6 +225,7 @@ pub mod pallet {
fn on_finalize(_: T::BlockNumber) {
<DidSetValidationCode<T>>::kill();
<UpgradeRestrictionSignal<T>>::kill();
let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

assert!(
<ValidationData<T>>::exists(),
@@ -338,20 +339,30 @@ pub mod pallet {
.collect();
let used_bandwidth =
UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

let mut aggregated_segment =
AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
let consumed_go_ahead_signal =
if aggregated_segment.consumed_go_ahead_signal().is_some() {
// Some ancestor within the segment already processed this signal -- validated during
// inherent creation.
None
} else {
relay_upgrade_go_ahead
};
// The bandwidth constructed was ensured to satisfy relay chain constraints.
let ancestor = Ancestor::new_unchecked(used_bandwidth);
let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

let watermark = HrmpWatermark::<T>::get();
let watermark_update =
HrmpWatermarkUpdate::new(watermark, LastRelayChainBlockNumber::<T>::get());
AggregatedUnincludedSegment::<T>::mutate(|agg| {
let agg = agg.get_or_insert_with(SegmentTracker::default);
// TODO: In order for this panic to be correct, outbound message source should
// respect bandwidth limits as well.
// <https://github.com/paritytech/cumulus/issues/2471>
agg.append(&ancestor, watermark_update, &total_bandwidth_out)
.expect("unincluded segment limits exceeded");
});
// TODO: In order for this panic to be correct, outbound message source should
// respect bandwidth limits as well.
// <https://github.com/paritytech/cumulus/issues/2471>
aggregated_segment
.append(&ancestor, watermark_update, &total_bandwidth_out)
.expect("unincluded segment limits exceeded");
AggregatedUnincludedSegment::<T>::put(aggregated_segment);
// Check in `on_initialize` guarantees there's space for this block.
UnincludedSegment::<T>::append(ancestor);
}
@@ -425,7 +436,7 @@ pub mod pallet {
4 + hrmp_max_message_num_per_candidate as u64,
);

// Always try to read `MaxUnincludedLen` in `on_finalize`.
// Always try to read `UpgradeGoAhead` in `on_finalize`.
weight += T::DbWeight::get().reads(1);

weight
@@ -456,6 +467,9 @@ pub mod pallet {
"ValidationData must be updated only once in a block",
);

// TODO: This is more than zero, but will need benchmarking to figure out what.
let mut total_weight = Weight::zero();

// NOTE: the inherent data is expected to be unique, even if this block is built
// in the context of the same relay parent as the previous one. In particular,
// the inherent shouldn't contain messages that were already processed by any of the
@@ -486,14 +500,28 @@ pub mod pallet {
// Update the desired maximum capacity according to the consensus hook.
let (consensus_hook_weight, capacity) =
T::ConsensusHook::on_state_proof(&relay_state_proof);
total_weight += consensus_hook_weight;
total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);

// initialization logic: we know that this runs exactly once every block,
// which means we can put the initialization logic here to remove the
// sequencing problem.
let upgrade_go_ahead_signal = relay_state_proof
.read_upgrade_go_ahead_signal()
.expect("Invalid upgrade go ahead signal");

let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
.as_ref()
.and_then(SegmentTracker::consumed_go_ahead_signal);
if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
// Unincluded ancestor consuming upgrade signal is still within the segment,
// sanity check that it matches with the signal from relay chain.
assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
}
match upgrade_go_ahead_signal {
Some(_signal) if upgrade_signal_in_segment.is_some() => {
// Do nothing, processing logic was executed by unincluded ancestor.
},
Some(relay_chain::UpgradeGoAhead::GoAhead) => {
assert!(
<PendingValidationCode<T>>::exists(),
@@ -518,6 +546,7 @@ pub mod pallet {
.read_upgrade_restriction_signal()
.expect("Invalid upgrade restriction signal"),
);
<UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

let host_config = relay_state_proof
.read_abridged_host_configuration()
@@ -533,18 +562,6 @@ pub mod pallet {

<T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

// TODO: This is more than zero, but will need benchmarking to figure out what.
// NOTE: We don't account for the amount of processed messages from
// downward and horizontal channels in the unincluded segment.
//
// This is correct only because the current implementation always attempts
// to exhaust each message queue and panics if the DMQ head doesn't match.
//
// If one or more messages were ever "re-processed" in a parachain block before its
// ancestor was included, the MQC heads wouldn't match and the block would be invalid.
//
// <https://github.com/paritytech/cumulus/issues/2472>
let mut total_weight = consensus_hook_weight;
total_weight += Self::process_inbound_downward_messages(
relevant_messaging_state.dmq_mqc_head,
downward_messages,
@@ -554,7 +571,6 @@ pub mod pallet {
horizontal_messages,
vfp.relay_parent_number,
);
total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);

Ok(PostDispatchInfo { actual_weight: Some(total_weight), pays_fee: Pays::No })
}
@@ -719,6 +735,15 @@ pub mod pallet {
pub(super) type UpgradeRestrictionSignal<T: Config> =
StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

/// Optional upgrade go-ahead signal from the relay-chain.
///
/// This storage item is a mirror of the corresponding value for the current parachain from the
/// relay-chain. This value is ephemeral, which means it doesn't hit the storage. This value is
/// set after the inherent.
#[pallet::storage]
pub(super) type UpgradeGoAhead<T: Config> =
StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;

/// The state proof for the last relay parent block.
///
/// This field is meant to be updated each block with the validation data inherent. Therefore,
109 changes: 109 additions & 0 deletions pallets/parachain-system/src/tests.rs
@@ -495,6 +495,115 @@ fn unincluded_segment_is_limited() {
.add(124, || {}); // The previous block wasn't included yet, should panic in `create_inherent`.
}

#[test]
fn unincluded_code_upgrade_handles_signal() {
CONSENSUS_HOOK.with(|c| {
*c.borrow_mut() = Box::new(|_| (Weight::zero(), NonZeroU32::new(2).unwrap().into()))
});

BlockTests::new()
.with_inclusion_delay(1)
.with_relay_sproof_builder(|_, block_number, builder| {
if block_number > 123 && block_number <= 125 {
builder.upgrade_go_ahead = Some(relay_chain::UpgradeGoAhead::GoAhead);
}
})
.add(123, || {
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
})
.add_with_post_test(
124,
|| {},
|| {
assert!(
!<PendingValidationCode<Test>>::exists(),
"validation function must have been unset"
);
},
)
.add_with_post_test(
125,
|| {
// The signal is present in relay state proof and ignored.
// Block that processed the signal is still not included.
},
|| {
let segment = <UnincludedSegment<Test>>::get();
assert_eq!(segment.len(), 2);
let aggregated_segment =
<AggregatedUnincludedSegment<Test>>::get().expect("segment is non-empty");
assert_eq!(
aggregated_segment.consumed_go_ahead_signal(),
Some(relay_chain::UpgradeGoAhead::GoAhead)
);
},
)
.add_with_post_test(
126,
|| {},
|| {
let aggregated_segment =
<AggregatedUnincludedSegment<Test>>::get().expect("segment is non-empty");
// Block that processed the signal is included.
assert!(aggregated_segment.consumed_go_ahead_signal().is_none());
},
);
}

#[test]
fn unincluded_code_upgrade_scheduled_after_go_ahead() {
CONSENSUS_HOOK.with(|c| {
*c.borrow_mut() = Box::new(|_| (Weight::zero(), NonZeroU32::new(2).unwrap().into()))
});

BlockTests::new()
.with_inclusion_delay(1)
.with_relay_sproof_builder(|_, block_number, builder| {
if block_number > 123 && block_number <= 125 {
builder.upgrade_go_ahead = Some(relay_chain::UpgradeGoAhead::GoAhead);
}
})
.add(123, || {
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
})
.add_with_post_test(
124,
|| {},
|| {
assert!(
!<PendingValidationCode<Test>>::exists(),
"validation function must have been unset"
);
// The previous go-ahead signal was processed, schedule another upgrade.
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
},
)
.add_with_post_test(
125,
|| {
// The signal is present in relay state proof and ignored.
// Block that processed the signal is still not included.
},
|| {
let segment = <UnincludedSegment<Test>>::get();
assert_eq!(segment.len(), 2);
let aggregated_segment =
<AggregatedUnincludedSegment<Test>>::get().expect("segment is non-empty");
assert_eq!(
aggregated_segment.consumed_go_ahead_signal(),
Some(relay_chain::UpgradeGoAhead::GoAhead)
);
},
)
.add_with_post_test(
126,
|| {},
|| {
assert!(<PendingValidationCode<Test>>::exists(), "upgrade is pending");
},
);
}

#[test]
fn events() {
BlockTests::new()
