From 4057ccd7a37396bc1c6d1742f418415af61b2787 Mon Sep 17 00:00:00 2001
From: Nazar Mokrynskyi
Date: Fri, 23 Aug 2024 16:00:36 +0300
Subject: [PATCH] Reactive syncing metrics (#5410)

This PR untangles syncing metrics and makes them reactive, the way
metrics are supposed to be in general.

Syncing metrics were bundled in a way that caused coupling across
multiple layers: justification metrics were defined and managed by
`ChainSync`, but only updated periodically on tick in `SyncingEngine`,
while the actual values were queried from `ExtraRequests`. This
convoluted architecture was hard to follow when I was looking into
https://github.com/paritytech/polkadot-sdk/issues/5333.

Now the metrics that correspond to each component are owned by that
component and updated as changes are made, instead of on tick every
1100ms. This does add some boilerplate that is a bit harder to
maintain, but it separates the metrics more cleanly, and anyone
querying them more frequently will get correspondingly finer
resolution. Since metric updates are just atomic operations, I do not
expect these changes to have any performance impact.

Will add a prdoc if the changes otherwise look good.

P.S. I noticed that importing requests (and the corresponding metrics)
have never been cleared since the corresponding code was introduced in
https://github.com/paritytech/polkadot-sdk/commit/dc41558b6ef69aad59bb4cfcd9c19c91c1d5c3a9#r145518721.
I left that behavior unchanged here, but it might be worth fixing.
cc @dmitry-markin

---------

Co-authored-by: Dmitry Markin
---
 prdoc/pr_5410.prdoc                           |  11 ++
 substrate/client/network/sync/src/engine.rs   |  28 ++--
 .../sync/src/justification_requests.rs        | 128 +++++++++++++++---
 substrate/client/network/sync/src/strategy.rs |  13 +-
 .../network/sync/src/strategy/chain_sync.rs   | 120 ++++++++--------
 5 files changed, 195 insertions(+), 105 deletions(-)
 create mode 100644 prdoc/pr_5410.prdoc
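The pattern applied throughout the diff below is the same everywhere: each component owns its gauges and adjusts them at the call sites that mutate the corresponding state, instead of a central `report_metrics()` recomputing everything on a timer. A minimal sketch of the idea — not code from this patch; `Peers` and `substrate_demo_peers` are made-up names for illustration:

```rust
use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64};
use std::collections::HashSet;

/// Hypothetical component that owns its gauge and updates it reactively.
struct Peers {
	peers: HashSet<u64>,
	metrics: Option<Gauge<U64>>,
}

impl Peers {
	fn new(registry: Option<&Registry>) -> Result<Self, PrometheusError> {
		let metrics = registry
			.map(|r| register(Gauge::new("substrate_demo_peers", "Number of peers")?, r))
			.transpose()?;
		Ok(Self { peers: HashSet::new(), metrics })
	}

	fn insert(&mut self, peer: u64) {
		// The gauge changes in lockstep with the set, so it can never go stale.
		if self.peers.insert(peer) {
			if let Some(metrics) = &self.metrics {
				metrics.inc();
			}
		}
	}

	fn remove(&mut self, peer: u64) {
		if self.peers.remove(&peer) {
			if let Some(metrics) = &self.metrics {
				metrics.dec();
			}
		}
	}
}
```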
diff --git a/prdoc/pr_5410.prdoc b/prdoc/pr_5410.prdoc
new file mode 100644
index 0000000000000..d0a32bec74239
--- /dev/null
+++ b/prdoc/pr_5410.prdoc
@@ -0,0 +1,11 @@
+title: Reactive syncing metrics
+
+doc:
+  - audience: Node Dev
+    description: |
+      Syncing metrics are now updated immediately as changes happen rather than every 1100ms as before.
+      This resulted in minor, but breaking, API changes.
+
+crates:
+  - name: sc-network-sync
+    bump: major
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index a25db4f789dd7..616dd31688723 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -471,15 +471,6 @@ where
 		))
 	}
 
-	/// Report Prometheus metrics.
-	pub fn report_metrics(&self) {
-		if let Some(metrics) = &self.metrics {
-			let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX);
-			metrics.peers.set(n);
-		}
-		self.strategy.report_metrics();
-	}
-
 	fn update_peer_info(
 		&mut self,
 		peer_id: &PeerId,
@@ -606,7 +597,11 @@ where
 	pub async fn run(mut self) {
 		loop {
 			tokio::select! {
-				_ = self.tick_timeout.tick() => self.perform_periodic_actions(),
+				_ = self.tick_timeout.tick() => {
+					// TODO: This tick should not be necessary, but
+					// `self.process_strategy_actions()` is not called in some cases otherwise and
+					// some tests fail because of this
+				},
 				command = self.service_rx.select_next_some() =>
 					self.process_service_command(command),
 				notification_event = self.notification_service.next_event() => match notification_event {
@@ -724,10 +719,6 @@ where
 		Ok(())
 	}
 
-	fn perform_periodic_actions(&mut self) {
-		self.report_metrics();
-	}
-
 	fn process_service_command(&mut self, command: ToServiceCommand<B>) {
 		match command {
 			ToServiceCommand::SetSyncForkRequest(peers, hash, number) => {
@@ -873,6 +864,9 @@ where
 			log::debug!(target: LOG_TARGET, "{peer_id} does not exist in `SyncingEngine`");
 			return
 		};
+		if let Some(metrics) = &self.metrics {
+			metrics.peers.dec();
+		}
 
 		if self.important_peers.contains(&peer_id) {
 			log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected");
@@ -1048,7 +1042,11 @@ where
 
 		log::debug!(target: LOG_TARGET, "Connected {peer_id}");
 
-		self.peers.insert(peer_id, peer);
+		if self.peers.insert(peer_id, peer).is_none() {
+			if let Some(metrics) = &self.metrics {
+				metrics.peers.inc();
+			}
+		}
 		self.peer_store_handle.set_peer_role(&peer_id, status.roles.into());
 
 		if self.default_peers_set_no_slot_peers.contains(&peer_id) {
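One detail worth noting in the last hunk above: `HashMap::insert` returns the previous value, so incrementing only when it returns `None` keeps the `peers` gauge from drifting if the same peer were ever inserted twice. A self-contained illustration of the invariant, using the underlying `prometheus` crate directly rather than the node's metrics:

```rust
use std::collections::HashMap;

use prometheus::IntGauge;

fn main() {
	let peers_gauge = IntGauge::new("demo_peers", "Number of peers").unwrap();
	let mut peers: HashMap<u32, &'static str> = HashMap::new();

	for peer in [1, 2, 2, 3] {
		// An unconditional `inc()` would count the duplicate insert of peer 2
		// twice; gating on `is_none()` keeps the gauge equal to `peers.len()`.
		if peers.insert(peer, "connected").is_none() {
			peers_gauge.inc();
		}
	}

	assert_eq!(peers_gauge.get(), peers.len() as i64);
}
```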
diff --git a/substrate/client/network/sync/src/justification_requests.rs b/substrate/client/network/sync/src/justification_requests.rs
index 2b50c85602d78..f2d7488b2c35c 100644
--- a/substrate/client/network/sync/src/justification_requests.rs
+++ b/substrate/client/network/sync/src/justification_requests.rs
@@ -21,12 +21,14 @@
 //! that don't make sense after one of the forks is finalized).
 
 use crate::{
-	request_metrics::Metrics,
 	strategy::chain_sync::{PeerSync, PeerSyncState},
 	LOG_TARGET,
 };
 use fork_tree::ForkTree;
 use log::{debug, trace, warn};
+use prometheus_endpoint::{
+	prometheus::core::GenericGauge, register, GaugeVec, Opts, PrometheusError, Registry, U64,
+};
 use sc_network_types::PeerId;
 use sp_blockchain::Error as ClientError;
 use sp_runtime::traits::{Block as BlockT, NumberFor, Zero};
@@ -41,6 +43,34 @@ const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10);
 /// Pending extra data request for the given block (hash and number).
 type ExtraRequest<B> = (<B as BlockT>::Hash, NumberFor<B>);
 
+#[derive(Debug)]
+struct Metrics {
+	pending: GenericGauge<U64>,
+	active: GenericGauge<U64>,
+	failed: GenericGauge<U64>,
+	importing: GenericGauge<U64>,
+}
+
+impl Metrics {
+	fn register(registry: &Registry) -> Result<Self, PrometheusError> {
+		let justifications = GaugeVec::<U64>::new(
+			Opts::new(
+				"substrate_sync_extra_justifications",
+				"Number of extra justifications requests",
+			),
+			&["status"],
+		)?;
+		let justifications = register(justifications, registry)?;
+
+		Ok(Self {
+			pending: justifications.with_label_values(&["pending"]),
+			active: justifications.with_label_values(&["active"]),
+			failed: justifications.with_label_values(&["failed"]),
+			importing: justifications.with_label_values(&["importing"]),
+		})
+	}
+}
+
 /// Manages pending block extra data (e.g. justification) requests.
 ///
 /// Multiple extras may be requested for competing forks, or for the same branch
@@ -62,10 +92,14 @@ pub(crate) struct ExtraRequests<B: BlockT> {
 	importing_requests: HashSet<ExtraRequest<B>>,
 	/// the name of this type of extra request (useful for logging.)
 	request_type_name: &'static str,
+	metrics: Option<Metrics>,
 }
 
 impl<B: BlockT> ExtraRequests<B> {
-	pub(crate) fn new(request_type_name: &'static str) -> Self {
+	pub(crate) fn new(
+		request_type_name: &'static str,
+		metrics_registry: Option<&Registry>,
+	) -> Self {
 		Self {
 			tree: ForkTree::new(),
 			best_seen_finalized_number: Zero::zero(),
@@ -74,6 +108,16 @@ impl<B: BlockT> ExtraRequests<B> {
 			failed_requests: HashMap::new(),
 			importing_requests: HashSet::new(),
 			request_type_name,
+			metrics: metrics_registry.and_then(|registry| {
+				Metrics::register(registry)
+					.inspect_err(|error| {
+						log::error!(
+							target: LOG_TARGET,
+							"Failed to register `ExtraRequests` metrics {error}",
+						);
+					})
+					.ok()
+			}),
 		}
 	}
 
@@ -83,6 +127,12 @@ impl<B: BlockT> ExtraRequests<B> {
 		self.pending_requests.clear();
 		self.active_requests.clear();
 		self.failed_requests.clear();
+
+		if let Some(metrics) = &self.metrics {
+			metrics.pending.set(0);
+			metrics.active.set(0);
+			metrics.failed.set(0);
+		}
 	}
 
 	/// Returns an iterator-like struct that yields peers which extra
@@ -100,6 +150,9 @@ impl<B: BlockT> ExtraRequests<B> {
 			Ok(true) => {
 				// this is a new root so we add it to the current `pending_requests`
 				self.pending_requests.push_back((request.0, request.1));
+				if let Some(metrics) = &self.metrics {
+					metrics.pending.inc();
+				}
 			},
 			Err(fork_tree::Error::Revert) => {
 				// we have finalized further than the given request, presumably
@@ -117,6 +170,10 @@ impl<B: BlockT> ExtraRequests<B> {
 	pub(crate) fn peer_disconnected(&mut self, who: &PeerId) {
 		if let Some(request) = self.active_requests.remove(who) {
 			self.pending_requests.push_front(request);
+			if let Some(metrics) = &self.metrics {
+				metrics.active.dec();
+				metrics.pending.inc();
+			}
 		}
 	}
 
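Note how metrics registration failures degrade gracefully in `new()` above: the error is logged via `inspect_err` and mapped to `None` with `.ok()`, so justification requests keep working without metrics instead of aborting startup. A standalone sketch of that `and_then`/`inspect_err`/`ok` chain — the `Demo` type and gauge name are hypothetical:

```rust
use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64};

struct Demo {
	pending: Gauge<U64>,
}

impl Demo {
	fn register(registry: &Registry) -> Result<Self, PrometheusError> {
		Ok(Self { pending: register(Gauge::new("demo_pending", "Pending items")?, registry)? })
	}
}

fn build(metrics_registry: Option<&Registry>) -> Option<Demo> {
	metrics_registry.and_then(|registry| {
		Demo::register(registry)
			// `Result::inspect_err` (stable since Rust 1.76) logs without consuming the error.
			.inspect_err(|error| eprintln!("Failed to register `Demo` metrics: {error}"))
			.ok()
	})
}
```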
@@ -130,13 +187,21 @@ impl<B: BlockT> ExtraRequests<B> {
 		// currently enforced by the outer network protocol before passing on
 		// messages to chain sync.
 		if let Some(request) = self.active_requests.remove(&who) {
+			if let Some(metrics) = &self.metrics {
+				metrics.active.dec();
+			}
+
 			if let Some(r) = resp {
 				trace!(target: LOG_TARGET,
 					"Queuing import of {} from {:?} for {:?}",
 					self.request_type_name, who, request,
 				);
 
-				self.importing_requests.insert(request);
+				if self.importing_requests.insert(request) {
+					if let Some(metrics) = &self.metrics {
+						metrics.importing.inc();
+					}
+				}
 				return Some((who, request.0, request.1, r))
 			} else {
 				trace!(target: LOG_TARGET,
@@ -146,6 +211,10 @@ impl<B: BlockT> ExtraRequests<B> {
 			}
 			self.failed_requests.entry(request).or_default().push((who, Instant::now()));
 			self.pending_requests.push_front(request);
+			if let Some(metrics) = &self.metrics {
+				metrics.failed.set(self.failed_requests.len().try_into().unwrap_or(u64::MAX));
+				metrics.pending.inc();
+			}
 		} else {
 			trace!(target: LOG_TARGET,
 				"No active {} request to {:?}",
@@ -194,6 +263,11 @@ impl<B: BlockT> ExtraRequests<B> {
 		self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &())));
 		self.active_requests.retain(|_, (h, n)| roots.contains(&(h, n, &())));
 		self.failed_requests.retain(|(h, n), _| roots.contains(&(h, n, &())));
+		if let Some(metrics) = &self.metrics {
+			metrics.pending.set(self.pending_requests.len().try_into().unwrap_or(u64::MAX));
+			metrics.active.set(self.active_requests.len().try_into().unwrap_or(u64::MAX));
+			metrics.failed.set(self.failed_requests.len().try_into().unwrap_or(u64::MAX));
+		}
 
 		Ok(())
 	}
@@ -210,12 +284,18 @@ impl<B: BlockT> ExtraRequests<B> {
 		if !self.importing_requests.remove(&request) {
 			return false
 		}
+		if let Some(metrics) = &self.metrics {
+			metrics.importing.dec();
+		}
 
 		let (finalized_hash, finalized_number) = match result {
 			Ok(req) => (req.0, req.1),
 			Err(_) => {
 				if reschedule_on_failure {
 					self.pending_requests.push_front(request);
+					if let Some(metrics) = &self.metrics {
+						metrics.pending.inc();
+					}
 				}
 				return true
 			},
@@ -233,6 +313,11 @@ impl<B: BlockT> ExtraRequests<B> {
 		self.active_requests.clear();
 		self.pending_requests.clear();
 		self.pending_requests.extend(self.tree.roots().map(|(&h, &n, _)| (h, n)));
+		if let Some(metrics) = &self.metrics {
+			metrics.failed.set(0);
+			metrics.active.set(0);
+			metrics.pending.set(self.pending_requests.len().try_into().unwrap_or(u64::MAX));
+		}
 		self.best_seen_finalized_number = finalized_number;
 
 		true
@@ -249,16 +334,6 @@ impl<B: BlockT> ExtraRequests<B> {
 	pub(crate) fn pending_requests(&self) -> impl Iterator<Item = &ExtraRequest<B>> {
 		self.pending_requests.iter()
 	}
-
-	/// Get some key metrics.
-	pub(crate) fn metrics(&self) -> Metrics {
-		Metrics {
-			pending_requests: self.pending_requests.len().try_into().unwrap_or(std::u32::MAX),
-			active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX),
-			failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX),
-			importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX),
-		}
-	}
 }
 
 /// Matches peers with pending extra requests.
@@ -301,8 +376,17 @@ impl<'a, B: BlockT> Matcher<'a, B> {
 		for requests in self.extras.failed_requests.values_mut() {
 			requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT);
 		}
+		if let Some(metrics) = &self.extras.metrics {
+			metrics
+				.failed
+				.set(self.extras.failed_requests.len().try_into().unwrap_or(u64::MAX));
+		}
 
 		while let Some(request) = self.extras.pending_requests.pop_front() {
+			if let Some(metrics) = &self.extras.metrics {
+				metrics.pending.dec();
+			}
+
 			for (peer, sync) in
 				peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available)
 			{
@@ -326,6 +410,9 @@ impl<'a, B: BlockT> Matcher<'a, B> {
 					continue
 				}
 				self.extras.active_requests.insert(*peer, request);
+				if let Some(metrics) = &self.extras.metrics {
+					metrics.active.inc();
+				}
 
 				trace!(target: LOG_TARGET,
 					"Sending {} request to {:?} for {:?}",
@@ -336,6 +423,9 @@ impl<'a, B: BlockT> Matcher<'a, B> {
 			}
 
 			self.extras.pending_requests.push_back(request);
+			if let Some(metrics) = &self.extras.metrics {
+				metrics.pending.inc();
+			}
 			self.remaining -= 1;
 
 			if self.remaining == 0 {
@@ -359,7 +449,7 @@ mod tests {
 	#[test]
 	fn requests_are_processed_in_order() {
 		fn property(mut peers: ArbitraryPeers) {
-			let mut requests = ExtraRequests::<Block>::new("test");
+			let mut requests = ExtraRequests::<Block>::new("test", None);
 
 			let num_peers_available =
 				peers.0.values().filter(|s| s.state == PeerSyncState::Available).count();
@@ -385,7 +475,7 @@ mod tests {
 	#[test]
 	fn new_roots_schedule_new_request() {
 		fn property(data: Vec<BlockNumber>) {
-			let mut requests = ExtraRequests::<Block>::new("test");
+			let mut requests = ExtraRequests::<Block>::new("test", None);
 			for (i, number) in data.into_iter().enumerate() {
 				let hash = [i as u8; 32].into();
 				let pending = requests.pending_requests.len();
@@ -402,7 +492,7 @@ mod tests {
 	#[test]
 	fn disconnecting_implies_rescheduling() {
 		fn property(mut peers: ArbitraryPeers) -> bool {
-			let mut requests = ExtraRequests::<Block>::new("test");
+			let mut requests = ExtraRequests::<Block>::new("test", None);
 
 			let num_peers_available =
 				peers.0.values().filter(|s| s.state == PeerSyncState::Available).count();
@@ -438,7 +528,7 @@ mod tests {
 	#[test]
 	fn no_response_reschedules() {
 		fn property(mut peers: ArbitraryPeers) {
-			let mut requests = ExtraRequests::<Block>::new("test");
+			let mut requests = ExtraRequests::<Block>::new("test", None);
 
 			let num_peers_available =
 				peers.0.values().filter(|s| s.state == PeerSyncState::Available).count();
@@ -480,7 +570,7 @@ mod tests {
 	fn request_is_rescheduled_when_earlier_block_is_finalized() {
 		sp_tracing::try_init_simple();
 
-		let mut finality_proofs = ExtraRequests::<Block>::new("test");
+		let mut finality_proofs = ExtraRequests::<Block>::new("test", None);
 
 		let hash4 = [4; 32].into();
 		let hash5 = [5; 32].into();
@@ -521,7 +611,7 @@ mod tests {
 
 	#[test]
 	fn ancestor_roots_are_finalized_when_finality_notification_is_missed() {
-		let mut finality_proofs = ExtraRequests::<Block>::new("test");
+		let mut finality_proofs = ExtraRequests::<Block>::new("test", None);
 
 		let hash4 = [4; 32].into();
 		let hash5 = [5; 32].into();
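Because the four per-status gauges are resolved with `with_label_values` once at registration time, the hot paths above touch plain atomics, and the exported metric family keeps its previous shape: a single `substrate_sync_extra_justifications` gauge with a `status` label. A sketch of that shape using the underlying `prometheus` crate directly (the metric name matches the patch, but this is illustrative code, not part of it):

```rust
use prometheus::{IntGaugeVec, Opts, Registry, TextEncoder};

fn main() {
	let registry = Registry::new();
	let justifications = IntGaugeVec::new(
		Opts::new("substrate_sync_extra_justifications", "Number of extra justifications requests"),
		&["status"],
	)
	.unwrap();
	registry.register(Box::new(justifications.clone())).unwrap();

	// Resolve each label once; the returned gauges are cheap atomic handles.
	let pending = justifications.with_label_values(&["pending"]);
	let active = justifications.with_label_values(&["active"]);

	pending.inc();
	pending.inc();
	active.inc();

	// Prints lines such as:
	//   substrate_sync_extra_justifications{status="active"} 1
	//   substrate_sync_extra_justifications{status="pending"} 2
	let body = TextEncoder::new().encode_to_string(&registry.gather()).unwrap();
	print!("{body}");
}
```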
diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs
index 2c9799a9d8368..ad3a9461c93b8 100644
--- a/substrate/client/network/sync/src/strategy.rs
+++ b/substrate/client/network/sync/src/strategy.rs
@@ -210,7 +210,7 @@ where
 			client.clone(),
 			config.max_parallel_downloads,
 			config.max_blocks_per_request,
-			config.metrics_registry.clone(),
+			config.metrics_registry.as_ref(),
 			std::iter::empty(),
 		)?;
 		Ok(Self {
@@ -455,13 +455,6 @@ where
 		self.chain_sync.as_ref().map_or(0, |chain_sync| chain_sync.num_sync_requests())
 	}
 
-	/// Report Prometheus metrics
-	pub fn report_metrics(&self) {
-		if let Some(ref chain_sync) = self.chain_sync {
-			chain_sync.report_metrics();
-		}
-	}
-
 	/// Get actions that should be performed by the owner on the strategy's behalf
 	#[must_use]
 	pub fn actions(&mut self) -> Result<Vec<SyncingAction<B>>, ClientError> {
@@ -519,7 +512,7 @@ where
 			self.client.clone(),
 			self.config.max_parallel_downloads,
 			self.config.max_blocks_per_request,
-			self.config.metrics_registry.clone(),
+			self.config.metrics_registry.as_ref(),
 			self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| {
 				(*peer_id, *best_hash, *best_number)
 			}),
@@ -547,7 +540,7 @@ where
 			self.client.clone(),
 			self.config.max_parallel_downloads,
 			self.config.max_blocks_per_request,
-			self.config.metrics_registry.clone(),
+			self.config.metrics_registry.as_ref(),
 			self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| {
 				(*peer_id, *best_hash, *best_number)
 			}),
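The `clone()` → `as_ref()` change above is what lets the strategy hand the same registry to `ChainSync` every time a sync strategy is (re)instantiated without cloning it: `Option::as_ref` turns `&Option<Registry>` into `Option<&Registry>`. A minimal illustration with a hypothetical `Consumer` type:

```rust
#[derive(Clone)]
struct Registry; // stand-in for `prometheus_endpoint::Registry`

struct Consumer;

impl Consumer {
	// Taking `Option<&Registry>` means construction borrows instead of cloning.
	fn new(_metrics_registry: Option<&Registry>) -> Self {
		Consumer
	}
}

struct Config {
	metrics_registry: Option<Registry>,
}

fn main() {
	let config = Config { metrics_registry: Some(Registry) };
	// `as_ref()` converts `&Option<Registry>` into `Option<&Registry>`.
	let _first = Consumer::new(config.metrics_registry.as_ref());
	// The config still owns the registry, so it can be handed out again.
	let _second = Consumer::new(config.metrics_registry.as_ref());
}
```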
diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs
index 52870d5ba1514..49e97ab11c370 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync.rs
@@ -43,7 +43,7 @@ use crate::{
 
 use codec::Encode;
 use log::{debug, error, info, trace, warn};
-use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64};
+use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64};
 use sc_client_api::{BlockBackend, ProofProvider};
 use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
 use sc_network_common::sync::message::{
@@ -128,7 +128,6 @@ mod rep {
 struct Metrics {
 	queued_blocks: Gauge<U64>,
 	fork_targets: Gauge<U64>,
-	justifications: GaugeVec<U64>,
}
 
 impl Metrics {
@@ -143,16 +142,6 @@ impl Metrics {
 				let g = Gauge::new("substrate_sync_fork_targets", "Number of fork sync targets")?;
 				register(g, r)?
 			},
-			justifications: {
-				let g = GaugeVec::new(
-					Opts::new(
-						"substrate_sync_extra_justifications",
-						"Number of extra justifications requests",
-					),
-					&["status"],
-				)?;
-				register(g, r)?
-			},
 		})
 	}
 }
@@ -374,7 +363,7 @@ where
 		client: Arc<Client>,
 		max_parallel_downloads: u32,
 		max_blocks_per_request: u32,
-		metrics_registry: Option<Registry>,
+		metrics_registry: Option<&Registry>,
 		initial_peers: impl Iterator<Item = (PeerId, B::Hash, NumberFor<B>)>,
 	) -> Result<Self, ClientError> {
 		let mut sync = Self {
@@ -384,7 +373,7 @@ where
 			blocks: BlockCollection::new(),
 			best_queued_hash: Default::default(),
 			best_queued_number: Zero::zero(),
-			extra_justifications: ExtraRequests::new("justification"),
+			extra_justifications: ExtraRequests::new("justification", metrics_registry),
 			mode,
 			queue_blocks: Default::default(),
 			fork_targets: Default::default(),
@@ -396,7 +385,7 @@ where
 			import_existing: false,
 			gap_sync: None,
 			actions: Vec::new(),
-			metrics: metrics_registry.and_then(|r| match Metrics::register(&r) {
+			metrics: metrics_registry.and_then(|r| match Metrics::register(r) {
 				Ok(metrics) => Some(metrics),
 				Err(err) => {
 					log::error!(
@@ -676,7 +665,13 @@ where
 
 		self.fork_targets
 			.entry(*hash)
-			.or_insert_with(|| ForkTarget { number, peers: Default::default(), parent_hash: None })
+			.or_insert_with(|| {
+				if let Some(metrics) = &self.metrics {
+					metrics.fork_targets.inc();
+				}
+
+				ForkTarget { number, peers: Default::default(), parent_hash: None }
+			})
 			.peers
 			.extend(peers);
 	}
@@ -883,10 +878,16 @@ where
 			);
 			self.fork_targets
 				.entry(peer.best_hash)
-				.or_insert_with(|| ForkTarget {
-					number: peer.best_number,
-					parent_hash: None,
-					peers: Default::default(),
+				.or_insert_with(|| {
+					if let Some(metrics) = &self.metrics {
+						metrics.fork_targets.inc();
+					}
+
+					ForkTarget {
+						number: peer.best_number,
+						parent_hash: None,
+						peers: Default::default(),
+					}
 				})
 				.peers
 				.insert(*peer_id);
@@ -1126,10 +1127,16 @@ where
 				);
 				self.fork_targets
 					.entry(hash)
-					.or_insert_with(|| ForkTarget {
-						number,
-						parent_hash: Some(*announce.header.parent_hash()),
-						peers: Default::default(),
+					.or_insert_with(|| {
+						if let Some(metrics) = &self.metrics {
+							metrics.fork_targets.inc();
+						}
+
+						ForkTarget {
+							number,
+							parent_hash: Some(*announce.header.parent_hash()),
+							peers: Default::default(),
+						}
 					})
 					.peers
 					.insert(peer_id);
@@ -1161,6 +1168,9 @@ where
 			target.peers.remove(peer_id);
 			!target.peers.is_empty()
 		});
+		if let Some(metrics) = &self.metrics {
+			metrics.fork_targets.set(self.fork_targets.len().try_into().unwrap_or(u64::MAX));
+		}
 
 		let blocks = self.ready_blocks();
 
@@ -1169,36 +1179,6 @@ where
 		}
 	}
 
-	/// Report prometheus metrics.
-	pub fn report_metrics(&self) {
-		if let Some(metrics) = &self.metrics {
-			metrics
-				.fork_targets
-				.set(self.fork_targets.len().try_into().unwrap_or(std::u64::MAX));
-			metrics
-				.queued_blocks
-				.set(self.queue_blocks.len().try_into().unwrap_or(std::u64::MAX));
-
-			let justifications_metrics = self.extra_justifications.metrics();
-			metrics
-				.justifications
-				.with_label_values(&["pending"])
-				.set(justifications_metrics.pending_requests.into());
-			metrics
-				.justifications
-				.with_label_values(&["active"])
-				.set(justifications_metrics.active_requests.into());
-			metrics
-				.justifications
-				.with_label_values(&["failed"])
-				.set(justifications_metrics.failed_requests.into());
-			metrics
-				.justifications
-				.with_label_values(&["importing"])
-				.set(justifications_metrics.importing_requests.into());
-		}
-	}
-
 	/// Returns the median seen block number.
 	fn median_seen(&self) -> Option<NumberFor<B>> {
 		let mut best_seens = self.peers.values().map(|p| p.best_number).collect::<Vec<_>>();
@@ -1264,6 +1244,11 @@ where
 			self.on_block_queued(h, n)
 		}
 		self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash));
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.queued_blocks
+				.set(self.queue_blocks.len().try_into().unwrap_or(u64::MAX));
+		}
 
 		self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: new_blocks })
 	}
@@ -1280,6 +1265,9 @@ where
 	/// through all peers to update our view of their state as well.
 	fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor<B>) {
 		if self.fork_targets.remove(hash).is_some() {
+			if let Some(metrics) = &self.metrics {
+				metrics.fork_targets.dec();
+			}
 			trace!(target: LOG_TARGET, "Completed fork sync {hash:?}");
 		}
 		if let Some(gap_sync) = &mut self.gap_sync {
@@ -1549,12 +1537,13 @@ where
 			std::cmp::min(self.best_queued_number, self.client.info().finalized_number);
 		let best_queued = self.best_queued_number;
 		let client = &self.client;
-		let queue = &self.queue_blocks;
+		let queue_blocks = &self.queue_blocks;
 		let allowed_requests = self.allowed_requests.take();
 		let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads };
 		let max_blocks_per_request = self.max_blocks_per_request;
 		let gap_sync = &mut self.gap_sync;
 		let disconnected_peers = &mut self.disconnected_peers;
+		let metrics = self.metrics.as_ref();
 		self.peers
 			.iter_mut()
 			.filter_map(move |(&id, peer)| {
@@ -1574,7 +1563,7 @@ where
 					MAX_BLOCKS_TO_LOOK_BACKWARDS.into() &&
 					best_queued < peer.best_number &&
 					peer.common_number < last_finalized &&
-					queue.len() <= MAJOR_SYNC_BLOCKS.into()
+					queue_blocks.len() <= MAJOR_SYNC_BLOCKS.into()
 				{
 					trace!(
 						target: LOG_TARGET,
@@ -1617,13 +1606,14 @@ where
 					last_finalized,
 					attrs,
 					|hash| {
-						if queue.contains(hash) {
+						if queue_blocks.contains(hash) {
 							BlockStatus::Queued
 						} else {
 							client.block_status(*hash).unwrap_or(BlockStatus::Unknown)
 						}
 					},
 					max_blocks_per_request,
+					metrics,
 				) {
 					trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {id}");
 					peer.state = PeerSyncState::DownloadingStale(hash);
@@ -1764,7 +1754,11 @@ where
 
 		let mut has_error = false;
 		for (_, hash) in &results {
-			self.queue_blocks.remove(hash);
+			if self.queue_blocks.remove(hash) {
+				if let Some(metrics) = &self.metrics {
+					metrics.queued_blocks.dec();
+				}
+			}
 			self.blocks.clear_queued(hash);
 			if let Some(gap_sync) = &mut self.gap_sync {
 				gap_sync.blocks.clear_queued(hash);
@@ -2094,14 +2088,15 @@ fn peer_gap_block_request(
 /// Get pending fork sync targets for a peer.
 fn fork_sync_request<B: BlockT>(
 	id: &PeerId,
-	targets: &mut HashMap<B::Hash, ForkTarget<B>>,
+	fork_targets: &mut HashMap<B::Hash, ForkTarget<B>>,
 	best_num: NumberFor<B>,
 	finalized: NumberFor<B>,
 	attributes: BlockAttributes,
 	check_block: impl Fn(&B::Hash) -> BlockStatus,
 	max_blocks_per_request: u32,
+	metrics: Option<&Metrics>,
 ) -> Option<(B::Hash, BlockRequest<B>)> {
-	targets.retain(|hash, r| {
+	fork_targets.retain(|hash, r| {
 		if r.number <= finalized {
 			trace!(
 				target: LOG_TARGET,
@@ -2122,7 +2117,10 @@ fn fork_sync_request(
 		}
 		true
 	});
-	for (hash, r) in targets {
+	if let Some(metrics) = metrics {
+		metrics.fork_targets.set(fork_targets.len().try_into().unwrap_or(u64::MAX));
+	}
+	for (hash, r) in fork_targets {
 		if !r.peers.contains(&id) {
 			continue
 		}