diff --git a/crates/sui-core/src/state_accumulator.rs b/crates/sui-core/src/state_accumulator.rs
index 775d682cceb1c..5f88a78ba1b94 100644
--- a/crates/sui-core/src/state_accumulator.rs
+++ b/crates/sui-core/src/state_accumulator.rs
@@ -411,6 +411,13 @@ impl StateAccumulator {
         )
     }
 
+    pub fn metrics(&self) -> Arc<StateAccumulatorMetrics> {
+        match self {
+            StateAccumulator::V1(impl_v1) => impl_v1.metrics.clone(),
+            StateAccumulator::V2(impl_v2) => impl_v2.metrics.clone(),
+        }
+    }
+
     pub fn set_inconsistent_state(&self, is_inconsistent_state: bool) {
         match self {
             StateAccumulator::V1(impl_v1) => &impl_v1.metrics,
diff --git a/crates/sui-node/src/lib.rs b/crates/sui-node/src/lib.rs
index 2b49067566386..630af69101d3a 100644
--- a/crates/sui-node/src/lib.rs
+++ b/crates/sui-node/src/lib.rs
@@ -227,7 +227,7 @@ pub struct SuiNode {
     state_sync_handle: state_sync::Handle,
     randomness_handle: randomness::Handle,
     checkpoint_store: Arc<CheckpointStore>,
-    accumulator_components: Mutex<Option<(Arc<StateAccumulator>, Arc<StateAccumulatorMetrics>)>>,
+    accumulator: Mutex<Option<Arc<StateAccumulator>>>,
     connection_monitor_status: Arc<ConnectionMonitorStatus>,
 
     /// Broadcast channel to send the starting system state for the next epoch.
@@ -716,11 +716,10 @@ impl SuiNode {
         )
         .await?;
 
-        let accumulator_metrics = StateAccumulatorMetrics::new(&prometheus_registry);
         let accumulator = Arc::new(StateAccumulator::new(
             cache_traits.accumulator_store.clone(),
             &epoch_store,
-            accumulator_metrics.clone(),
+            StateAccumulatorMetrics::new(&prometheus_registry),
         ));
 
         let authority_names_to_peer_ids = epoch_store
@@ -787,7 +786,7 @@ impl SuiNode {
             state_sync_handle,
             randomness_handle,
             checkpoint_store,
-            accumulator_components: Mutex::new(Some((accumulator, accumulator_metrics))),
+            accumulator: Mutex::new(Some(accumulator)),
             end_of_epoch_channel,
             connection_monitor_status,
             trusted_peer_change_tx,
@@ -1467,8 +1466,8 @@ impl SuiNode {
             CheckpointExecutorMetrics::new(&self.registry_service.default_registry());
 
         loop {
-            let mut accum_components_guard = self.accumulator_components.lock().await;
-            let (accumulator, accumulator_metrics) = accum_components_guard.take().unwrap();
+            let mut accumulator_guard = self.accumulator.lock().await;
+            let accumulator = accumulator_guard.take().unwrap();
             let mut checkpoint_executor = CheckpointExecutor::new(
                 self.state_sync_handle.subscribe_to_synced_checkpoints(),
                 self.checkpoint_store.clone(),
@@ -1609,15 +1608,16 @@ impl SuiNode {
 
             // No other components should be holding a strong reference to state accumulator
             // at this point. Confirm here before we swap in the new accumulator.
-            Arc::into_inner(accumulator)
-                .expect("Accumulator should have no other references at this point");
+            let accumulator_metrics = Arc::into_inner(accumulator)
+                .expect("Accumulator should have no other references at this point")
+                .metrics();
             let new_accumulator = Arc::new(StateAccumulator::new(
                 self.state.get_accumulator_store().clone(),
                 &new_epoch_store,
-                accumulator_metrics.clone(),
+                accumulator_metrics,
             ));
             let weak_accumulator = Arc::downgrade(&new_accumulator);
-            *accum_components_guard = Some((new_accumulator, accumulator_metrics.clone()));
+            *accumulator_guard = Some(new_accumulator);
 
             consensus_epoch_data_remover
                 .remove_old_data(next_epoch - 1)
@@ -1662,15 +1662,16 @@ impl SuiNode {
 
             // No other components should be holding a strong reference to state accumulator
             // at this point. Confirm here before we swap in the new accumulator.
-            Arc::into_inner(accumulator)
-                .expect("Accumulator should have no other references at this point");
+            let accumulator_metrics = Arc::into_inner(accumulator)
+                .expect("Accumulator should have no other references at this point")
+                .metrics();
             let new_accumulator = Arc::new(StateAccumulator::new(
                 self.state.get_accumulator_store().clone(),
                 &new_epoch_store,
-                accumulator_metrics.clone(),
+                accumulator_metrics,
             ));
             let weak_accumulator = Arc::downgrade(&new_accumulator);
-            *accum_components_guard = Some((new_accumulator, accumulator_metrics.clone()));
+            *accumulator_guard = Some(new_accumulator);
 
             if self.state.is_validator(&new_epoch_store) {
                 info!("Promoting the node from fullnode to validator, starting grpc server");