From dbeee70561cce248f44769117c8097fef29b7c48 Mon Sep 17 00:00:00 2001
From: Stephen Buttolph
Date: Wed, 30 Oct 2024 10:45:14 -0400
Subject: [PATCH] Add NumSubnets to the validator manager interface

---
 network/network.go                            |  2 +-
 node/node.go                                  |  2 +-
 node/overridden_manager.go                    | 11 +++-
 snow/engine/snowman/bootstrap/bootstrapper.go |  2 +-
 .../snowman/bootstrap/bootstrapper_test.go    |  4 +-
 snow/engine/snowman/syncer/state_syncer.go    |  2 +-
 .../snowman/syncer/state_syncer_test.go       | 12 ++--
 snow/engine/snowman/syncer/utils_test.go      |  2 +-
 snow/validators/manager.go                    | 16 ++++-
 snow/validators/manager_test.go               | 61 +++++++++++++------
 vms/platformvm/state/state.go                 | 10 +--
 vms/platformvm/vm_test.go                     |  4 +-
 12 files changed, 83 insertions(+), 45 deletions(-)

diff --git a/network/network.go b/network/network.go
index eab4ecca085e..816cbc69c7a2 100644
--- a/network/network.go
+++ b/network/network.go
@@ -778,7 +778,7 @@ func (n *network) samplePeers(
 	// As an optimization, if there are fewer validators than
 	// [numValidatorsToSample], only attempt to sample [numValidatorsToSample]
 	// validators to potentially avoid iterating over the entire peer set.
-	numValidatorsToSample := min(config.Validators, n.config.Validators.Count(subnetID))
+	numValidatorsToSample := min(config.Validators, n.config.Validators.NumValidators(subnetID))
 
 	n.peersLock.RLock()
 	defer n.peersLock.RUnlock()
diff --git a/node/node.go b/node/node.go
index 1fedf35eb97e..8cdc8edfce17 100644
--- a/node/node.go
+++ b/node/node.go
@@ -600,7 +600,7 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error {
 	}
 
 	n.onSufficientlyConnected = make(chan struct{})
-	numBootstrappers := n.bootstrappers.Count(constants.PrimaryNetworkID)
+	numBootstrappers := n.bootstrappers.NumValidators(constants.PrimaryNetworkID)
 	requiredConns := (3*numBootstrappers + 3) / 4
 
 	if requiredConns > 0 {
diff --git a/node/overridden_manager.go b/node/overridden_manager.go
index 484fe05da758..467dd7df1fa7 100644
--- a/node/overridden_manager.go
+++ b/node/overridden_manager.go
@@ -56,8 +56,15 @@ func (o *overriddenManager) RemoveWeight(_ ids.ID, nodeID ids.NodeID, weight uin
 	return o.manager.RemoveWeight(o.subnetID, nodeID, weight)
 }
 
-func (o *overriddenManager) Count(ids.ID) int {
-	return o.manager.Count(o.subnetID)
+func (o *overriddenManager) NumSubnets() int {
+	if o.manager.NumValidators(o.subnetID) == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (o *overriddenManager) NumValidators(ids.ID) int {
+	return o.manager.NumValidators(o.subnetID)
 }
 
 func (o *overriddenManager) TotalWeight(ids.ID) (uint64, error) {
diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go
index 024925c79288..4b0b511910f0 100644
--- a/snow/engine/snowman/bootstrap/bootstrapper.go
+++ b/snow/engine/snowman/bootstrap/bootstrapper.go
@@ -300,7 +300,7 @@ func (b *Bootstrapper) sendBootstrappingMessagesOrFinish(ctx context.Context) er
 	if numAccepted == 0 {
 		b.Ctx.Log.Debug("restarting bootstrap",
 			zap.String("reason", "no blocks accepted"),
-			zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)),
+			zap.Int("numBeacons", b.Beacons.NumValidators(b.Ctx.SubnetID)),
 		)
 		// Invariant: These functions are mutually recursive. However, when
 		// [startBootstrapping] calls [sendMessagesOrFinish], it is guaranteed
diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go
index 772cf51281e9..7803f82a725f 100644
--- a/snow/engine/snowman/bootstrap/bootstrapper_test.go
+++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go
@@ -98,7 +98,7 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *enginetest.Sender, *blocktest
 		AllGetsServer:  snowGetHandler,
 		Ctx:            ctx,
 		Beacons:        vdrs,
-		SampleK:        vdrs.Count(ctx.SubnetID),
+		SampleK:        vdrs.NumValidators(ctx.SubnetID),
 		StartupTracker: startupTracker,
 		PeerTracker:    peerTracker,
 		Sender:         sender,
@@ -693,7 +693,7 @@ func TestBootstrapNoParseOnNew(t *testing.T) {
 		AllGetsServer:  snowGetHandler,
 		Ctx:            ctx,
 		Beacons:        peers,
-		SampleK:        peers.Count(ctx.SubnetID),
+		SampleK:        peers.NumValidators(ctx.SubnetID),
 		StartupTracker: startupTracker,
 		PeerTracker:    peerTracker,
 		Sender:         sender,
diff --git a/snow/engine/snowman/syncer/state_syncer.go b/snow/engine/snowman/syncer/state_syncer.go
index 76e647e73a64..7e669fee2534 100644
--- a/snow/engine/snowman/syncer/state_syncer.go
+++ b/snow/engine/snowman/syncer/state_syncer.go
@@ -373,7 +373,7 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node
 	if votingStakes < ss.Alpha {
 		ss.Ctx.Log.Debug("restarting state sync",
 			zap.String("reason", "not enough votes received"),
-			zap.Int("numBeacons", ss.StateSyncBeacons.Count(ss.Ctx.SubnetID)),
+			zap.Int("numBeacons", ss.StateSyncBeacons.NumValidators(ss.Ctx.SubnetID)),
 			zap.Int("numFailedSyncers", ss.failedVoters.Len()),
 		)
 		return ss.startup(ctx)
diff --git a/snow/engine/snowman/syncer/state_syncer_test.go b/snow/engine/snowman/syncer/state_syncer_test.go
index fd062cb2d8b1..ffb7de7ffca7 100644
--- a/snow/engine/snowman/syncer/state_syncer_test.go
+++ b/snow/engine/snowman/syncer/state_syncer_test.go
@@ -247,7 +247,7 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) {
 	}
 
 	// check that vdrs are reached out for frontiers
-	require.Len(contactedFrontiersProviders, min(beacons.Count(ctx.SubnetID), maxOutstandingBroadcastRequests))
+	require.Len(contactedFrontiersProviders, min(beacons.NumValidators(ctx.SubnetID), maxOutstandingBroadcastRequests))
 	for beaconID := range contactedFrontiersProviders {
 		// check that beacon is duly marked as reached out
 		require.Contains(syncer.pendingSeeders, beaconID)
@@ -344,7 +344,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) {
 	// other listed vdrs are reached for data
 	require.True(
 		len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize ||
-			len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID))
+			len(contactedFrontiersProviders) == beacons.NumValidators(ctx.SubnetID))
 }
 
 func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) {
@@ -413,7 +413,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) {
 	// are reached for data
 	require.True(
 		len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize ||
-			len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID))
+			len(contactedFrontiersProviders) == beacons.NumValidators(ctx.SubnetID))
 }
 
 func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) {
@@ -475,7 +475,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) {
 	// are reached for data
 	require.True(
 		len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize ||
-			len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID))
+			len(contactedFrontiersProviders) == beacons.NumValidators(ctx.SubnetID))
 
 	// mock VM to simulate a valid but late summary is returned
 	fullVM.CantParseStateSummary = true
@@ -773,7 +773,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) {
 	// other listed voters are reached out
 	require.True(
 		len(contactedVoters) > initiallyContactedVotersSize ||
-			len(contactedVoters) == beacons.Count(ctx.SubnetID))
+			len(contactedVoters) == beacons.NumValidators(ctx.SubnetID))
 }
 
 func TestVotesForUnknownSummariesAreDropped(t *testing.T) {
@@ -876,7 +876,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) {
 	// on unknown summary
 	require.True(
 		len(contactedVoters) > initiallyContactedVotersSize ||
-			len(contactedVoters) == beacons.Count(ctx.SubnetID))
+			len(contactedVoters) == beacons.NumValidators(ctx.SubnetID))
 }
 
 func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) {
diff --git a/snow/engine/snowman/syncer/utils_test.go b/snow/engine/snowman/syncer/utils_test.go
index 4cd6e58d840e..d13b7347771b 100644
--- a/snow/engine/snowman/syncer/utils_test.go
+++ b/snow/engine/snowman/syncer/utils_test.go
@@ -107,7 +107,7 @@ func buildTestsObjects(
 		startupTracker,
 		sender,
 		beacons,
-		beacons.Count(ctx.SubnetID),
+		beacons.NumValidators(ctx.SubnetID),
 		alpha,
 		nil,
 		fullVM,
diff --git a/snow/validators/manager.go b/snow/validators/manager.go
index 45ba32c0e261..dd2a8f88d1f8 100644
--- a/snow/validators/manager.go
+++ b/snow/validators/manager.go
@@ -80,8 +80,11 @@ type Manager interface {
 	// If an error is returned, the set will be unmodified.
 	RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error
 
-	// Count returns the number of validators currently in the subnet.
-	Count(subnetID ids.ID) int
+	// NumSubnets returns the number of subnets with non-zero weight.
+	NumSubnets() int
+
+	// NumValidators returns the number of validators currently in the subnet.
+	NumValidators(subnetID ids.ID) int
 
 	// TotalWeight returns the cumulative weight of all validators in the subnet.
 	// Returns err if total weight overflows uint64.
@@ -227,7 +230,14 @@ func (m *manager) RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64
 	return nil
 }
 
-func (m *manager) Count(subnetID ids.ID) int {
+func (m *manager) NumSubnets() int {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	return len(m.subnetToVdrs)
+}
+
+func (m *manager) NumValidators(subnetID ids.ID) int {
 	m.lock.RLock()
 	set, exists := m.subnetToVdrs[subnetID]
 	m.lock.RUnlock()
diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go
index 365d7ffdf7d7..4449a324a57d 100644
--- a/snow/validators/manager_test.go
+++ b/snow/validators/manager_test.go
@@ -242,36 +242,57 @@ func TestGet(t *testing.T) {
 	require.False(ok)
 }
 
-func TestLen(t *testing.T) {
-	require := require.New(t)
+func TestNum(t *testing.T) {
+	var (
+		require = require.New(t)
 
-	m := NewManager()
-	subnetID := ids.GenerateTestID()
+		m = NewManager()
 
-	count := m.Count(subnetID)
-	require.Zero(count)
+		subnetID0 = ids.GenerateTestID()
+		subnetID1 = ids.GenerateTestID()
+		nodeID0   = ids.GenerateTestNodeID()
+		nodeID1   = ids.GenerateTestNodeID()
+	)
 
-	nodeID0 := ids.GenerateTestNodeID()
-	require.NoError(m.AddStaker(subnetID, nodeID0, nil, ids.Empty, 1))
+	require.Zero(m.NumSubnets())
+	require.Zero(m.NumValidators(subnetID0))
+	require.Zero(m.NumValidators(subnetID1))
 
-	count = m.Count(subnetID)
-	require.Equal(1, count)
+	require.NoError(m.AddStaker(subnetID0, nodeID0, nil, ids.Empty, 1))
 
-	nodeID1 := ids.GenerateTestNodeID()
-	require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, 1))
+	require.Equal(1, m.NumSubnets())
+	require.Equal(1, m.NumValidators(subnetID0))
+	require.Zero(m.NumValidators(subnetID1))
 
-	count = m.Count(subnetID)
-	require.Equal(2, count)
+	require.NoError(m.AddStaker(subnetID0, nodeID1, nil, ids.Empty, 1))
 
-	require.NoError(m.RemoveWeight(subnetID, nodeID1, 1))
+	require.Equal(1, m.NumSubnets())
+	require.Equal(2, m.NumValidators(subnetID0))
+	require.Zero(m.NumValidators(subnetID1))
 
-	count = m.Count(subnetID)
-	require.Equal(1, count)
+	require.NoError(m.AddStaker(subnetID1, nodeID1, nil, ids.Empty, 2))
 
-	require.NoError(m.RemoveWeight(subnetID, nodeID0, 1))
+	require.Equal(2, m.NumSubnets())
+	require.Equal(2, m.NumValidators(subnetID0))
+	require.Equal(1, m.NumValidators(subnetID1))
+
+	require.NoError(m.RemoveWeight(subnetID0, nodeID1, 1))
+
+	require.Equal(2, m.NumSubnets())
+	require.Equal(1, m.NumValidators(subnetID0))
+	require.Equal(1, m.NumValidators(subnetID1))
+
+	require.NoError(m.RemoveWeight(subnetID0, nodeID0, 1))
+
+	require.Equal(1, m.NumSubnets())
+	require.Zero(m.NumValidators(subnetID0))
+	require.Equal(1, m.NumValidators(subnetID1))
+
+	require.NoError(m.RemoveWeight(subnetID1, nodeID1, 2))
 
-	count = m.Count(subnetID)
-	require.Zero(count)
+	require.Zero(m.NumSubnets())
+	require.Zero(m.NumValidators(subnetID0))
+	require.Zero(m.NumValidators(subnetID1))
 }
 
 func TestGetMap(t *testing.T) {
diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go
index be6bee28cf0f..09a22f8c27de 100644
--- a/vms/platformvm/state/state.go
+++ b/vms/platformvm/state/state.go
@@ -1750,13 +1750,13 @@ func (s *state) loadPendingValidators() error {
 // Invariant: initValidatorSets requires loadCurrentValidators to have already
 // been called.
 func (s *state) initValidatorSets() error {
+	if s.validators.NumSubnets() != 0 {
+		// Enforce the invariant that the validator set is empty here.
+		return errValidatorSetAlreadyPopulated
+	}
+
 	primaryNetworkValidators := s.currentStakers.validators[constants.PrimaryNetworkID]
 	for subnetID, subnetValidators := range s.currentStakers.validators {
-		if s.validators.Count(subnetID) != 0 {
-			// Enforce the invariant that the validator set is empty here.
-			return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID)
-		}
-
 		for nodeID, subnetValidator := range subnetValidators {
 			// The subnet validator's Public Key is inherited from the
 			// corresponding primary network validator.
diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go
index 7048778b19bd..f377fd31f894 100644
--- a/vms/platformvm/vm_test.go
+++ b/vms/platformvm/vm_test.go
@@ -283,7 +283,7 @@ func TestGenesis(t *testing.T) {
 	}
 
 	// Ensure current validator set of primary network is correct
-	require.Len(genesisState.Validators, vm.Validators.Count(constants.PrimaryNetworkID))
+	require.Len(genesisState.Validators, vm.Validators.NumValidators(constants.PrimaryNetworkID))
 
 	for _, nodeID := range genesistest.DefaultNodeIDs {
 		_, ok := vm.Validators.GetValidator(constants.PrimaryNetworkID, nodeID)
@@ -1326,7 +1326,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
 		AllGetsServer:  snowGetHandler,
 		Ctx:            consensusCtx,
 		Beacons:        beacons,
-		SampleK:        beacons.Count(ctx.SubnetID),
+		SampleK:        beacons.NumValidators(ctx.SubnetID),
 		StartupTracker: startup,
 		PeerTracker:    peerTracker,
 		Sender:         sender,
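
Notes on the new API (illustrative sketches, not part of the diff):

The sketch below shows how a caller exercises the renamed `NumValidators` and the new `NumSubnets` after this change, mirroring the calls made in `manager_test.go` above. It assumes the avalanchego module is on the import path; the `main` wrapper and printed values are for demonstration only.

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/validators"
)

func main() {
	m := validators.NewManager()

	subnetID := ids.GenerateTestID()
	nodeID := ids.GenerateTestNodeID()

	// Register a single validator with weight 1, using the same arguments as
	// the calls in manager_test.go (nil BLS key, empty tx ID).
	if err := m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1); err != nil {
		panic(err)
	}

	// Count(subnetID) is now spelled NumValidators(subnetID), and NumSubnets
	// reports how many subnets currently have at least one validator.
	fmt.Println("subnets:", m.NumSubnets())               // 1
	fmt.Println("validators:", m.NumValidators(subnetID)) // 1

	// Removing all of the validator's weight drops the subnet entirely, so
	// both counters return to zero (as asserted at the end of TestNum).
	if err := m.RemoveWeight(subnetID, nodeID, 1); err != nil {
		panic(err)
	}
	fmt.Println("subnets:", m.NumSubnets())               // 0
	fmt.Println("validators:", m.NumValidators(subnetID)) // 0
}
```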
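
The node.go hunk keeps the existing connection threshold, now fed by `NumValidators`: `(3*numBootstrappers + 3) / 4` is an integer ceiling of 3n/4, so the node waits for at least three quarters of its bootstrappers before proceeding. A standalone sketch of just that arithmetic; the helper name `requiredBootstrapConns` is made up for illustration.

```go
package main

import "fmt"

// requiredBootstrapConns reproduces the expression used in node.go:
// (3*n + 3) / 4 == ceil(3n/4), i.e. at least three quarters of the
// bootstrappers, rounded up. The name is illustrative only.
func requiredBootstrapConns(numBootstrappers int) int {
	return (3*numBootstrappers + 3) / 4
}

func main() {
	for _, n := range []int{0, 1, 4, 5, 11} {
		fmt.Printf("bootstrappers=%2d requiredConns=%d\n", n, requiredBootstrapConns(n))
	}
	// Output:
	// bootstrappers= 0 requiredConns=0
	// bootstrappers= 1 requiredConns=1
	// bootstrappers= 4 requiredConns=3
	// bootstrappers= 5 requiredConns=4
	// bootstrappers=11 requiredConns=9
}
```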
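
`overriddenManager` pins every call to a single subnet, which is why its new `NumSubnets` returns only 0 or 1 rather than delegating to the underlying manager. A toy sketch of that rule in isolation; the function type and string subnet IDs here are simplified stand-ins, not the real interfaces.

```go
package main

import "fmt"

// numValidatorsFn is a simplified stand-in for the underlying manager's
// NumValidators method.
type numValidatorsFn func(subnetID string) int

// overriddenNumSubnets mirrors the logic added in overridden_manager.go: the
// overridden manager only ever exposes one subnet, so it reports 1 subnet
// when that subnet has validators and 0 otherwise.
func overriddenNumSubnets(numValidators numValidatorsFn, subnetID string) int {
	if numValidators(subnetID) == 0 {
		return 0
	}
	return 1
}

func main() {
	counts := map[string]int{"subnet-A": 3}
	lookup := func(subnetID string) int { return counts[subnetID] }

	fmt.Println(overriddenNumSubnets(lookup, "subnet-A")) // 1
	fmt.Println(overriddenNumSubnets(lookup, "subnet-B")) // 0
}
```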