diff --git a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go
index dda37590ec81..5210fa9ef308 100644
--- a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go
+++ b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go
@@ -133,6 +133,73 @@ const (
 	AllocatorFinalizeAtomicReplicationChange
 )
 
+// Add indicates an action adding a replica.
+func (a AllocatorAction) Add() bool {
+	return a == AllocatorAddVoter || a == AllocatorAddNonVoter
+}
+
+// Replace indicates an action replacing a dead or decommissioning replica.
+func (a AllocatorAction) Replace() bool {
+	return a == AllocatorReplaceDeadVoter ||
+		a == AllocatorReplaceDeadNonVoter ||
+		a == AllocatorReplaceDecommissioningVoter ||
+		a == AllocatorReplaceDecommissioningNonVoter
+}
+
+// Remove indicates an action removing a replica, i.e. in over-replication cases.
+func (a AllocatorAction) Remove() bool {
+	return a == AllocatorRemoveVoter ||
+		a == AllocatorRemoveNonVoter ||
+		a == AllocatorRemoveDeadVoter ||
+		a == AllocatorRemoveDeadNonVoter ||
+		a == AllocatorRemoveDecommissioningVoter ||
+		a == AllocatorRemoveDecommissioningNonVoter
+}
+
+// TargetReplicaType returns whether the action is for a voter or a non-voter replica.
+func (a AllocatorAction) TargetReplicaType() TargetReplicaType {
+	var t TargetReplicaType
+	if a == AllocatorRemoveVoter ||
+		a == AllocatorAddVoter ||
+		a == AllocatorReplaceDeadVoter ||
+		a == AllocatorRemoveDeadVoter ||
+		a == AllocatorReplaceDecommissioningVoter ||
+		a == AllocatorRemoveDecommissioningVoter {
+		t = VoterTarget
+	} else if a == AllocatorRemoveNonVoter ||
+		a == AllocatorAddNonVoter ||
+		a == AllocatorReplaceDeadNonVoter ||
+		a == AllocatorRemoveDeadNonVoter ||
+		a == AllocatorReplaceDecommissioningNonVoter ||
+		a == AllocatorRemoveDecommissioningNonVoter {
+		t = NonVoterTarget
+	}
+	return t
+}
+
+// ReplicaStatus returns whether the action is due to a live, dead, or
+// decommissioning replica.
+func (a AllocatorAction) ReplicaStatus() ReplicaStatus {
+	var s ReplicaStatus
+	if a == AllocatorRemoveVoter ||
+		a == AllocatorRemoveNonVoter ||
+		a == AllocatorAddVoter ||
+		a == AllocatorAddNonVoter {
+		s = Alive
+	} else if a == AllocatorReplaceDeadVoter ||
+		a == AllocatorReplaceDeadNonVoter ||
+		a == AllocatorRemoveDeadVoter ||
+		a == AllocatorRemoveDeadNonVoter {
+		s = Dead
+	} else if a == AllocatorReplaceDecommissioningVoter ||
+		a == AllocatorReplaceDecommissioningNonVoter ||
+		a == AllocatorRemoveDecommissioningVoter ||
+		a == AllocatorRemoveDecommissioningNonVoter {
+		s = Decommissioning
+	}
+	return s
+}
+
 var allocatorActionNames = map[AllocatorAction]string{
 	AllocatorNoop:        "noop",
 	AllocatorRemoveVoter: "remove voter",
@@ -268,6 +335,19 @@ func (t TargetReplicaType) String() string {
 	}
 }
 
+func (s ReplicaStatus) String() string {
+	switch s {
+	case Alive:
+		return "live"
+	case Dead:
+		return "dead"
+	case Decommissioning:
+		return "decommissioning"
+	default:
+		panic(fmt.Sprintf("unknown replicaStatus %d", s))
+	}
+}
+
 type transferDecision int
 
 const (
@@ -595,6 +675,146 @@ func GetNeededNonVoters(numVoters, zoneConfigNonVoterCount, clusterNodes int) in
 	return need
 }
 
+// WillHaveFragileQuorum determines, based on the number of existing voters,
+// incoming voters, and needed voters, whether we will be up-replicating to a
+// state in which we still do not have enough needed voters and yet will have
+// a fragile quorum due to an even number of voter replicas.
+func WillHaveFragileQuorum(
+	numExistingVoters, numNewVoters, zoneConfigVoterCount, clusterNodes int,
+) bool {
+	neededVoters := GetNeededVoters(int32(zoneConfigVoterCount), clusterNodes)
+	willHave := numExistingVoters + numNewVoters
+	// NB: If willHave >= neededVoters, then always allow up-replicating as that
+	// will be the case when up-replicating a range with a decommissioning
+	// replica.
+	return numNewVoters > 0 && willHave < neededVoters && willHave%2 == 0
+}
+
+// LiveAndDeadVoterAndNonVoterReplicas splits up the replicas in the given range
+// descriptor into voters vs non-voters and live replicas vs dead replicas.
+func LiveAndDeadVoterAndNonVoterReplicas(
+	storePool storepool.AllocatorStorePool, desc *roachpb.RangeDescriptor,
+) (
+	voterReplicas, nonVoterReplicas, liveVoterReplicas, deadVoterReplicas, liveNonVoterReplicas, deadNonVoterReplicas []roachpb.ReplicaDescriptor,
+) {
+	voterReplicas = desc.Replicas().VoterDescriptors()
+	nonVoterReplicas = desc.Replicas().NonVoterDescriptors()
+	liveVoterReplicas, deadVoterReplicas = storePool.LiveAndDeadReplicas(
+		voterReplicas, true, /* includeSuspectAndDrainingStores */
+	)
+	liveNonVoterReplicas, deadNonVoterReplicas = storePool.LiveAndDeadReplicas(
+		nonVoterReplicas, true, /* includeSuspectAndDrainingStores */
+	)
+	return
+}
+
+// DetermineReplicaToReplaceAndFilter is used on add or replace allocator actions
+// to filter the set of live voter and non-voter replicas to use in determining
+// a new allocation target. It identifies a dead or decommissioning replica to
+// replace from the list of voters or non-voters, depending on the replica
+// status and target type, and returns the filtered live voters and non-voters
+// along with the list of existing replicas and the index of the removal candidate.
+// For an add action, no replica is removed and a removeIdx of -1 is returned;
+// if no candidates for replacement can be found during a replace action, the
+// returned nothingToDo flag will be set to true.
+func DetermineReplicaToReplaceAndFilter(
+	storePool storepool.AllocatorStorePool,
+	action AllocatorAction,
+	voters, nonVoters []roachpb.ReplicaDescriptor,
+	liveVoterReplicas, deadVoterReplicas []roachpb.ReplicaDescriptor,
+	liveNonVoterReplicas, deadNonVoterReplicas []roachpb.ReplicaDescriptor,
+) (
+	existing, remainingLiveVoters, remainingLiveNonVoters []roachpb.ReplicaDescriptor,
+	removeIdx int,
+	nothingToDo bool,
+	err error,
+) {
+	removeIdx = -1
+	remainingLiveVoters = liveVoterReplicas
+	remainingLiveNonVoters = liveNonVoterReplicas
+	var deadReplicas, removalCandidates []roachpb.ReplicaDescriptor
+
+	if !(action.Add() || action.Replace()) {
+		err = errors.AssertionFailedf(
+			"unexpected attempt to filter replicas on non-add/non-replacement action %s",
+			action,
+		)
+		return
+	}
+
+	replicaType := action.TargetReplicaType()
+	replicaStatus := action.ReplicaStatus()
+
+	switch replicaType {
+	case VoterTarget:
+		existing = voters
+		deadReplicas = deadVoterReplicas
+	case NonVoterTarget:
+		existing = nonVoters
+		deadReplicas = deadNonVoterReplicas
+	default:
+		panic(fmt.Sprintf("unknown targetReplicaType: %s", replicaType))
+	}
+	switch replicaStatus {
+	case Alive:
+		// NB: Live replicas are not candidates for replacement.
+		return
+	case Dead:
+		removalCandidates = deadReplicas
+	case Decommissioning:
+		removalCandidates = storePool.DecommissioningReplicas(existing)
+	default:
+		panic(fmt.Sprintf("unknown replicaStatus: %s", replicaStatus))
+	}
+	if len(removalCandidates) == 0 {
+		nothingToDo = true
+		return
+	}
+
+	removeIdx = getRemoveIdx(existing, removalCandidates[0])
+	if removeIdx < 0 {
+		err = errors.AssertionFailedf(
+			"%s %s %v unexpectedly not found in %v",
+			replicaStatus, replicaType, removalCandidates[0], existing,
+		)
+		return
+	}
+
+	// TODO(sarkesian): Add comment on why this filtering only happens for voters.
+	if replicaType == VoterTarget {
+		if len(existing) == 1 {
+			// If only one replica remains, that replica is the leaseholder and
+			// we won't be able to swap it out. Ignore the removal and simply add
+			// a replica.
+			removeIdx = -1
+		}
+
+		if removeIdx >= 0 {
+			replToRemove := existing[removeIdx]
+			for i, r := range liveVoterReplicas {
+				if r.ReplicaID == replToRemove.ReplicaID {
+					remainingLiveVoters = append(liveVoterReplicas[:i:i], liveVoterReplicas[i+1:]...)
+					break
+				}
+			}
+		}
+	}
+	return
+}
+
+func getRemoveIdx(
+	repls []roachpb.ReplicaDescriptor, deadOrDecommissioningRepl roachpb.ReplicaDescriptor,
+) (removeIdx int) {
+	removeIdx = -1
+	for i, rDesc := range repls {
+		if rDesc.StoreID == deadOrDecommissioningRepl.StoreID {
+			removeIdx = i
+			break
+		}
+	}
+	return removeIdx
+}
+
 // ComputeAction determines the exact operation needed to repair the
 // supplied range, as governed by the supplied zone configuration. It
 // returns the required action that should be taken and a priority.
@@ -925,7 +1145,11 @@ func (s *GoodCandidateSelector) selectOne(cl candidateList) *candidate {
 	return cl.selectGood(s.randGen)
 }
 
-func (a *Allocator) allocateTarget(
+// AllocateTarget returns a suitable store for a new allocation of a voting or
+// non-voting replica with the required attributes. Nodes already accommodating
+// voting replicas are ruled out in the voter case, and nodes accommodating
+// _any_ replicas are ruled out in the non-voter case.
+func (a *Allocator) AllocateTarget(
 	ctx context.Context,
 	storePool storepool.AllocatorStorePool,
 	conf roachpb.SpanConfig,
@@ -986,6 +1210,50 @@ func (a *Allocator) allocateTarget(
 	}
 }
 
+// CheckAvoidsFragileQuorum ensures that if allocating a new voter would result
+// in an even number of voters, we are still able to allocate a further voter
+// target beyond it, so that we avoid a fragile quorum state. This check should
+// be performed whenever we are planning or testing allocation of a new voter.
+//
+// We can skip this check if we're swapping a replica or allocating a non-voter,
+// since that does not change the quorum size.
+func (a *Allocator) CheckAvoidsFragileQuorum(
+	ctx context.Context,
+	storePool storepool.AllocatorStorePool,
+	conf roachpb.SpanConfig,
+	existingVoters, remainingLiveNonVoters []roachpb.ReplicaDescriptor,
+	replicaStatus ReplicaStatus,
+	replicaType TargetReplicaType,
+	newTarget roachpb.ReplicationTarget,
+	isReplacement bool,
+) error {
+	// Validation is only applicable when allocating new voters.
+	if replicaType != VoterTarget {
+		return nil
+	}
+	newVoters := 0
+	if !isReplacement {
+		newVoters = 1
+	}
+	clusterNodes := storePool.ClusterNodeCount()
+	neededVoters := GetNeededVoters(conf.GetNumVoters(), clusterNodes)
+
+	if WillHaveFragileQuorum(len(existingVoters), newVoters, neededVoters, clusterNodes) {
+		// This means we are going to up-replicate to an even replica state.
+		// Check if it is possible to go to an odd replica state beyond it.
+		oldPlusNewReplicas := existingVoters
+		oldPlusNewReplicas = append(
+			oldPlusNewReplicas,
+			roachpb.ReplicaDescriptor{NodeID: newTarget.NodeID, StoreID: newTarget.StoreID},
+		)
+
+		_, _, err := a.AllocateVoter(ctx, storePool, conf, oldPlusNewReplicas, remainingLiveNonVoters, replicaStatus)
+		return err
+	}
+
+	return nil
+}
+
 // AllocateVoter returns a suitable store for a new allocation of a voting
 // replica with the required attributes. Nodes already accommodating existing
 // voting replicas are ruled out as targets.
@@ -996,7 +1264,7 @@ func (a *Allocator) AllocateVoter(
 	existingVoters, existingNonVoters []roachpb.ReplicaDescriptor,
 	replicaStatus ReplicaStatus,
 ) (roachpb.ReplicationTarget, string, error) {
-	return a.allocateTarget(ctx, storePool, conf, existingVoters, existingNonVoters, replicaStatus, VoterTarget)
+	return a.AllocateTarget(ctx, storePool, conf, existingVoters, existingNonVoters, replicaStatus, VoterTarget)
 }
 
 // AllocateNonVoter returns a suitable store for a new allocation of a
@@ -1009,7 +1277,7 @@ func (a *Allocator) AllocateNonVoter(
 	existingVoters, existingNonVoters []roachpb.ReplicaDescriptor,
 	replicaStatus ReplicaStatus,
 ) (roachpb.ReplicationTarget, string, error) {
-	return a.allocateTarget(ctx, storePool, conf, existingVoters, existingNonVoters, replicaStatus, NonVoterTarget)
+	return a.AllocateTarget(ctx, storePool, conf, existingVoters, existingNonVoters, replicaStatus, NonVoterTarget)
 }
 
 // AllocateTargetFromList returns a suitable store for a new allocation of a
diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go
index 588cf6e5cb23..1233bc6444ff 100644
--- a/pkg/kv/kvserver/replicate_queue.go
+++ b/pkg/kv/kvserver/replicate_queue.go
@@ -1003,14 +1003,9 @@ func (rq *replicateQueue) PlanOneChange(
 	// range descriptor.
 	desc, conf := repl.DescAndSpanConfig()
 
-	voterReplicas := desc.Replicas().VoterDescriptors()
-	nonVoterReplicas := desc.Replicas().NonVoterDescriptors()
-	liveVoterReplicas, deadVoterReplicas := rq.storePool.LiveAndDeadReplicas(
-		voterReplicas, true, /* includeSuspectAndDrainingStores */
-	)
-	liveNonVoterReplicas, deadNonVoterReplicas := rq.storePool.LiveAndDeadReplicas(
-		nonVoterReplicas, true, /* includeSuspectAndDrainingStores */
-	)
+	voterReplicas, nonVoterReplicas,
+		liveVoterReplicas, deadVoterReplicas,
+		liveNonVoterReplicas, deadNonVoterReplicas := allocatorimpl.LiveAndDeadVoterAndNonVoterReplicas(rq.storePool, desc)
 
 	// NB: the replication layer ensures that the below operations don't cause
 	// unavailability; see:
@@ -1021,21 +1016,47 @@ func (rq *replicateQueue) PlanOneChange(
 
 	var err error
 	var op AllocationOp
+	removeIdx := -1
+	nothingToDo := false
 	switch action {
 	case allocatorimpl.AllocatorNoop, allocatorimpl.AllocatorRangeUnavailable:
 		// We're either missing liveness information or the range is known to have
 		// lost quorum. Either way, it's not a good idea to make changes right now.
 		// Let the scanner requeue it again later.
 
-	// Add replicas.
-	case allocatorimpl.AllocatorAddVoter:
-		op, err = rq.addOrReplaceVoters(
-			ctx, repl, liveVoterReplicas, liveNonVoterReplicas, -1 /* removeIdx */, allocatorimpl.Alive, allocatorPrio,
-		)
-	case allocatorimpl.AllocatorAddNonVoter:
-		op, err = rq.addOrReplaceNonVoters(
-			ctx, repl, liveVoterReplicas, liveNonVoterReplicas, -1 /* removeIdx */, allocatorimpl.Alive, allocatorPrio,
-		)
+	// Add replicas, replace dead replicas, or replace decommissioning replicas.
+	case allocatorimpl.AllocatorAddVoter, allocatorimpl.AllocatorAddNonVoter,
+		allocatorimpl.AllocatorReplaceDeadVoter, allocatorimpl.AllocatorReplaceDeadNonVoter,
+		allocatorimpl.AllocatorReplaceDecommissioningVoter, allocatorimpl.AllocatorReplaceDecommissioningNonVoter:
+		var existing, remainingLiveVoters, remainingLiveNonVoters []roachpb.ReplicaDescriptor
+
+		existing, remainingLiveVoters, remainingLiveNonVoters, removeIdx, nothingToDo, err =
+			allocatorimpl.DetermineReplicaToReplaceAndFilter(
+				rq.storePool,
+				action,
+				voterReplicas, nonVoterReplicas,
+				liveVoterReplicas, deadVoterReplicas,
+				liveNonVoterReplicas, deadNonVoterReplicas,
+			)
+		if nothingToDo || err != nil {
+			// Nothing to do.
+			break
+		}
+
+		switch action.TargetReplicaType() {
+		case allocatorimpl.VoterTarget:
+			op, err = rq.addOrReplaceVoters(
+				ctx, repl, existing, remainingLiveVoters, remainingLiveNonVoters,
+				removeIdx, action.ReplicaStatus(), allocatorPrio,
+			)
+		case allocatorimpl.NonVoterTarget:
+			op, err = rq.addOrReplaceNonVoters(
+				ctx, repl, existing, remainingLiveVoters, remainingLiveNonVoters,
+				removeIdx, action.ReplicaStatus(), allocatorPrio,
+			)
+		default:
+			panic(fmt.Sprintf("unsupported targetReplicaType: %v", action.TargetReplicaType()))
+		}
 
 	// Remove replicas.
 	case allocatorimpl.AllocatorRemoveVoter:
@@ -1043,67 +1064,6 @@
 	case allocatorimpl.AllocatorRemoveNonVoter:
 		op, err = rq.removeNonVoter(ctx, repl, voterReplicas, nonVoterReplicas)
 
-	// Replace dead replicas.
-	case allocatorimpl.AllocatorReplaceDeadVoter:
-		if len(deadVoterReplicas) == 0 {
-			// Nothing to do.
-			break
-		}
-		removeIdx := getRemoveIdx(voterReplicas, deadVoterReplicas[0])
-		if removeIdx < 0 {
-			err = errors.AssertionFailedf(
-				"dead voter %v unexpectedly not found in %v",
-				deadVoterReplicas[0], voterReplicas)
-			break
-		}
-		op, err = rq.addOrReplaceVoters(
-			ctx, repl, liveVoterReplicas, liveNonVoterReplicas, removeIdx, allocatorimpl.Dead, allocatorPrio)
-	case allocatorimpl.AllocatorReplaceDeadNonVoter:
-		if len(deadNonVoterReplicas) == 0 {
-			// Nothing to do.
-			break
-		}
-		removeIdx := getRemoveIdx(nonVoterReplicas, deadNonVoterReplicas[0])
-		if removeIdx < 0 {
-			err = errors.AssertionFailedf(
-				"dead non-voter %v unexpectedly not found in %v",
-				deadNonVoterReplicas[0], nonVoterReplicas)
-			break
-		}
-		op, err = rq.addOrReplaceNonVoters(
-			ctx, repl, liveVoterReplicas, liveNonVoterReplicas, removeIdx, allocatorimpl.Dead, allocatorPrio)
-	// Replace decommissioning replicas.
-	case allocatorimpl.AllocatorReplaceDecommissioningVoter:
-		decommissioningVoterReplicas := rq.storePool.DecommissioningReplicas(voterReplicas)
-		if len(decommissioningVoterReplicas) == 0 {
-			// Nothing to do.
-			break
-		}
-		removeIdx := getRemoveIdx(voterReplicas, decommissioningVoterReplicas[0])
-		if removeIdx < 0 {
-			err = errors.AssertionFailedf(
-				"decommissioning voter %v unexpectedly not found in %v",
-				decommissioningVoterReplicas[0], voterReplicas)
-			break
-		}
-		op, err = rq.addOrReplaceVoters(
-			ctx, repl, liveVoterReplicas, liveNonVoterReplicas, removeIdx, allocatorimpl.Decommissioning, allocatorPrio)
-	case allocatorimpl.AllocatorReplaceDecommissioningNonVoter:
-		decommissioningNonVoterReplicas := rq.storePool.DecommissioningReplicas(nonVoterReplicas)
-		if len(decommissioningNonVoterReplicas) == 0 {
-			// Nothing to do.
-			break
-		}
-		removeIdx := getRemoveIdx(nonVoterReplicas, decommissioningNonVoterReplicas[0])
-		if removeIdx < 0 {
-			err = errors.AssertionFailedf(
-				"decommissioning non-voter %v unexpectedly not found in %v",
-				decommissioningNonVoterReplicas[0], nonVoterReplicas)
-			break
-		}
-		op, err = rq.addOrReplaceNonVoters(
-			ctx, repl, liveVoterReplicas, liveNonVoterReplicas, removeIdx, allocatorimpl.Decommissioning, allocatorPrio)
-
 	// Remove decommissioning replicas.
 	//
 	// NB: these two paths will only be hit when the range is over-replicated and
@@ -1161,18 +1121,6 @@
 	return change, err
 }
 
-func getRemoveIdx(
-	repls []roachpb.ReplicaDescriptor, deadOrDecommissioningRepl roachpb.ReplicaDescriptor,
-) (removeIdx int) {
-	for i, rDesc := range repls {
-		if rDesc.StoreID == deadOrDecommissioningRepl.StoreID {
-			removeIdx = i
-			break
-		}
-	}
-	return removeIdx
-}
-
 func maybeAnnotateDecommissionErr(err error, action allocatorimpl.AllocatorAction) error {
 	if err != nil && isDecommissionAction(action) {
 		err = decommissionPurgatoryError{err}
@@ -1198,32 +1146,15 @@ func isDecommissionAction(action allocatorimpl.AllocatorAction) bool {
 func (rq *replicateQueue) addOrReplaceVoters(
 	ctx context.Context,
 	repl *Replica,
-	liveVoterReplicas, liveNonVoterReplicas []roachpb.ReplicaDescriptor,
+	existingVoters []roachpb.ReplicaDescriptor,
+	remainingLiveVoters, remainingLiveNonVoters []roachpb.ReplicaDescriptor,
 	removeIdx int,
 	replicaStatus allocatorimpl.ReplicaStatus,
 	allocatorPriority float64,
 ) (op AllocationOp, _ error) {
 	effects := effectBuilder{}
 	desc, conf := repl.DescAndSpanConfig()
-	existingVoters := desc.Replicas().VoterDescriptors()
-	if len(existingVoters) == 1 {
-		// If only one replica remains, that replica is the leaseholder and
-		// we won't be able to swap it out. Ignore the removal and simply add
-		// a replica.
-		removeIdx = -1
-	}
-
-	remainingLiveVoters := liveVoterReplicas
-	remainingLiveNonVoters := liveNonVoterReplicas
-	if removeIdx >= 0 {
-		replToRemove := existingVoters[removeIdx]
-		for i, r := range liveVoterReplicas {
-			if r.ReplicaID == replToRemove.ReplicaID {
-				remainingLiveVoters = append(liveVoterReplicas[:i:i], liveVoterReplicas[i+1:]...)
-				break
-			}
-		}
-	}
+	isReplace := removeIdx >= 0
 
 	// The allocator should not try to re-add this replica since there is a reason
 	// we're removing it (i.e. dead or decommissioning). If we left the replica in
@@ -1233,41 +1164,23 @@ func (rq *replicateQueue) addOrReplaceVoters(
 	if err != nil {
 		return nil, err
 	}
-	if removeIdx >= 0 && newVoter.StoreID == existingVoters[removeIdx].StoreID {
+	if isReplace && newVoter.StoreID == existingVoters[removeIdx].StoreID {
 		return nil, errors.AssertionFailedf("allocator suggested to replace replica on s%d with itself", newVoter.StoreID)
 	}
 
-	clusterNodes := rq.storePool.ClusterNodeCount()
-	neededVoters := allocatorimpl.GetNeededVoters(conf.GetNumVoters(), clusterNodes)
-
-	// Only up-replicate if there are suitable allocation targets such that,
-	// either the replication goal is met, or it is possible to get to the next
+	// We only want to up-replicate if there are suitable allocation targets such
+	// that, either the replication goal is met, or it is possible to get to the next
 	// odd number of replicas. A consensus group of size 2n has worse failure
 	// tolerance properties than a group of size 2n - 1 because it has a larger
 	// quorum. For example, up-replicating from 1 to 2 replicas only makes sense
 	// if it is possible to be able to go to 3 replicas.
-	//
-	// NB: If willHave > neededVoters, then always allow up-replicating as that
-	// will be the case when up-replicating a range with a decommissioning
-	// replica.
-	//
-	// We skip this check if we're swapping a replica, since that does not
-	// change the quorum size.
-	if willHave := len(existingVoters) + 1; removeIdx < 0 && willHave < neededVoters && willHave%2 == 0 {
-		// This means we are going to up-replicate to an even replica state.
-		// Check if it is possible to go to an odd replica state beyond it.
-		oldPlusNewReplicas := append([]roachpb.ReplicaDescriptor(nil), existingVoters...)
-		oldPlusNewReplicas = append(
-			oldPlusNewReplicas,
-			roachpb.ReplicaDescriptor{NodeID: newVoter.NodeID, StoreID: newVoter.StoreID},
-		)
-		_, _, err := rq.allocator.AllocateVoter(ctx, rq.storePool, conf, oldPlusNewReplicas, remainingLiveNonVoters, replicaStatus)
-		if err != nil {
-			// It does not seem possible to go to the next odd replica state. Note
-			// that AllocateVoter returns an allocatorError (a PurgatoryError)
-			// when purgatory is requested.
-			return nil, errors.Wrap(err, "avoid up-replicating to fragile quorum")
-		}
+	if err := rq.allocator.CheckAvoidsFragileQuorum(ctx, rq.storePool, conf,
+		existingVoters, remainingLiveNonVoters,
+		replicaStatus, allocatorimpl.VoterTarget, newVoter, isReplace); err != nil {
+		// It does not seem possible to go to the next odd replica state. Note
+		// that AllocateVoter returns an allocatorError (a PurgatoryError)
+		// when purgatory is requested.
+		return nil, errors.Wrap(err, "avoid up-replicating to fragile quorum")
 	}
 	// Figure out whether we should be promoting an existing non-voting replica to
 	// a voting replica or if we ought to be adding a voter afresh.
@@ -1291,7 +1204,7 @@ func (rq *replicateQueue) addOrReplaceVoters(
 		})
 		ops = roachpb.MakeReplicationChanges(roachpb.ADD_VOTER, newVoter)
 	}
-	if removeIdx < 0 {
+	if !isReplace {
 		log.KvDistribution.Infof(ctx, "adding voter %+v: %s",
 			newVoter, rangeRaftProgress(repl.RaftStatus(), existingVoters))
 	} else {
@@ -1333,14 +1246,14 @@ func (rq *replicateQueue) addOrReplaceVoters(
 func (rq *replicateQueue) addOrReplaceNonVoters(
 	ctx context.Context,
 	repl *Replica,
+	existingNonVoters []roachpb.ReplicaDescriptor,
 	liveVoterReplicas, liveNonVoterReplicas []roachpb.ReplicaDescriptor,
 	removeIdx int,
 	replicaStatus allocatorimpl.ReplicaStatus,
 	allocatorPrio float64,
 ) (op AllocationOp, _ error) {
-	desc, conf := repl.DescAndSpanConfig()
-	existingNonVoters := desc.Replicas().NonVoterDescriptors()
 	effects := effectBuilder{}
+	conf := repl.SpanConfig()
 	newNonVoter, details, err := rq.allocator.AllocateNonVoter(ctx, rq.storePool, conf, liveVoterReplicas, liveNonVoterReplicas, replicaStatus)
 	if err != nil {
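
Worked example (illustrative, not part of the patch): consider a range with 3 voters in a 5-node cluster whose span config asks for 5 voters. Adding one voter yields 4 voters, which is even and still below the needed 5, so quorum rises to 3 while failure tolerance stays at 1; the planner must therefore confirm that a fifth target exists before adding the fourth. A minimal sketch of that arithmetic using the exported helper (the example function name and all values here are hypothetical):

```go
package allocatorimpl_test

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/allocatorimpl"
)

// ExampleWillHaveFragileQuorum is a hypothetical example showing the
// arithmetic behind the fragile-quorum check.
func ExampleWillHaveFragileQuorum() {
	fragile := allocatorimpl.WillHaveFragileQuorum(
		3, // numExistingVoters
		1, // numNewVoters
		5, // zoneConfigVoterCount
		5, // clusterNodes
	)
	// 3+1 = 4 voters is even and below the needed 5: quorum rises to 3 while
	// failure tolerance stays at 1, so the caller must verify that a fifth
	// voter target is available before adding the fourth.
	fmt.Println(fragile)
	// Output: true
}
```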
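The helpers exported above are meant to let callers outside the replicate queue reproduce its add/replace planning sequence. The following is a rough sketch of such a caller under assumed plumbing: the package name, planReplicaChange, and the way the allocator, store pool, range descriptor, and span config are obtained are all hypothetical; only the allocatorimpl calls mirror the signatures introduced in this diff.

```go
// Hypothetical sketch (not part of this change).
package replicaplanner

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/allocatorimpl"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func planReplicaChange(
	ctx context.Context,
	alloc *allocatorimpl.Allocator,
	sp storepool.AllocatorStorePool,
	desc *roachpb.RangeDescriptor,
	conf roachpb.SpanConfig,
	action allocatorimpl.AllocatorAction,
) (roachpb.ReplicationTarget, error) {
	// Split the range's replicas by type and liveness in a single call.
	voters, nonVoters,
		liveVoters, deadVoters,
		liveNonVoters, deadNonVoters := allocatorimpl.LiveAndDeadVoterAndNonVoterReplicas(sp, desc)

	// Pick the replica to replace (if any) and filter the live sets.
	existing, remainingLiveVoters, remainingLiveNonVoters, removeIdx, nothingToDo, err :=
		allocatorimpl.DetermineReplicaToReplaceAndFilter(
			sp, action,
			voters, nonVoters,
			liveVoters, deadVoters,
			liveNonVoters, deadNonVoters,
		)
	if err != nil || nothingToDo {
		return roachpb.ReplicationTarget{}, err
	}

	// Ask the allocator for a target of the appropriate type.
	target, _, err := alloc.AllocateTarget(
		ctx, sp, conf, remainingLiveVoters, remainingLiveNonVoters,
		action.ReplicaStatus(), action.TargetReplicaType(),
	)
	if err != nil {
		return roachpb.ReplicationTarget{}, err
	}

	// For new voters, refuse plans that would leave the range on an even,
	// under-replicated voter count with no way to reach the next odd count.
	if err := alloc.CheckAvoidsFragileQuorum(
		ctx, sp, conf, existing, remainingLiveNonVoters,
		action.ReplicaStatus(), action.TargetReplicaType(),
		target, removeIdx >= 0, /* isReplacement */
	); err != nil {
		return roachpb.ReplicationTarget{}, err
	}
	return target, nil
}
```

The ordering mirrors PlanOneChange: filter first, then allocate, then reject plans that would strand the range on a fragile even voter count.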