diff --git a/pkg/kv/kvserver/allocation_op.go b/pkg/kv/kvserver/allocation_op.go
index 9ef3587a35c5..06f97131664b 100644
--- a/pkg/kv/kvserver/allocation_op.go
+++ b/pkg/kv/kvserver/allocation_op.go
@@ -42,6 +42,8 @@ type AllocationTransferLeaseOp struct {
 	sideEffects func()
 }
 
+var _ AllocationOp = &AllocationTransferLeaseOp{}
+
 // lhBeingRemoved returns true when the leaseholder is will be removed if this
 // operation succeeds, otherwise false. This is always true for lease
 // transfers.
@@ -76,6 +78,8 @@ type AllocationChangeReplicasOp struct {
 	sideEffects func()
 }
 
+var _ AllocationOp = &AllocationChangeReplicasOp{}
+
 // lhBeingRemoved returns true when the voter removals for this change replicas
 // operation includes the leaseholder store.
 func (o AllocationChangeReplicasOp) lhBeingRemoved() bool {
@@ -107,6 +111,8 @@ func (o AllocationChangeReplicasOp) trackPlanningMetrics() {
 // atomic change replicas operation and remove any remaining learners.
 type AllocationFinalizeAtomicReplicationOp struct{}
 
+var _ AllocationOp = &AllocationFinalizeAtomicReplicationOp{}
+
 // TODO(kvoli): This always returns false, however it is possible that the LH
 // may have been removed here.
 func (o AllocationFinalizeAtomicReplicationOp) lhBeingRemoved() bool { return false }
@@ -116,6 +122,8 @@ func (o AllocationFinalizeAtomicReplicationOp) trackPlanningMetrics()
 // AllocationNoop represents no operation.
 type AllocationNoop struct{}
 
+var _ AllocationOp = &AllocationNoop{}
+
 func (o AllocationNoop) lhBeingRemoved() bool                                { return false }
 func (o AllocationNoop) applyImpact(storepool storepool.AllocatorStorePool) {}
 func (o AllocationNoop) trackPlanningMetrics()                               {}
diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go
index 74c165f1268b..9f676f5e21e6 100644
--- a/pkg/kv/kvserver/replicate_queue.go
+++ b/pkg/kv/kvserver/replicate_queue.go
@@ -938,7 +938,14 @@ func (rq *replicateQueue) processOneChange(
 	// will change quickly enough in order to not get the same error and
 	// outcome.
 	if err != nil {
-		// Annotate the planning error if it is associated with a decomission
+		// If there was an error during the planning process, possibly due to
+		// the allocator failing to find a target, report this as a failure for
+		// the associated allocator action metric if we are not in dry run.
+		if !dryRun {
+			rq.metrics.trackErrorByAllocatorAction(ctx, change.Action)
+		}
+
+		// Annotate the planning error if it is associated with a decommission
 		// allocator action so that the replica will be put into purgatory
 		// rather than waiting for the next scanner cycle. This is also done
 		// for application failures below.
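
Note on the allocation_op.go changes: the added `var _ AllocationOp = &AllocationTransferLeaseOp{}` lines are the standard Go compile-time assertion that a type satisfies an interface; they produce no runtime code. A minimal sketch of the idiom, using placeholder names (Op, noop) rather than the actual kvserver types:

package main

// Op stands in for an interface such as AllocationOp.
type Op interface {
	lhBeingRemoved() bool
}

// noop stands in for a concrete implementation such as AllocationNoop.
type noop struct{}

// Compile-time check: if noop ever stops satisfying Op, the build fails
// on this line rather than at some distant call site.
var _ Op = noop{}

func (noop) lhBeingRemoved() bool { return false }

func main() {}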