diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 5c4f57fe885a..29125cb39977 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -6303,7 +6303,7 @@ func TestProtectedTimestampsDuringBackup(t *testing.T) { require.NoError(t, err) lhServer := tc.Server(int(l.Replica.NodeID) - 1) s, repl := getFirstStoreReplica(t, lhServer, startKey) - trace, _, err := s.ManuallyEnqueue(ctx, "gc", repl, skipShouldQueue) + trace, _, err := s.ManuallyEnqueue(ctx, "mvccGC", repl, skipShouldQueue) require.NoError(t, err) fmt.Fprintf(&traceBuf, "%s\n", trace.String()) } diff --git a/pkg/ccl/importccl/import_into_test.go b/pkg/ccl/importccl/import_into_test.go index ab45e8043ec0..8f3517b2bc05 100644 --- a/pkg/ccl/importccl/import_into_test.go +++ b/pkg/ccl/importccl/import_into_test.go @@ -127,7 +127,7 @@ func TestProtectedTimestampsDuringImportInto(t *testing.T) { require.NoError(t, err) lhServer := tc.Server(int(l.Replica.NodeID) - 1) s, repl := getFirstStoreReplica(t, lhServer, startKey) - trace, _, err := s.ManuallyEnqueue(ctx, "gc", repl, skipShouldQueue) + trace, _, err := s.ManuallyEnqueue(ctx, "mvccGC", repl, skipShouldQueue) require.NoError(t, err) fmt.Fprintf(&traceBuf, "%s\n", trace.String()) } diff --git a/pkg/kv/kvserver/batcheval/cmd_gc.go b/pkg/kv/kvserver/batcheval/cmd_gc.go index a06cef156dd6..7f3dccd85ed3 100644 --- a/pkg/kv/kvserver/batcheval/cmd_gc.go +++ b/pkg/kv/kvserver/batcheval/cmd_gc.go @@ -41,9 +41,9 @@ func declareKeysGC( latchSpans.AddMVCC(spanset.SpanReadWrite, roachpb.Span{Key: key.Key}, header.Timestamp) } } - // Be smart here about blocking on the threshold keys. The GC queue can send an empty - // request first to bump the thresholds, and then another one that actually does work - // but can avoid declaring these keys below. + // Be smart here about blocking on the threshold keys. The MVCC GC queue can + // send an empty request first to bump the thresholds, and then another one + // that actually does work but can avoid declaring these keys below. if !gcr.Threshold.IsEmpty() { latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.RangeGCThresholdKey(rs.GetRangeID())}) } @@ -100,8 +100,8 @@ func GC( updated := newThreshold.Forward(args.Threshold) // Don't write the GC threshold key unless we have to. We also don't - // declare the key unless we have to (to allow the GC queue to batch - // requests more efficiently), and we must honor what we declare. + // declare the key unless we have to (to allow the MVCC GC queue to + // batch requests more efficiently), and we must honor what we declare. if updated { if err := MakeStateLoader(cArgs.EvalCtx).SetGCThreshold( ctx, readWriter, cArgs.Stats, &newThreshold, diff --git a/pkg/kv/kvserver/batcheval/cmd_push_txn.go b/pkg/kv/kvserver/batcheval/cmd_push_txn.go index dee603da6a67..ac7c7b01906a 100644 --- a/pkg/kv/kvserver/batcheval/cmd_push_txn.go +++ b/pkg/kv/kvserver/batcheval/cmd_push_txn.go @@ -98,9 +98,9 @@ func declareKeysPushTransaction( // If the pushee is aborted, its timestamp will be forwarded to match // its last client activity timestamp (i.e. last heartbeat), if available. // This is done so that the updated timestamp populates the AbortSpan when -// the pusher proceeds to resolve intents, allowing the GC queue to purge -// records for which the transaction coordinator must have found out via -// its heartbeats that the transaction has failed. 
+// the pusher proceeds to resolve intents, allowing the MVCC GC queue to +// purge records for which the transaction coordinator must have found out +// via its heartbeats that the transaction has failed. func PushTxn( ctx context.Context, readWriter storage.ReadWriter, cArgs CommandArgs, resp roachpb.Response, ) (result.Result, error) { diff --git a/pkg/kv/kvserver/client_protectedts_test.go b/pkg/kv/kvserver/client_protectedts_test.go index c039f9d99355..cf91982d881d 100644 --- a/pkg/kv/kvserver/client_protectedts_test.go +++ b/pkg/kv/kvserver/client_protectedts_test.go @@ -116,7 +116,7 @@ func TestProtectedTimestamps(t *testing.T) { testutils.SucceedsSoon(t, func() error { upsertUntilBackpressure() s, repl := getStoreAndReplica() - trace, _, err := s.ManuallyEnqueue(ctx, "gc", repl, false) + trace, _, err := s.ManuallyEnqueue(ctx, "mvccGC", repl, false) require.NoError(t, err) if !processedRegexp.MatchString(trace.String()) { return errors.Errorf("%q does not match %q", trace.String(), processedRegexp) @@ -162,13 +162,13 @@ func TestProtectedTimestamps(t *testing.T) { s, repl := getStoreAndReplica() // The protectedts record will prevent us from aging the MVCC garbage bytes // past the oldest record so shouldQueue should be false. Verify that. - trace, _, err := s.ManuallyEnqueue(ctx, "gc", repl, false /* skipShouldQueue */) + trace, _, err := s.ManuallyEnqueue(ctx, "mvccGC", repl, false /* skipShouldQueue */) require.NoError(t, err) require.Regexp(t, "(?s)shouldQueue=false", trace.String()) // If we skipShouldQueue then gc will run but it should only run up to the // timestamp of our record at the latest. - trace, _, err = s.ManuallyEnqueue(ctx, "gc", repl, true /* skipShouldQueue */) + trace, _, err = s.ManuallyEnqueue(ctx, "mvccGC", repl, true /* skipShouldQueue */) require.NoError(t, err) require.Regexp(t, "(?s)done with GC evaluation for 0 keys", trace.String()) thresh := thresholdFromTrace(trace) @@ -206,7 +206,7 @@ func TestProtectedTimestamps(t *testing.T) { // happens up to the protected timestamp of the new record. require.NoError(t, ptsWithDB.Release(ctx, nil, ptsRec.ID.GetUUID())) testutils.SucceedsSoon(t, func() error { - trace, _, err = s.ManuallyEnqueue(ctx, "gc", repl, false) + trace, _, err = s.ManuallyEnqueue(ctx, "mvccGC", repl, false) require.NoError(t, err) if !processedRegexp.MatchString(trace.String()) { return errors.Errorf("%q does not match %q", trace.String(), processedRegexp) diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index f47826a89688..bd81cdb34f55 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -2601,13 +2601,13 @@ func TestUnsplittableRange(t *testing.T) { // Wait for much longer than the ttl to accumulate GCByteAge. manualClock.Increment(10 * ttl.Nanoseconds()) - // Trigger the GC queue, which should clean up the earlier version of the + // Trigger the MVCC GC queue, which should clean up the earlier version of the // row. Once the first version of the row is cleaned up, the range should // exit the split queue purgatory. We need to tickle the protected timestamp // subsystem to release a timestamp at which we get to actually remove the data. 
require.NoError(t, store.GetStoreConfig().ProtectedTimestampCache.Refresh(ctx, s.Clock().Now())) repl := store.LookupReplica(tableKey) - if err := store.ManualGC(repl); err != nil { + if err := store.ManualMVCCGC(repl); err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvserver/gc/gc.go b/pkg/kv/kvserver/gc/gc.go index b8e137541510..3e21f0919cac 100644 --- a/pkg/kv/kvserver/gc/gc.go +++ b/pkg/kv/kvserver/gc/gc.go @@ -50,7 +50,7 @@ const ( // will be resolved. var IntentAgeThreshold = settings.RegisterDurationSetting( "kv.gc.intent_age_threshold", - "intents older than this threshold will be resolved when encountered by the GC queue", + "intents older than this threshold will be resolved when encountered by the MVCC GC queue", 2*time.Hour, func(d time.Duration) error { if d < 2*time.Minute { @@ -120,7 +120,7 @@ type PureGCer interface { GC(context.Context, []roachpb.GCRequest_GCKey) error } -// A GCer is an abstraction used by the GC queue to carry out chunked deletions. +// A GCer is an abstraction used by the MVCC GC queue to carry out chunked deletions. type GCer interface { Thresholder PureGCer diff --git a/pkg/kv/kvserver/helpers_test.go b/pkg/kv/kvserver/helpers_test.go index 09f3b2fd2e99..057e6d2abb43 100644 --- a/pkg/kv/kvserver/helpers_test.go +++ b/pkg/kv/kvserver/helpers_test.go @@ -178,9 +178,9 @@ func manualQueue(s *Store, q queueImpl, repl *Replica) error { return err } -// ManualGC processes the specified replica using the store's GC queue. -func (s *Store) ManualGC(repl *Replica) error { - return manualQueue(s, s.gcQueue, repl) +// ManualMVCCGC processes the specified replica using the store's MVCC GC queue. +func (s *Store) ManualMVCCGC(repl *Replica) error { + return manualQueue(s, s.mvccGCQueue, repl) } // ManualReplicaGC processes the specified replica using the store's replica diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver.go b/pkg/kv/kvserver/intentresolver/intent_resolver.go index f092c1ad9d28..15383cbfe12d 100644 --- a/pkg/kv/kvserver/intentresolver/intent_resolver.go +++ b/pkg/kv/kvserver/intentresolver/intent_resolver.go @@ -547,8 +547,8 @@ func (ir *IntentResolver) CleanupIntents( // the txn record is GC'ed. // // WARNING: Since this GCs the txn record, it should only be called in response -// to requests coming from the coordinator or the GC Queue. We don't want other -// actors to GC a txn record, since that can cause ambiguities for the +// to requests coming from the coordinator or the MVCC GC Queue. We don't want +// other actors to GC a txn record, since that can cause ambiguities for the // coordinator: if it had STAGED the txn, it won't be able to tell the // difference between a txn that had been implicitly committed, recovered, and // GC'ed, and one that someone else aborted and GC'ed. @@ -629,8 +629,8 @@ func (ir *IntentResolver) CleanupTxnIntentsOnGCAsync( stop.TaskOpts{ TaskName: "processing txn intents", Sem: ir.sem, - // We really do not want to hang up the GC queue on this kind of - // processing, so it's better to just skip txns which we can't + // We really do not want to hang up the MVCC GC queue on this kind + // of processing, so it's better to just skip txns which we can't // pass to the async processor (wait=false). Their intents will // get cleaned up on demand, and we'll eventually get back to // them. 
Not much harm in having old txn records lying around in diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver_test.go b/pkg/kv/kvserver/intentresolver/intent_resolver_test.go index f764bb82d8b9..9930407f58d0 100644 --- a/pkg/kv/kvserver/intentresolver/intent_resolver_test.go +++ b/pkg/kv/kvserver/intentresolver/intent_resolver_test.go @@ -39,7 +39,7 @@ import ( // TestCleanupTxnIntentsOnGCAsync exercises the code which is used to // asynchronously clean up transaction intents and then transaction records. -// This method is invoked from the storage GC queue. +// This method is invoked from the MVCC GC queue. func TestCleanupTxnIntentsOnGCAsync(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/kv/kvserver/metrics.go b/pkg/kv/kvserver/metrics.go index bcd07f865248..4bf1be0d06ed 100644 --- a/pkg/kv/kvserver/metrics.go +++ b/pkg/kv/kvserver/metrics.go @@ -744,27 +744,27 @@ difficult to meaningfully interpret this metric.`, } // Replica queue metrics. - metaGCQueueSuccesses = metric.Metadata{ + metaMVCCGCQueueSuccesses = metric.Metadata{ Name: "queue.gc.process.success", - Help: "Number of replicas successfully processed by the GC queue", + Help: "Number of replicas successfully processed by the MVCC GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, } - metaGCQueueFailures = metric.Metadata{ + metaMVCCGCQueueFailures = metric.Metadata{ Name: "queue.gc.process.failure", - Help: "Number of replicas which failed processing in the GC queue", + Help: "Number of replicas which failed processing in the MVCC GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, } - metaGCQueuePending = metric.Metadata{ + metaMVCCGCQueuePending = metric.Metadata{ Name: "queue.gc.pending", - Help: "Number of pending replicas in the GC queue", + Help: "Number of pending replicas in the MVCC GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, } - metaGCQueueProcessingNanos = metric.Metadata{ + metaMVCCGCQueueProcessingNanos = metric.Metadata{ Name: "queue.gc.processingnanos", - Help: "Nanoseconds spent processing replicas in the GC queue", + Help: "Nanoseconds spent processing replicas in the MVCC GC queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, } @@ -1357,10 +1357,10 @@ type StoreMetrics struct { RaftCoalescedHeartbeatsPending *metric.Gauge // Replica queue metrics. - GCQueueSuccesses *metric.Counter - GCQueueFailures *metric.Counter - GCQueuePending *metric.Gauge - GCQueueProcessingNanos *metric.Counter + MVCCGCQueueSuccesses *metric.Counter + MVCCGCQueueFailures *metric.Counter + MVCCGCQueuePending *metric.Gauge + MVCCGCQueueProcessingNanos *metric.Counter MergeQueueSuccesses *metric.Counter MergeQueueFailures *metric.Counter MergeQueuePending *metric.Gauge @@ -1797,10 +1797,10 @@ func newStoreMetrics(histogramWindow time.Duration) *StoreMetrics { RaftCoalescedHeartbeatsPending: metric.NewGauge(metaRaftCoalescedHeartbeatsPending), // Replica queue metrics. 
- GCQueueSuccesses: metric.NewCounter(metaGCQueueSuccesses), - GCQueueFailures: metric.NewCounter(metaGCQueueFailures), - GCQueuePending: metric.NewGauge(metaGCQueuePending), - GCQueueProcessingNanos: metric.NewCounter(metaGCQueueProcessingNanos), + MVCCGCQueueSuccesses: metric.NewCounter(metaMVCCGCQueueSuccesses), + MVCCGCQueueFailures: metric.NewCounter(metaMVCCGCQueueFailures), + MVCCGCQueuePending: metric.NewGauge(metaMVCCGCQueuePending), + MVCCGCQueueProcessingNanos: metric.NewCounter(metaMVCCGCQueueProcessingNanos), MergeQueueSuccesses: metric.NewCounter(metaMergeQueueSuccesses), MergeQueueFailures: metric.NewCounter(metaMergeQueueFailures), MergeQueuePending: metric.NewGauge(metaMergeQueuePending), diff --git a/pkg/kv/kvserver/mvcc_gc_queue.go b/pkg/kv/kvserver/mvcc_gc_queue.go index 4790ced7a20a..de016745c2bd 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue.go +++ b/pkg/kv/kvserver/mvcc_gc_queue.go @@ -35,29 +35,30 @@ import ( ) const ( - // gcQueueTimerDuration is the duration between GCs of queued replicas. - gcQueueTimerDuration = 1 * time.Second - // gcQueueTimeout is the timeout for a single GC run. - gcQueueTimeout = 10 * time.Minute - // gcQueueIntentBatchTimeout is the timeout for resolving a single batch of - // intents. It is used to ensure progress in the face of unavailable ranges + // mvccGCQueueTimerDuration is the duration between MVCC GCs of queued + // replicas. + mvccGCQueueTimerDuration = 1 * time.Second + // mvccGCQueueTimeout is the timeout for a single MVCC GC run. + mvccGCQueueTimeout = 10 * time.Minute + // mvccGCQueueIntentBatchTimeout is the timeout for resolving a single batch + // of intents. It is used to ensure progress in the face of unavailable ranges // (since intent resolution may touch other ranges), but can prevent progress // for ranged intent resolution if it exceeds the timeout. - gcQueueIntentBatchTimeout = 2 * time.Minute + mvccGCQueueIntentBatchTimeout = 2 * time.Minute - // gcQueueIntentCooldownDuration is the duration to wait between GC attempts - // of the same range when triggered solely by intents. This is to prevent - // continually spinning on intents that belong to active transactions, which - // can't be cleaned up. - gcQueueIntentCooldownDuration = 2 * time.Hour + // mvccGCQueueIntentCooldownDuration is the duration to wait between MVCC GC + // attempts of the same range when triggered solely by intents. This is to + // prevent continually spinning on intents that belong to active transactions, + // which can't be cleaned up. + mvccGCQueueIntentCooldownDuration = 2 * time.Hour // intentAgeNormalization is the average age of outstanding intents // which amount to a score of "1" added to total replica priority. intentAgeNormalization = 8 * time.Hour - // Thresholds used to decide whether to queue for GC based - // on keys and intents. - gcKeyScoreThreshold = 2 - gcIntentScoreThreshold = 1 + // Thresholds used to decide whether to queue for MVCC GC based on keys and + // intents. + mvccGCKeyScoreThreshold = 2 + mvccGCIntentScoreThreshold = 1 probablyLargeAbortSpanSysCountThreshold = 10000 largeAbortSpanBytesThreshold = 16 * (1 << 20) // 16mb @@ -90,9 +91,9 @@ func largeAbortSpan(ms enginepb.MVCCStats) bool { return definitelyLargeAbortSpan || probablyLargeAbortSpan } -// gcQueue manages a queue of replicas slated to be scanned in their -// entirety using the MVCC versions iterator. 
The gc queue manages the -// following tasks: +// mvccGCQueue manages a queue of replicas slated to be scanned in their +// entirety using the MVCC versions iterator. The mvcc gc queue manages +// the following tasks: // // - GC of version data via TTL expiration (and more complex schemes // as implemented going forward). @@ -103,40 +104,40 @@ func largeAbortSpan(ms enginepb.MVCCStats) bool { // // The shouldQueue function combines the need for the above tasks into a // single priority. If any task is overdue, shouldQueue returns true. -type gcQueue struct { +type mvccGCQueue struct { *baseQueue } -// newGCQueue returns a new instance of gcQueue. -func newGCQueue(store *Store) *gcQueue { - gcq := &gcQueue{} - gcq.baseQueue = newBaseQueue( - "gc", gcq, store, +// newMVCCGCQueue returns a new instance of mvccGCQueue. +func newMVCCGCQueue(store *Store) *mvccGCQueue { + mgcq := &mvccGCQueue{} + mgcq.baseQueue = newBaseQueue( + "mvccGC", mgcq, store, queueConfig{ maxSize: defaultQueueMaxSize, needsLease: true, needsSystemConfig: true, acceptsUnsplitRanges: false, processTimeoutFunc: func(st *cluster.Settings, _ replicaInQueue) time.Duration { - timeout := gcQueueTimeout + timeout := mvccGCQueueTimeout if d := queueGuaranteedProcessingTimeBudget.Get(&st.SV); d > timeout { timeout = d } return timeout }, - successes: store.metrics.GCQueueSuccesses, - failures: store.metrics.GCQueueFailures, - pending: store.metrics.GCQueuePending, - processingNanos: store.metrics.GCQueueProcessingNanos, + successes: store.metrics.MVCCGCQueueSuccesses, + failures: store.metrics.MVCCGCQueueFailures, + pending: store.metrics.MVCCGCQueuePending, + processingNanos: store.metrics.MVCCGCQueueProcessingNanos, }, ) - return gcq + return mgcq } -// gcQueueScore holds details about the score returned by makeGCQueueScoreImpl for -// testing and logging. The fields in this struct are documented in -// makeGCQueueScoreImpl. -type gcQueueScore struct { +// mvccGCQueueScore holds details about the score returned by +// makeMVCCGCQueueScoreImpl for testing and logging. The fields in this struct +// are documented in makeMVCCGCQueueScoreImpl. +type mvccGCQueueScore struct { TTL time.Duration LastGC time.Duration DeadFraction float64 @@ -151,8 +152,8 @@ type gcQueueScore struct { ExpMinGCByteAgeReduction int64 } -func (r gcQueueScore) String() string { - if (r == gcQueueScore{}) { +func (r mvccGCQueueScore) String() string { + if (r == mvccGCQueueScore{}) { return "(empty)" } if r.ExpMinGCByteAgeReduction < 0 { @@ -173,7 +174,7 @@ func (r gcQueueScore) String() string { // collection, and if so, at what priority. Returns true for shouldQ // in the event that the cumulative ages of GC'able bytes or extant // intents exceed thresholds. 
-func (gcq *gcQueue) shouldQueue( +func (mgcq *mvccGCQueue) shouldQueue( ctx context.Context, now hlc.ClockTimestamp, repl *Replica, _ spanconfig.StoreReader, ) (bool, float64) { // Consult the protected timestamp state to determine whether we can GC and @@ -184,23 +185,23 @@ func (gcq *gcQueue) shouldQueue( return false, 0 } canAdvanceGCThreshold := !newThreshold.Equal(oldThreshold) - lastGC, err := repl.getQueueLastProcessed(ctx, gcq.name) + lastGC, err := repl.getQueueLastProcessed(ctx, mgcq.name) if err != nil { log.VErrEventf(ctx, 2, "failed to fetch last processed time: %v", err) return false, 0 } - r := makeGCQueueScore(ctx, repl, gcTimestamp, lastGC, conf.TTL(), canAdvanceGCThreshold) + r := makeMVCCGCQueueScore(ctx, repl, gcTimestamp, lastGC, conf.TTL(), canAdvanceGCThreshold) return r.ShouldQueue, r.FinalScore } -func makeGCQueueScore( +func makeMVCCGCQueueScore( ctx context.Context, repl *Replica, now hlc.Timestamp, lastGC hlc.Timestamp, gcTTL time.Duration, canAdvanceGCThreshold bool, -) gcQueueScore { +) mvccGCQueueScore { repl.mu.Lock() ms := *repl.mu.state.Stats repl.mu.Unlock() @@ -212,21 +213,22 @@ func makeGCQueueScore( // Use desc.RangeID for fuzzing the final score, so that different ranges // have slightly different priorities and even symmetrical workloads don't // trigger GC at the same time. - r := makeGCQueueScoreImpl( + r := makeMVCCGCQueueScoreImpl( ctx, int64(repl.RangeID), now, ms, gcTTL, lastGC, canAdvanceGCThreshold, ) return r } -// makeGCQueueScoreImpl is used to compute when to trigger the GC Queue. It's -// important that we don't queue a replica before a relevant amount of data is -// actually deletable, or the queue might run in a tight loop. To this end, we -// use a base score with the right interplay between GCByteAge and TTL and -// additionally weigh it so that GC is delayed when a large proportion of the -// data in the replica is live. Additionally, returned scores are slightly -// perturbed to avoid groups of replicas becoming eligible for GC at the same -// time repeatedly. We also use gcQueueIntentCooldownTimer to avoid spinning -// when GCing solely based on intents, since we may not be able to GC them. +// makeMVCCGCQueueScoreImpl is used to compute when to trigger the MVCC GC +// Queue. It's important that we don't queue a replica before a relevant amount +// of data is actually deletable, or the queue might run in a tight loop. To +// this end, we use a base score with the right interplay between GCByteAge and +// TTL and additionally weigh it so that GC is delayed when a large proportion +// of the data in the replica is live. Additionally, returned scores are +// slightly perturbed to avoid groups of replicas becoming eligible for GC at +// the same time repeatedly. We also use gcQueueIntentCooldownTimer to avoid +// spinning when GCing solely based on intents, since we may not be able to GC +// them. // // More details below. // @@ -307,7 +309,7 @@ func makeGCQueueScore( // This means that running GC will always result in a `GCBytesAge` of `<= // ttl*GCBytes`, and that a decent trigger for GC is a multiple of // `ttl*GCBytes`. 
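As an aside to the comment above, here is a small self-contained Go sketch of that trigger rule. It is illustrative only and not part of this patch: the helper names are hypothetical, and the real makeMVCCGCQueueScoreImpl additionally weighs the dead-data fraction, fuzzes the result per range, and applies the separate intent score and cooldown. The sketch just normalizes accumulated GCByteAge against ttl*GCBytes, the quantity the comment identifies as a decent GC trigger, and queues once the ratio exceeds a multiple of it (mvccGCKeyScoreThreshold, i.e. 2).

package main

import (
	"fmt"
	"time"
)

// mvccGCScoreSketch normalizes accumulated GCByteAge by ttl*GCBytes. A score
// of 1 means the garbage has, on average, been collectible for one full TTL;
// the key-score threshold (2) asks for a comfortable multiple of that.
func mvccGCScoreSketch(gcByteAge, gcBytes int64, ttl time.Duration) float64 {
	if gcBytes == 0 || ttl <= 0 {
		return 0
	}
	return float64(gcByteAge) / (ttl.Seconds() * float64(gcBytes))
}

func main() {
	const keyScoreThreshold = 2.0 // mirrors mvccGCKeyScoreThreshold
	score := mvccGCScoreSketch(10_000_000 /* byte-seconds */, 1_000 /* bytes */, time.Hour)
	fmt.Printf("score=%.2f shouldQueue=%t\n", score, score > keyScoreThreshold)
}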
-func makeGCQueueScoreImpl( +func makeMVCCGCQueueScoreImpl( ctx context.Context, fuzzSeed int64, now hlc.Timestamp, @@ -315,9 +317,9 @@ func makeGCQueueScoreImpl( gcTTL time.Duration, lastGC hlc.Timestamp, canAdvanceGCThreshold bool, -) gcQueueScore { +) mvccGCQueueScore { ms.Forward(now.WallTime) - var r gcQueueScore + var r mvccGCQueueScore if !lastGC.IsEmpty() { r.LastGC = time.Duration(now.WallTime - lastGC.WallTime) @@ -385,13 +387,13 @@ func makeGCQueueScoreImpl( r.FinalScore = r.FuzzFactor * (valScore + r.IntentScore) // First determine whether we should queue based on MVCC score alone. - r.ShouldQueue = canAdvanceGCThreshold && r.FuzzFactor*valScore > gcKeyScoreThreshold + r.ShouldQueue = canAdvanceGCThreshold && r.FuzzFactor*valScore > mvccGCKeyScoreThreshold // Next, determine whether we should queue based on intent score. For // intents, we also enforce a cooldown time since we may not actually // be able to clean up any intents (for active transactions). - if !r.ShouldQueue && r.FuzzFactor*r.IntentScore > gcIntentScoreThreshold && - (r.LastGC == 0 || r.LastGC >= gcQueueIntentCooldownDuration) { + if !r.ShouldQueue && r.FuzzFactor*r.IntentScore > mvccGCIntentScoreThreshold && + (r.LastGC == 0 || r.LastGC >= mvccGCQueueIntentCooldownDuration) { r.ShouldQueue = true } @@ -485,10 +487,10 @@ func (r *replicaGCer) GC(ctx context.Context, keys []roachpb.GCRequest_GCKey) er return r.send(ctx, req) } -// process first determines whether the replica can run GC given its view of -// the protected timestamp subsystem and its current state. This check also -// determines the most recent time which can be used for the purposes of updating -// the GC threshold and running GC. +// process first determines whether the replica can run MVCC GC given its view +// of the protected timestamp subsystem and its current state. This check also +// determines the most recent time which can be used for the purposes of +// updating the GC threshold and running GC. // // If it is safe to GC, process iterates through all keys in a replica's range, // calling the garbage collector for each key and associated set of @@ -514,7 +516,7 @@ func (r *replicaGCer) GC(ctx context.Context, keys []roachpb.GCRequest_GCKey) er // 6) scan the AbortSpan table for old entries // 7) push these transactions (again, recreating txn entries). // 8) send a GCRequest. -func (gcq *gcQueue) process( +func (mgcq *mvccGCQueue) process( ctx context.Context, repl *Replica, _ spanconfig.StoreReader, ) (processed bool, err error) { // Lookup the descriptor and GC policy for the zone containing this key range. @@ -530,12 +532,12 @@ func (gcq *gcQueue) process( canAdvanceGCThreshold := !newThreshold.Equal(oldThreshold) // We don't recheck ShouldQueue here, since the range may have been enqueued // manually e.g. via the admin server. - lastGC, err := repl.getQueueLastProcessed(ctx, gcq.name) + lastGC, err := repl.getQueueLastProcessed(ctx, mgcq.name) if err != nil { lastGC = hlc.Timestamp{} log.VErrEventf(ctx, 2, "failed to fetch last processed time: %v", err) } - r := makeGCQueueScore(ctx, repl, gcTimestamp, lastGC, conf.TTL(), canAdvanceGCThreshold) + r := makeMVCCGCQueueScore(ctx, repl, gcTimestamp, lastGC, conf.TTL(), canAdvanceGCThreshold) log.VEventf(ctx, 2, "processing replica %s with score %s", repl.String(), r) // Synchronize the new GC threshold decision with concurrent // AdminVerifyProtectedTimestamp requests. @@ -544,7 +546,7 @@ func (gcq *gcQueue) process( return false, nil } // Update the last processed timestamp. 
- if err := repl.setQueueLastProcessed(ctx, gcq.name, repl.store.Clock().Now()); err != nil { + if err := repl.setQueueLastProcessed(ctx, mgcq.name, repl.store.Clock().Now()); err != nil { log.VErrEventf(ctx, 2, "failed to update last processed time: %v", err) } @@ -561,21 +563,21 @@ func (gcq *gcQueue) process( MaxIntentsPerIntentCleanupBatch: maxIntentsPerCleanupBatch, MaxIntentKeyBytesPerIntentCleanupBatch: maxIntentKeyBytesPerCleanupBatch, MaxTxnsPerIntentCleanupBatch: intentresolver.MaxTxnsPerIntentCleanupBatch, - IntentCleanupBatchTimeout: gcQueueIntentBatchTimeout, + IntentCleanupBatchTimeout: mvccGCQueueIntentBatchTimeout, }, conf.TTL(), &replicaGCer{ repl: repl, - admissionController: gcq.store.cfg.KVAdmissionController, - storeID: gcq.store.StoreID(), + admissionController: mgcq.store.cfg.KVAdmissionController, + storeID: mgcq.store.StoreID(), }, func(ctx context.Context, intents []roachpb.Intent) error { intentCount, err := repl.store.intentResolver. CleanupIntents(ctx, intents, gcTimestamp, roachpb.PUSH_TOUCH) if err == nil { - gcq.store.metrics.GCResolveSuccess.Inc(int64(intentCount)) + mgcq.store.metrics.GCResolveSuccess.Inc(int64(intentCount)) } else { - gcq.store.metrics.GCResolveFailed.Inc(int64(intentCount)) + mgcq.store.metrics.GCResolveFailed.Inc(int64(intentCount)) } return err }, @@ -584,12 +586,12 @@ func (gcq *gcQueue) process( CleanupTxnIntentsOnGCAsync(ctx, repl.RangeID, txn, gcTimestamp, func(pushed, succeeded bool) { if pushed { - gcq.store.metrics.GCPushTxn.Inc(1) + mgcq.store.metrics.GCPushTxn.Inc(1) } if succeeded { - gcq.store.metrics.GCResolveSuccess.Inc(int64(len(txn.LockSpans))) + mgcq.store.metrics.GCResolveSuccess.Inc(int64(len(txn.LockSpans))) } else { - gcq.store.metrics.GCTxnIntentsResolveFailed.Inc(int64(len(txn.LockSpans))) + mgcq.store.metrics.GCTxnIntentsResolveFailed.Inc(int64(len(txn.LockSpans))) } }) if errors.Is(err, stop.ErrThrottled) { @@ -603,9 +605,9 @@ func (gcq *gcQueue) process( } log.Eventf(ctx, "MVCC stats after GC: %+v", repl.GetMVCCStats()) - log.Eventf(ctx, "GC score after GC: %s", makeGCQueueScore( + log.Eventf(ctx, "GC score after GC: %s", makeMVCCGCQueueScore( ctx, repl, repl.store.Clock().Now(), lastGC, conf.TTL(), canAdvanceGCThreshold)) - updateStoreMetricsWithGCInfo(gcq.store.metrics, info) + updateStoreMetricsWithGCInfo(mgcq.store.metrics, info) return true, nil } @@ -627,11 +629,11 @@ func updateStoreMetricsWithGCInfo(metrics *StoreMetrics, info gc.Info) { // timer returns a constant duration to space out GC processing // for successive queued replicas. -func (*gcQueue) timer(_ time.Duration) time.Duration { - return gcQueueTimerDuration +func (*mvccGCQueue) timer(_ time.Duration) time.Duration { + return mvccGCQueueTimerDuration } // purgatoryChan returns nil. 
-func (*gcQueue) purgatoryChan() <-chan time.Time { +func (*mvccGCQueue) purgatoryChan() <-chan time.Time { return nil } diff --git a/pkg/kv/kvserver/mvcc_gc_queue_test.go b/pkg/kv/kvserver/mvcc_gc_queue_test.go index 68f41b2514c8..4033083234ef 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue_test.go +++ b/pkg/kv/kvserver/mvcc_gc_queue_test.go @@ -50,15 +50,15 @@ func makeTS(nanos int64, logical int32) hlc.Timestamp { } } -func TestGCQueueScoreString(t *testing.T) { +func TestMVCCGCQueueScoreString(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) for i, c := range []struct { - r gcQueueScore + r mvccGCQueueScore exp string }{ - {gcQueueScore{}, "(empty)"}, - {gcQueueScore{ + {mvccGCQueueScore{}, "(empty)"}, + {mvccGCQueueScore{ ShouldQueue: true, FuzzFactor: 1.25, FinalScore: 3.45 * 1.25, @@ -73,7 +73,7 @@ func TestGCQueueScoreString(t *testing.T) { `queue=true with 4.31/fuzz(1.25)=3.45=valScaleScore(4.00)*deadFrac(0.25)+intentScore(0.45) likely last GC: 5s ago, 3.0 KiB non-live, curr. age 512 KiB*s, min exp. reduction: 256 KiB*s`}, // Check case of empty Threshold. - {gcQueueScore{ShouldQueue: true}, `queue=true with 0.00/fuzz(0.00)=NaN=valScaleScore(0.00)*deadFrac(0.00)+intentScore(0.00) + {mvccGCQueueScore{ShouldQueue: true}, `queue=true with 0.00/fuzz(0.00)=NaN=valScaleScore(0.00)*deadFrac(0.00)+intentScore(0.00) likely last GC: never, 0 B non-live, curr. age 0 B*s, min exp. reduction: 0 B*s`}, } { if act := c.r.String(); act != c.exp { @@ -82,7 +82,7 @@ likely last GC: never, 0 B non-live, curr. age 0 B*s, min exp. reduction: 0 B*s` } } -func TestGCQueueMakeGCScoreInvariantQuick(t *testing.T) { +func TestMVCCGCQueueMakeGCScoreInvariantQuick(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -108,7 +108,7 @@ func TestGCQueueMakeGCScoreInvariantQuick(t *testing.T) { GCBytesAge: gcByteAge, } now := initialNow.Add(timePassed.Nanoseconds(), 0) - r := makeGCQueueScoreImpl( + r := makeMVCCGCQueueScoreImpl( ctx, int64(seed), now, ms, time.Duration(ttlSec)*time.Second, hlc.Timestamp{}, true /* canAdvanceGCThreshold */) wouldHaveToDeleteSomething := gcBytes*int64(ttlSec) < ms.GCByteAge(now.WallTime) @@ -123,11 +123,11 @@ func TestGCQueueMakeGCScoreInvariantQuick(t *testing.T) { } } -func TestGCQueueMakeGCScoreAnomalousStats(t *testing.T) { +func TestMVCCGCQueueMakeGCScoreAnomalousStats(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) if err := quick.Check(func(keyBytes, valBytes, liveBytes int32, containsEstimates int64) bool { - r := makeGCQueueScoreImpl(context.Background(), 0, hlc.Timestamp{}, enginepb.MVCCStats{ + r := makeMVCCGCQueueScoreImpl(context.Background(), 0, hlc.Timestamp{}, enginepb.MVCCStats{ ContainsEstimates: containsEstimates, LiveBytes: int64(liveBytes), ValBytes: int64(valBytes), @@ -139,7 +139,7 @@ func TestGCQueueMakeGCScoreAnomalousStats(t *testing.T) { } } -func TestGCQueueMakeGCScoreLargeAbortSpan(t *testing.T) { +func TestMVCCGCQueueMakeGCScoreLargeAbortSpan(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) const seed = 1 @@ -152,7 +152,7 @@ func TestGCQueueMakeGCScoreLargeAbortSpan(t *testing.T) { // GC triggered if abort span should all be gc'able and it's large. 
{ - r := makeGCQueueScoreImpl( + r := makeMVCCGCQueueScoreImpl( context.Background(), seed, hlc.Timestamp{WallTime: expiration + 1}, ms, 10000*time.Second, @@ -167,7 +167,7 @@ func TestGCQueueMakeGCScoreLargeAbortSpan(t *testing.T) { ms.AbortSpanBytes = 0 ms.SysCount = probablyLargeAbortSpanSysCountThreshold { - r := makeGCQueueScoreImpl( + r := makeMVCCGCQueueScoreImpl( context.Background(), seed, hlc.Timestamp{WallTime: expiration + 1}, ms, 10000*time.Second, @@ -179,7 +179,7 @@ func TestGCQueueMakeGCScoreLargeAbortSpan(t *testing.T) { // Heuristic doesn't fire if last GC within TxnCleanupThreshold. { - r := makeGCQueueScoreImpl(context.Background(), seed, + r := makeMVCCGCQueueScoreImpl(context.Background(), seed, hlc.Timestamp{WallTime: expiration}, ms, 10000*time.Second, hlc.Timestamp{WallTime: expiration - 100}, true, /* canAdvanceGCThreshold */ @@ -189,7 +189,7 @@ func TestGCQueueMakeGCScoreLargeAbortSpan(t *testing.T) { } } -func TestGCQueueMakeGCScoreIntentCooldown(t *testing.T) { +func TestMVCCGCQueueMakeGCScoreIntentCooldown(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -207,9 +207,9 @@ func TestGCQueueMakeGCScoreIntentCooldown(t *testing.T) { "just GCed": {lastGC: now.Add(-1, 0), expectGC: false}, "future GC": {lastGC: now.Add(1, 0), expectGC: false}, "MVCC GC ignores cooldown": {lastGC: now.Add(-1, 0), mvccGC: true, expectGC: true}, - "before GC cooldown": {lastGC: now.Add(-gcQueueIntentCooldownDuration.Nanoseconds()+1, 0), expectGC: false}, - "at GC cooldown": {lastGC: now.Add(-gcQueueIntentCooldownDuration.Nanoseconds(), 0), expectGC: true}, - "after GC cooldown": {lastGC: now.Add(-gcQueueIntentCooldownDuration.Nanoseconds()-1, 0), expectGC: true}, + "before GC cooldown": {lastGC: now.Add(-mvccGCQueueIntentCooldownDuration.Nanoseconds()+1, 0), expectGC: false}, + "at GC cooldown": {lastGC: now.Add(-mvccGCQueueIntentCooldownDuration.Nanoseconds(), 0), expectGC: true}, + "after GC cooldown": {lastGC: now.Add(-mvccGCQueueIntentCooldownDuration.Nanoseconds()-1, 0), expectGC: true}, } for name, tc := range testcases { tc := tc @@ -221,7 +221,7 @@ func TestGCQueueMakeGCScoreIntentCooldown(t *testing.T) { ms.ValBytes = 1e9 } - r := makeGCQueueScoreImpl( + r := makeMVCCGCQueueScoreImpl( ctx, seed, now, ms, gcTTL, tc.lastGC, true /* canAdvanceGCThreshold */) require.Equal(t, tc.expectGC, r.ShouldQueue) }) @@ -341,7 +341,7 @@ func (cws *cachedWriteSimulator) shouldQueue( ) { cws.t.Helper() ts := hlc.Timestamp{}.Add(ms.LastUpdateNanos+after.Nanoseconds(), 0) - r := makeGCQueueScoreImpl(context.Background(), 0 /* seed */, ts, ms, ttl, + r := makeMVCCGCQueueScoreImpl(context.Background(), 0 /* seed */, ts, ms, ttl, hlc.Timestamp{}, true /* canAdvanceGCThreshold */) if fmt.Sprintf("%.2f", r.FinalScore) != fmt.Sprintf("%.2f", prio) || b != r.ShouldQueue { cws.t.Errorf("expected queued=%t (is %t), prio=%.2f, got %.2f: after=%s, ttl=%s:\nms: %+v\nscore: %s", @@ -349,11 +349,11 @@ func (cws *cachedWriteSimulator) shouldQueue( } } -// TestGCQueueMakeGCScoreRealistic verifies conditions which inform priority and -// whether or not the range should be queued into the GC queue. Ranges are -// queued for GC based on two conditions. The age of bytes available to be GC'd, -// and the age of unresolved intents. -func TestGCQueueMakeGCScoreRealistic(t *testing.T) { +// TestMVCCGCQueueMakeGCScoreRealistic verifies conditions which inform priority +// and whether or not the range should be queued into the MVCC GC queue. 
Ranges +// are queued for MVCC GC based on two conditions. The age of bytes available to +// be GC'd, and the age of unresolved intents. +func TestMVCCGCQueueMakeGCScoreRealistic(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -457,9 +457,9 @@ func TestGCQueueMakeGCScoreRealistic(t *testing.T) { } } -// TestGCQueueProcess creates test data in the range over various time +// TestMVCCGCQueueProcess creates test data in the range over various time // scales and verifies that scan queue process properly GCs test data. -func TestGCQueueProcess(t *testing.T) { +func TestMVCCGCQueueProcess(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() @@ -633,8 +633,8 @@ func TestGCQueueProcess(t *testing.T) { } // Process through a scan queue. - gcQ := newGCQueue(tc.store) - processed, err := gcQ.process(ctx, tc.repl, cfg) + mgcq := newMVCCGCQueue(tc.store) + processed, err := mgcq.process(ctx, tc.repl, cfg) if err != nil { t.Fatal(err) } @@ -694,7 +694,7 @@ func TestGCQueueProcess(t *testing.T) { }) } -func TestGCQueueTransactionTable(t *testing.T) { +func TestMVCCGCQueueTransactionTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() @@ -866,13 +866,13 @@ func TestGCQueueTransactionTable(t *testing.T) { } // Run GC. - gcQ := newGCQueue(tc.store) + mgcq := newMVCCGCQueue(tc.store) cfg := tc.gossip.GetSystemConfig() if cfg == nil { t.Fatal("config not set") } - processed, err := gcQ.process(ctx, tc.repl, cfg) + processed, err := mgcq.process(ctx, tc.repl, cfg) if err != nil { t.Fatal(err) } @@ -957,9 +957,9 @@ func TestGCQueueTransactionTable(t *testing.T) { tc.repl.raftMu.Unlock() } -// TestGCQueueIntentResolution verifies intent resolution with many -// intents spanning just two transactions. -func TestGCQueueIntentResolution(t *testing.T) { +// TestMVCCGCQueueIntentResolution verifies intent resolution with many intents +// spanning just two transactions. +func TestMVCCGCQueueIntentResolution(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() @@ -1005,8 +1005,8 @@ func TestGCQueueIntentResolution(t *testing.T) { if err != nil { t.Fatal(err) } - gcQ := newGCQueue(tc.store) - processed, err := gcQ.process(ctx, tc.repl, confReader) + mgcq := newMVCCGCQueue(tc.store) + processed, err := mgcq.process(ctx, tc.repl, confReader) if err != nil { t.Fatal(err) } @@ -1033,7 +1033,7 @@ func TestGCQueueIntentResolution(t *testing.T) { }) } -func TestGCQueueLastProcessedTimestamps(t *testing.T) { +func TestMVCCGCQueueLastProcessedTimestamps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() @@ -1068,8 +1068,8 @@ func TestGCQueueLastProcessedTimestamps(t *testing.T) { } // Process through a scan queue. - gcQ := newGCQueue(tc.store) - processed, err := gcQ.process(ctx, tc.repl, confReader) + mgcq := newMVCCGCQueue(tc.store) + processed, err := mgcq.process(ctx, tc.repl, confReader) if err != nil { t.Fatal(err) } @@ -1091,10 +1091,10 @@ func TestGCQueueLastProcessedTimestamps(t *testing.T) { }) } -// TestGCQueueChunkRequests verifies that many intents are chunked -// into separate batches. This is verified both for many different -// keys and also for many different versions of keys. -func TestGCQueueChunkRequests(t *testing.T) { +// TestMVCCGCQueueChunkRequests verifies that many intents are chunked into +// separate batches. 
This is verified both for many different keys and also for +// many different versions of keys. +func TestMVCCGCQueueChunkRequests(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() @@ -1176,8 +1176,8 @@ func TestGCQueueChunkRequests(t *testing.T) { t.Fatalf("could not find span config for range %s", err) } tc.manualClock.Increment(conf.TTL().Nanoseconds() + 1) - gcQ := newGCQueue(tc.store) - processed, err := gcQ.process(ctx, tc.repl, confReader) + mgcq := newMVCCGCQueue(tc.store) + processed, err := mgcq.process(ctx, tc.repl, confReader) if err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvserver/queue_helpers_testutil.go b/pkg/kv/kvserver/queue_helpers_testutil.go index 6ec847f01af1..ca232ddccfec 100644 --- a/pkg/kv/kvserver/queue_helpers_testutil.go +++ b/pkg/kv/kvserver/queue_helpers_testutil.go @@ -103,7 +103,7 @@ func (s *Store) ForceConsistencyQueueProcess() error { // is only meant to happen in tests. func (s *Store) setGCQueueActive(active bool) { - s.gcQueue.SetDisabled(!active) + s.mvccGCQueue.SetDisabled(!active) } func (s *Store) setMergeQueueActive(active bool) { s.mergeQueue.SetDisabled(!active) diff --git a/pkg/kv/kvserver/replica_gc_queue.go b/pkg/kv/kvserver/replica_gc_queue.go index 33a82568c5a3..b64b6876f685 100644 --- a/pkg/kv/kvserver/replica_gc_queue.go +++ b/pkg/kv/kvserver/replica_gc_queue.go @@ -57,7 +57,7 @@ const ( var ( metaReplicaGCQueueRemoveReplicaCount = metric.Metadata{ Name: "queue.replicagc.removereplica", - Help: "Number of replica removals attempted by the replica gc queue", + Help: "Number of replica removals attempted by the replica GC queue", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, } diff --git a/pkg/kv/kvserver/replica_protected_timestamp.go b/pkg/kv/kvserver/replica_protected_timestamp.go index 485bc177d75e..bb5d82f22f8c 100644 --- a/pkg/kv/kvserver/replica_protected_timestamp.go +++ b/pkg/kv/kvserver/replica_protected_timestamp.go @@ -228,9 +228,9 @@ func (r *Replica) protectedTimestampRecordCurrentlyApplies( // to run GC will observe the Record if it still exists. The one hazard we // need to avoid is a race whereby an attempt to run GC first checks the // protected timestamp state and then attempts to increase the GC threshold. - // We set the minStateReadTimestamp here to avoid such races. The GC queue - // will call markPendingGC just prior to sending a request to update the GC - // threshold which will verify the safety of the new value relative to + // We set the minStateReadTimestamp here to avoid such races. The MVCC GC + // queue will call markPendingGC just prior to sending a request to update the + // GC threshold which will verify the safety of the new value relative to // minStateReadTimestamp. if seen { r.protectedTimestampMu.minStateReadTimestamp = read.readAt @@ -303,7 +303,7 @@ func (r *Replica) checkProtectedTimestampsForGC( } // markPendingGC is called just prior to sending the GC request to increase the -// GC threshold during GC queue processing. This method synchronizes such +// GC threshold during MVCC GC queue processing. This method synchronizes such // requests with the processing of AdminVerifyProtectedTimestamp requests. 
Such // synchronization is important to prevent races where the protected timestamp // state is read from a stale point in time and then concurrently, a diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index d335bada8628..1cecc68ccfc9 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -710,7 +710,7 @@ type Store struct { replRankings *replicaRankings storeRebalancer *StoreRebalancer rangeIDAlloc *idalloc.Allocator // Range ID allocator - gcQueue *gcQueue // Garbage collection queue + mvccGCQueue *mvccGCQueue // MVCC GC queue mergeQueue *mergeQueue // Range merging queue splitQueue *splitQueue // Range splitting queue replicateQueue *replicateQueue // Replication queue @@ -834,7 +834,7 @@ type Store struct { // only needs to be held during splitTrigger, not all triggers. // // * baseQueue.mu: The mutex contained in each of the store's queues (such - // as the replicate queue, replica GC queue, GC queue, ...). The mutex is + // as the replicate queue, replica GC queue, MVCC GC queue, ...). The mutex is // typically acquired when deciding whether to add a replica to the respective // queue. // @@ -1283,7 +1283,7 @@ func NewStore( s.cfg.AmbientCtx, s.cfg.Clock, cfg.ScanInterval, cfg.ScanMinIdleTime, cfg.ScanMaxIdleTime, newStoreReplicaVisitor(s), ) - s.gcQueue = newGCQueue(s) + s.mvccGCQueue = newMVCCGCQueue(s) s.mergeQueue = newMergeQueue(s, s.db) s.splitQueue = newSplitQueue(s, s.db) s.replicateQueue = newReplicateQueue(s, s.allocator) @@ -1295,7 +1295,7 @@ func NewStore( // queues on the EnqueueRange debug page as defined in // pkg/ui/src/views/reports/containers/enqueueRange/index.tsx s.scanner.AddQueues( - s.gcQueue, s.mergeQueue, s.splitQueue, s.replicateQueue, s.replicaGCQueue, + s.mvccGCQueue, s.mergeQueue, s.splitQueue, s.replicateQueue, s.replicaGCQueue, s.raftLogQueue, s.raftSnapshotQueue, s.consistencyQueue) tsDS := s.cfg.TimeSeriesDataStore if s.cfg.TestingKnobs.TimeSeriesDataStore != nil { diff --git a/pkg/kv/kvserver/store_snapshot.go b/pkg/kv/kvserver/store_snapshot.go index 0276938a6e02..8f9d028fb41d 100644 --- a/pkg/kv/kvserver/store_snapshot.go +++ b/pkg/kv/kvserver/store_snapshot.go @@ -506,7 +506,8 @@ func (s *Store) canAcceptSnapshotLocked( // checkSnapshotOverlapLocked returns an error if the snapshot overlaps an // existing replica or placeholder. Any replicas that do overlap have a good -// chance of being abandoned, so they're proactively handed to the GC queue . +// chance of being abandoned, so they're proactively handed to the replica GC +// queue. func (s *Store) checkSnapshotOverlapLocked( ctx context.Context, snapHeader *SnapshotRequest_Header, ) error { @@ -542,16 +543,16 @@ func (s *Store) checkSnapshotOverlapLocked( // stops sending this replica heartbeats. return !r.CurrentLeaseStatus(ctx).IsValid() } - // We unconditionally send this replica through the GC queue. It's - // reasonably likely that the GC queue will do nothing because the replica - // needs to split instead, but better to err on the side of queueing too - // frequently. Blocking Raft snapshots for too long can wedge a cluster, - // and if the replica does need to be GC'd, this might be the only code - // path that notices in a timely fashion. + // We unconditionally send this replica through the replica GC queue. It's + // reasonably likely that the replica GC queue will do nothing because the + // replica needs to split instead, but better to err on the side of + // queueing too frequently. 
Blocking Raft snapshots for too long can wedge + // a cluster, and if the replica does need to be GC'd, this might be the + // only code path that notices in a timely fashion. // - // We're careful to avoid starving out other replicas in the GC queue by - // queueing at a low priority unless we can prove that the range is - // inactive and thus unlikely to be about to process a split. + // We're careful to avoid starving out other replicas in the replica GC + // queue by queueing at a low priority unless we can prove that the range + // is inactive and thus unlikely to be about to process a split. gcPriority := replicaGCPriorityDefault if inactive(exReplica) { gcPriority = replicaGCPrioritySuspect diff --git a/pkg/server/admin.go b/pkg/server/admin.go index a8d6ea609c6e..ac7abef37627 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -2603,7 +2603,24 @@ func (s *adminServer) enqueueRangeLocal( return response, nil } - traceSpans, processErr, err := store.ManuallyEnqueue(ctx, req.Queue, repl, req.SkipShouldQueue) + // Handle mixed-version clusters across the "gc" to "mvccGC" queue rename. + // TODO(nvanbenschoten): remove this in v23.1. Inline req.Queue again. + // The client logic in pkg/ui/workspaces/db-console/src/views/reports/containers/enqueueRange/index.tsx + // should stop sending "gc" in v22.2. When removing, confirm that the + // associated TODO in index.tsx was addressed in the previous release. + // + // Explanation of migration: + // - v22.1 will understand "gc" and "mvccGC" on the server. Its client will + // continue to send "gc" to interop with v21.2 servers. + // - v22.2's client will send "mvccGC" but will still have to understand "gc" + // on the server to deal with v22.1 clients. + // - v23.1's server can stop understanding "gc". + queueName := req.Queue + if strings.ToLower(queueName) == "gc" { + queueName = "mvccGC" + } + + traceSpans, processErr, err := store.ManuallyEnqueue(ctx, queueName, repl, req.SkipShouldQueue) if err != nil { response.Details[0].Error = err.Error() return response, nil diff --git a/pkg/server/admin_test.go b/pkg/server/admin_test.go index 09946f5310bd..bf69b1f3e179 100644 --- a/pkg/server/admin_test.go +++ b/pkg/server/admin_test.go @@ -2096,7 +2096,7 @@ func TestEnqueueRange(t *testing.T) { expectedNonErrors int }{ // Success cases - {0, "gc", realRangeID, allReplicas, leaseholder}, + {0, "mvccGC", realRangeID, allReplicas, leaseholder}, {0, "split", realRangeID, allReplicas, leaseholder}, {0, "replicaGC", realRangeID, allReplicas, allReplicas}, {0, "RaFtLoG", realRangeID, allReplicas, allReplicas}, @@ -2106,6 +2106,10 @@ func TestEnqueueRange(t *testing.T) { {1, "raftlog", realRangeID, leaseholder, leaseholder}, {2, "raftlog", realRangeID, leaseholder, 1}, {3, "raftlog", realRangeID, leaseholder, 1}, + // Compatibility cases. + // TODO(nvanbenschoten): remove this in v23.1. + {0, "gc", realRangeID, allReplicas, leaseholder}, + {0, "GC", realRangeID, allReplicas, leaseholder}, // Error cases {0, "gv", realRangeID, allReplicas, none}, {0, "GC", fakeRangeID, allReplicas, none}, @@ -2139,9 +2143,9 @@ func TestEnqueueRange(t *testing.T) { // Finally, test a few more basic error cases. 
reqs := []*serverpb.EnqueueRangeRequest{ - {NodeID: -1, Queue: "gc"}, + {NodeID: -1, Queue: "mvccGC"}, {Queue: ""}, - {RangeID: -1, Queue: "gc"}, + {RangeID: -1, Queue: "mvccGC"}, } for _, req := range reqs { t.Run(fmt.Sprint(req), func(t *testing.T) { diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index efa24c826d43..39c616b5a9c7 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -3393,7 +3393,7 @@ CREATE TABLE crdb_internal.zones ( if index == nil { // If we can't find an active index that corresponds to this index // ID then continue, as the index is being dropped, or is already - // dropped and in the GC queue. + // dropped and in the MVCC GC queue. continue } if zoneSpecifier != nil { diff --git a/pkg/ts/catalog/chart_catalog.go b/pkg/ts/catalog/chart_catalog.go index dca3889de3e1..8536788a8358 100644 --- a/pkg/ts/catalog/chart_catalog.go +++ b/pkg/ts/catalog/chart_catalog.go @@ -724,7 +724,7 @@ var charts = []sectionDescription{ }, }, { - Organization: [][]string{{KVTransactionLayer, "Garbage Collection (GC)", "Keys"}}, + Organization: [][]string{{KVTransactionLayer, "MVCC Garbage Collection (GC)", "Keys"}}, Charts: []chartDescription{ { Title: "AbortSpan", diff --git a/pkg/ui/workspaces/db-console/src/views/reports/containers/enqueueRange/index.tsx b/pkg/ui/workspaces/db-console/src/views/reports/containers/enqueueRange/index.tsx index d93d73cb568e..47bd9dda766e 100644 --- a/pkg/ui/workspaces/db-console/src/views/reports/containers/enqueueRange/index.tsx +++ b/pkg/ui/workspaces/db-console/src/views/reports/containers/enqueueRange/index.tsx @@ -23,7 +23,7 @@ import EnqueueRangeResponse = cockroach.server.serverpb.EnqueueRangeResponse; const QUEUES = [ "replicate", - "gc", + "mvccGC", "merge", "split", "replicaGC", @@ -88,6 +88,12 @@ export class EnqueueRange extends React.Component< nodeID: number, skipShouldQueue: boolean, ) => { + // Handle mixed-version clusters across the "gc" to "mvccGC" queue rename. + // TODO(nvanbenschoten): remove this in v22.2. The server logic will continue + // to map "gc" to "mvccGC" until v23.1. + if (queue === "mvccGC") { + queue = "gc"; + } const req = new EnqueueRangeRequest({ queue: queue, range_id: rangeID,