diff --git a/docs/generated/metrics/metrics.html b/docs/generated/metrics/metrics.html
index 5711d767437f..a0b9a359ad68 100644
--- a/docs/generated/metrics/metrics.html
+++ b/docs/generated/metrics/metrics.html
@@ -815,7 +815,6 @@
STORAGE | storage.l6-level-score | Compaction score of level 6 | Score | GAUGE | COUNT | AVG | NONE |
STORAGE | storage.l6-level-size | Size of the SSTables in level 6 | Bytes | GAUGE | BYTES | AVG | NONE |
STORAGE | storage.marked-for-compaction-files | Count of SSTables marked for compaction | SSTables | GAUGE | COUNT | AVG | NONE |
-STORAGE | storage.queue.store-failures | Number of replicas which failed processing in replica queues due to retryable store errors | Replicas | COUNTER | COUNT | AVG | NON_NEGATIVE_DERIVATIVE |
STORAGE | storage.secondary-cache.count | The count of cache blocks in the secondary cache (not sstable blocks) | Cache items | GAUGE | COUNT | AVG | NONE |
STORAGE | storage.secondary-cache.evictions | The number of times a cache block was evicted from the secondary cache | Num evictions | COUNTER | COUNT | AVG | NON_NEGATIVE_DERIVATIVE |
STORAGE | storage.secondary-cache.reads-full-hit | The number of reads where all data returned was read from the secondary cache | Num reads | COUNTER | COUNT | AVG | NON_NEGATIVE_DERIVATIVE |
diff --git a/pkg/cmd/roachtest/tests/BUILD.bazel b/pkg/cmd/roachtest/tests/BUILD.bazel
index a0f1272fb7f7..9ea941efea17 100644
--- a/pkg/cmd/roachtest/tests/BUILD.bazel
+++ b/pkg/cmd/roachtest/tests/BUILD.bazel
@@ -168,6 +168,7 @@ go_library(
"rust_postgres_blocklist.go",
"s3_clone_backup_restore.go",
"s3_microceph.go",
+ "s3_minio.go",
"schemachange.go",
"schemachange_random_load.go",
"scrub.go",
diff --git a/pkg/cmd/roachtest/tests/perturbation/index_backfill.go b/pkg/cmd/roachtest/tests/perturbation/index_backfill.go
index df3ab056ba74..868843880540 100644
--- a/pkg/cmd/roachtest/tests/perturbation/index_backfill.go
+++ b/pkg/cmd/roachtest/tests/perturbation/index_backfill.go
@@ -42,6 +42,13 @@ func (b backfill) setupMetamorphic(rng *rand.Rand) variations {
if v.mem == spec.Low {
v.mem = spec.Standard
}
+
+ // TODO(#139319): This can be removed once we stop testing the non "full"
+ // mode. Without full AC, these tests can OOM.
+ if v.numNodes >= 30 && (v.acMode == elasticOnlyBoth || v.acMode == fullNormalElasticRepl) {
+ v.acMode = fullBoth
+ }
+
// TODO(#136848): The backfill test will cause WAL failover resulting in
// OOMs even with high memory configurations. This test passes without WAL
// failover enabled or with more vCPUs per node.
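For context, the clamp above follows the usual sanitize-after-randomize shape of these metamorphic setups: draw a random configuration, then override combinations known to be unhealthy. A self-contained sketch of that pattern, with illustrative stand-ins for the real `variations` type and `acMode` constants:

```go
package main

import "fmt"

// Illustrative stand-ins for the perturbation test's types; not the real ones.
type acMode int

const (
	elasticOnlyBoth acMode = iota
	fullNormalElasticRepl
	fullBoth
)

type variations struct {
	numNodes int
	acMode   acMode
}

// sanitize overrides randomly drawn combinations known to be unhealthy.
func sanitize(v variations) variations {
	// Without full admission control, >=30-node runs can OOM (#139319).
	if v.numNodes >= 30 && (v.acMode == elasticOnlyBoth || v.acMode == fullNormalElasticRepl) {
		v.acMode = fullBoth
	}
	return v
}

func main() {
	fmt.Println(sanitize(variations{numNodes: 32, acMode: elasticOnlyBoth}))
}
```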
diff --git a/pkg/cmd/roachtest/tests/s3_clone_backup_restore.go b/pkg/cmd/roachtest/tests/s3_clone_backup_restore.go
index a91e0a5087fb..1da4c4a06fa8 100644
--- a/pkg/cmd/roachtest/tests/s3_clone_backup_restore.go
+++ b/pkg/cmd/roachtest/tests/s3_clone_backup_restore.go
@@ -10,6 +10,8 @@ import (
gosql "database/sql"
"fmt"
"math/rand"
+ "os"
+ "path"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
@@ -65,6 +67,42 @@ func registerBackupS3Clones(r registry.Registry) {
},
})
}
+
+ r.Add(registry.TestSpec{
+ Name: "backup/minio",
+ Owner: registry.OwnerFieldEng,
+ Cluster: r.MakeClusterSpec(4, spec.WorkloadNodeCount(1)),
+ EncryptionSupport: registry.EncryptionMetamorphic,
+ Leases: registry.MetamorphicLeases,
+ CompatibleClouds: registry.Clouds(spec.GCE),
+ Suites: registry.Suites(registry.Nightly),
+ TestSelectionOptOutSuites: registry.Suites(registry.Nightly),
+ Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
+ v := s3BackupRestoreValidator{
+ t: t,
+ c: c,
+ crdbNodes: c.CRDBNodes(),
+ csvPort: 8081,
+ importNode: c.Node(1),
+ rows: 1000,
+ workloadNode: c.WorkloadNode(),
+ }
+ v.startCluster(ctx)
+ mgr := minioManager{
+ t: t,
+ c: c,
+ bucket: backupTestingBucket,
+ // For now, we use the workload node to host the minio cluster.
+ minioNodes: c.Node(c.Spec().NodeCount),
+ key: randomString(32),
+ secret: randomString(64),
+ }
+ mgr.install(ctx)
+ defer mgr.cleanup(ctx)
+ v.validateBackupRestore(ctx, mgr)
+ },
+ })
+
}
// s3Provider defines the methods that the S3 object store has to provide
@@ -229,6 +267,32 @@ func (v *s3BackupRestoreValidator) runWorload(ctx context.Context, duration time
return v.c.RunE(ctx, option.WithNodes(v.workloadNode), cmd)
}
+// installCa fetches the cluster's CA certificate from node 1 and registers
+// it as the custom CA for cloud storage, so that HTTPS object stores with
+// self-signed certificates pass TLS verification.
+func installCa(ctx context.Context, t test.Test, c cluster.Cluster) error {
+ localCertsDir, err := os.MkdirTemp("", "roachtest-certs")
+ if err != nil {
+ return err
+ }
+ // Fetch the CA file from one of the nodes.
+ caFile := path.Join(localCertsDir, "ca.crt")
+ conn := c.Conn(ctx, t.L(), 1)
+ defer conn.Close()
+ if err := c.Get(ctx, t.L(), "certs/ca.crt", caFile, c.Node(1)); err != nil {
+ return err
+ }
+ caCert, err := os.ReadFile(caFile)
+ if err != nil {
+ return err
+ }
+ // Disable session caching when a custom CA is in use; see https://github.com/cockroachdb/cockroach/issues/125051.
+ if _, err := conn.ExecContext(ctx, "set cluster setting cloudstorage.s3.session_reuse.enabled = false"); err != nil {
+ return err
+ }
+ if _, err := conn.ExecContext(ctx, "set cluster setting cloudstorage.http.custom_ca=$1", caCert); err != nil {
+ return err
+ }
+ return nil
+}
+
// randomString returns a random string with the given size.
func randomString(size int) string {
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
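`installCa` is the piece both self-hosted S3 tests now share: it pulls `certs/ca.crt` off node 1 and stores it in the `cloudstorage.http.custom_ca` cluster setting, so HTTPS object stores with self-signed certificates (microceph, minio) pass TLS verification, and it disables S3 session reuse to work around #125051. A hedged usage sketch follows; `runSecureBackup` is hypothetical, while `installCa`, the roachtest interfaces, and `BACKUP INTO` come from the diff and CockroachDB SQL:

```go
package tests

import (
	"context"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
	"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
)

// runSecureBackup is a hypothetical caller: trust the cluster CA first, then
// back up to a self-signed HTTPS endpoint such as the minio node above.
func runSecureBackup(ctx context.Context, t test.Test, c cluster.Cluster, uri string) error {
	if err := installCa(ctx, t, c); err != nil {
		return err
	}
	conn := c.Conn(ctx, t.L(), 1)
	defer conn.Close()
	_, err := conn.ExecContext(ctx, fmt.Sprintf(`BACKUP INTO '%s'`, uri))
	return err
}
```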
diff --git a/pkg/cmd/roachtest/tests/s3_microceph.go b/pkg/cmd/roachtest/tests/s3_microceph.go
index 2f30d3e5bc07..b7e44077980d 100644
--- a/pkg/cmd/roachtest/tests/s3_microceph.go
+++ b/pkg/cmd/roachtest/tests/s3_microceph.go
@@ -9,8 +9,6 @@ import (
"context"
"fmt"
"net/url"
- "os"
- "path"
"path/filepath"
"github.com/cockroachdb/cockroach/pkg/cloud/amazon"
@@ -38,7 +36,7 @@ for l in a b c; do
sudo microceph disk add --wipe "/dev/sdi${l}"
done`
-// cephCleanup remove microceph and the loop devices.
+// cephCleanup removes microceph and the loop devices.
const cephCleanup = `
#!/bin/bash
sudo microceph disable rgw
@@ -147,29 +145,7 @@ func (m cephManager) maybeInstallCa(ctx context.Context) error {
if !m.secure {
return nil
}
- localCertsDir, err := os.MkdirTemp("", "roachtest-certs")
- if err != nil {
- return err
- }
- // get the ca file from one of the nodes.
- caFile := path.Join(localCertsDir, "ca.crt")
- conn := m.c.Conn(ctx, m.t.L(), 1)
- defer conn.Close()
- if err := m.c.Get(ctx, m.t.L(), "certs/ca.crt", caFile, m.c.Node(1)); err != nil {
- return err
- }
- caCert, err := os.ReadFile(caFile)
- if err != nil {
- return err
- }
- // Disabling caching for Custom CA, see https://github.com/cockroachdb/cockroach/issues/125051.
- if _, err := conn.ExecContext(ctx, "set cluster setting cloudstorage.s3.session_reuse.enabled = false"); err != nil {
- return err
- }
- if _, err := conn.ExecContext(ctx, "set cluster setting cloudstorage.http.custom_ca=$1", caCert); err != nil {
- return err
- }
- return nil
+ return installCa(ctx, m.t, m.c)
}
// put creates a file in the ceph node with the given content.
diff --git a/pkg/cmd/roachtest/tests/s3_minio.go b/pkg/cmd/roachtest/tests/s3_minio.go
new file mode 100644
index 000000000000..369919a3df5d
--- /dev/null
+++ b/pkg/cmd/roachtest/tests/s3_minio.go
@@ -0,0 +1,93 @@
+// Copyright 2025 The Cockroach Authors.
+//
+// Use of this software is governed by the CockroachDB Software License
+// included in the /LICENSE file.
+
+package tests
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "path"
+
+ "github.com/cockroachdb/cockroach/pkg/cloud/amazon"
+ "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
+ "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
+ "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
+)
+
+// minioDir is the directory for supporting files.
+var minioDir = "/tmp/minio"
+
+// minioManager manages a single-node minio cluster, used to validate
+// backup and restore functionality.
+type minioManager struct {
+ t test.Test
+ c cluster.Cluster
+ bucket string
+ minioNodes option.NodeListOption // The nodes within the cluster used by Minio.
+ key string
+ secret string
+}
+
+// minioManager implements s3Provider.
+var _ s3Provider = &minioManager{}
+
+// getBackupURI implements s3Provider.
+func (m minioManager) getBackupURI(ctx context.Context, dest string) (string, error) {
+ addr, err := m.c.InternalIP(ctx, m.t.L(), m.minioNodes)
+ if err != nil {
+ return "", err
+ }
+ m.t.Status("minio: ", addr)
+ endpointURL := `https://` + addr[0]
+
+ q := make(url.Values)
+ q.Add(amazon.AWSAccessKeyParam, m.key)
+ q.Add(amazon.AWSSecretParam, m.secret)
+ q.Add(amazon.AWSUsePathStyle, "true")
+ // Region is required in the URL, but not used in Minio.
+ q.Add(amazon.S3RegionParam, "dummy")
+ q.Add(amazon.AWSEndpointParam, endpointURL)
+ uri := fmt.Sprintf("s3://%s/%s?%s", m.bucket, dest, q.Encode())
+ return uri, nil
+}
+
+func (m minioManager) cleanup(ctx context.Context) {
+ m.run(ctx, "removing minio", "sudo docker rm -f minio")
+ m.run(ctx, "removing minio dir", fmt.Sprintf(`rm -rf %s`, minioDir))
+}
+
+// install deploys a single-node minio cluster within a docker container.
+// Errors are fatal to the test.
+func (m minioManager) install(ctx context.Context) {
+ if err := m.c.Install(ctx, m.t.L(), m.minioNodes, "docker"); err != nil {
+ m.t.Fatalf("failed to install docker: %v", err)
+ }
+ certsDir := path.Join(minioDir, "certs")
+ m.run(ctx, `copy CA`,
+ fmt.Sprintf(`mkdir -p %[1]s/CAs ; cp certs/ca.crt %[1]s/CAs/ca.crt; `, certsDir))
+ m.run(ctx, `copy certs/key`,
+ fmt.Sprintf(`cp certs/node.crt %[1]s/public.crt; cp certs/node.key %[1]s/private.key; `,
+ certsDir))
+ m.run(ctx, `installing minio`,
+ fmt.Sprintf(`sudo docker run --name minio -d -p 443:9000 -e "MINIO_ROOT_USER=%s" -e "MINIO_ROOT_PASSWORD=%s" --privileged -v %s:/root/.minio minio/minio server /data`,
+ m.key, m.secret, minioDir))
+
+ m.run(ctx, `install s3cmd`, `sudo apt install -y s3cmd`)
+ m.run(ctx, `creating bucket`,
+ fmt.Sprintf(s3cmdSsl, m.key, m.secret, "mb s3://"+m.bucket))
+
+ if err := installCa(ctx, m.t, m.c); err != nil {
+ m.t.Fatal(err)
+ }
+}
+
+// run executes the given command on the minio node.
+func (m minioManager) run(ctx context.Context, msg string, cmd ...string) {
+ m.t.Status(msg, "...")
+ m.t.Status(cmd)
+ m.c.Run(ctx, option.WithNodes(m.minioNodes), cmd...)
+ m.t.Status(msg, " done")
+}
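`getBackupURI` is what wires minio into the generic `s3Provider` flow: path-style addressing plus an explicit endpoint is how the AWS SDK gets pointed at a non-AWS S3 implementation. A standalone sketch of the resulting URI shape; the literal parameter names are assumptions standing in for the `amazon.*` constants:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Assumed literal values of the amazon.* parameter constants used above.
	q := make(url.Values)
	q.Add("AWS_ACCESS_KEY_ID", "minio-key")
	q.Add("AWS_SECRET_ACCESS_KEY", "minio-secret")
	q.Add("AWS_USE_PATH_STYLE", "true") // bucket in the path, not the hostname
	q.Add("AWS_REGION", "dummy")        // required in the URI, ignored by minio
	q.Add("AWS_ENDPOINT", "https://10.0.0.5")
	fmt.Printf("s3://backup-testing/dest?%s\n", q.Encode())
}
```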
diff --git a/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go b/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go
index a9b14c1c5172..01954aa9b71a 100644
--- a/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go
+++ b/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go
@@ -1585,7 +1585,7 @@ func TestRangeFeedIntentResolutionRace(t *testing.T) {
require.False(t, commitTS.LessEq(c.ResolvedTS),
"repl %s emitted checkpoint %s beyond write timestamp %s", repl3, c.ResolvedTS, commitTS)
case v := <-valueC:
- require.Fail(t, "repl3 emitted premature value %s", v)
+ require.Failf(t, "repl3 emitted premature value", "value: %#+v", v)
case <-waitC:
done = true
}
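The one-line test fix is easy to miss: `require.Fail` does not treat its first argument as a format string, so the `%s` in the old call was emitted verbatim and `v` was rendered separately through the `msgAndArgs` tail; `require.Failf` takes an explicit format string. A compressed illustration, using `assert` (the non-fatal twin of `require`) so both variants render in one run; semantics per testify as I read it, worth checking against the vendored version:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFailVsFailf(t *testing.T) {
	v := struct{ Key string }{"k"}
	// Fail's first argument is a plain message, not a format string: "%s"
	// stays verbatim and v is appended separately via msgAndArgs.
	assert.Fail(t, "premature value %s", v)
	// Failf takes an explicit format string, so v is formatted as intended.
	assert.Failf(t, "premature value", "value: %#+v", v)
}
```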
diff --git a/pkg/kv/kvserver/benignerror/BUILD.bazel b/pkg/kv/kvserver/benignerror/BUILD.bazel
index 5e7be4ad03f3..4da2c67866ca 100644
--- a/pkg/kv/kvserver/benignerror/BUILD.bazel
+++ b/pkg/kv/kvserver/benignerror/BUILD.bazel
@@ -2,10 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "benignerror",
- srcs = [
- "benign_error.go",
- "store_benign_error.go",
- ],
+ srcs = ["benign_error.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/benignerror",
visibility = ["//visibility:public"],
deps = ["@com_github_cockroachdb_errors//:errors"],
diff --git a/pkg/kv/kvserver/benignerror/store_benign_error.go b/pkg/kv/kvserver/benignerror/store_benign_error.go
deleted file mode 100644
index 6377c66f2027..000000000000
--- a/pkg/kv/kvserver/benignerror/store_benign_error.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2023 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package benignerror
-
-import "github.com/cockroachdb/errors"
-
-// StoreBenignError may be used for declaring an error that is less serious i.e.
-// benign, originated from a store, and requires a retry. These errors are
-// tracked in a metric away from other benign errors.
-type StoreBenignError struct {
- cause error
-}
-
-// NewStoreBenign returns a new store benign error with the given error cause.
-func NewStoreBenign(cause error) *StoreBenignError {
- return &StoreBenignError{cause: cause}
-}
-
-func (be *StoreBenignError) Error() string { return be.cause.Error() }
-func (be *StoreBenignError) Cause() error { return be.cause }
-
-func IsStoreBenign(err error) bool {
- return errors.HasType(err, (*StoreBenignError)(nil))
-}
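For reference, the deleted type was an instance of the standard `cockroachdb/errors` marker-type pattern: a thin wrapper detected with `errors.HasType`, which sees through `errors.Wrap` layers. A minimal self-contained sketch of the same pattern; `retryableError` is an illustrative name, not one from the tree:

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

// retryableError is an illustrative stand-in for the deleted
// StoreBenignError: a marker wrapper detected by type rather than value.
type retryableError struct{ cause error }

func (e *retryableError) Error() string { return e.cause.Error() }
func (e *retryableError) Cause() error  { return e.cause }

func isRetryable(err error) bool {
	// HasType unwraps, so intermediate wrapping does not hide the marker.
	return errors.HasType(err, (*retryableError)(nil))
}

func main() {
	err := errors.Wrap(&retryableError{cause: errors.New("disk busy")}, "queue")
	fmt.Println(isRetryable(err)) // true
}
```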
diff --git a/pkg/kv/kvserver/consistency_queue.go b/pkg/kv/kvserver/consistency_queue.go
index 59ad7056fe93..26f6e125c847 100644
--- a/pkg/kv/kvserver/consistency_queue.go
+++ b/pkg/kv/kvserver/consistency_queue.go
@@ -104,7 +104,6 @@ func newConsistencyQueue(store *Store) *consistencyQueue {
acceptsUnsplitRanges: true,
successes: store.metrics.ConsistencyQueueSuccesses,
failures: store.metrics.ConsistencyQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.ConsistencyQueuePending,
processingNanos: store.metrics.ConsistencyQueueProcessingNanos,
processTimeoutFunc: makeRateLimitedTimeoutFunc(consistencyCheckRate),
diff --git a/pkg/kv/kvserver/merge_queue.go b/pkg/kv/kvserver/merge_queue.go
index c8063c71f07c..ede6f669dcd5 100644
--- a/pkg/kv/kvserver/merge_queue.go
+++ b/pkg/kv/kvserver/merge_queue.go
@@ -125,7 +125,6 @@ func newMergeQueue(store *Store, db *kv.DB) *mergeQueue {
acceptsUnsplitRanges: false,
successes: store.metrics.MergeQueueSuccesses,
failures: store.metrics.MergeQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.MergeQueuePending,
processingNanos: store.metrics.MergeQueueProcessingNanos,
purgatory: store.metrics.MergeQueuePurgatory,
diff --git a/pkg/kv/kvserver/metrics.go b/pkg/kv/kvserver/metrics.go
index 06de7ae64f40..bf7679cfa898 100644
--- a/pkg/kv/kvserver/metrics.go
+++ b/pkg/kv/kvserver/metrics.go
@@ -1815,13 +1815,6 @@ The messages are dropped to help these replicas to recover from I/O overload.`,
Unit: metric.Unit_PERCENT,
}
- // Replica queue metrics.
- metaStoreFailures = metric.Metadata{
- Name: "storage.queue.store-failures",
- Help: "Number of replicas which failed processing in replica queues due to retryable store errors",
- Measurement: "Replicas",
- Unit: metric.Unit_COUNT,
- }
metaMVCCGCQueueSuccesses = metric.Metadata{
Name: "queue.gc.process.success",
Help: "Number of replicas successfully processed by the MVCC GC queue",
@@ -2875,7 +2868,6 @@ type StoreMetrics struct {
RaftCoalescedHeartbeatsPending *metric.Gauge
// Replica queue metrics.
- StoreFailures *metric.Counter
MVCCGCQueueSuccesses *metric.Counter
MVCCGCQueueFailures *metric.Counter
MVCCGCQueuePending *metric.Gauge
@@ -3645,7 +3637,6 @@ func newStoreMetrics(histogramWindow time.Duration) *StoreMetrics {
RaftCoalescedHeartbeatsPending: metric.NewGauge(metaRaftCoalescedHeartbeatsPending),
// Replica queue metrics.
- StoreFailures: metric.NewCounter(metaStoreFailures),
MVCCGCQueueSuccesses: metric.NewCounter(metaMVCCGCQueueSuccesses),
MVCCGCQueueFailures: metric.NewCounter(metaMVCCGCQueueFailures),
MVCCGCQueuePending: metric.NewGauge(metaMVCCGCQueuePending),
diff --git a/pkg/kv/kvserver/mvcc_gc_queue.go b/pkg/kv/kvserver/mvcc_gc_queue.go
index 49461adc1dfa..d39c6fb395ec 100644
--- a/pkg/kv/kvserver/mvcc_gc_queue.go
+++ b/pkg/kv/kvserver/mvcc_gc_queue.go
@@ -199,7 +199,6 @@ func newMVCCGCQueue(store *Store) *mvccGCQueue {
},
successes: store.metrics.MVCCGCQueueSuccesses,
failures: store.metrics.MVCCGCQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.MVCCGCQueuePending,
processingNanos: store.metrics.MVCCGCQueueProcessingNanos,
disabledConfig: kvserverbase.MVCCGCQueueEnabled,
diff --git a/pkg/kv/kvserver/queue.go b/pkg/kv/kvserver/queue.go
index 376888bd969e..5d9da3efe1b4 100644
--- a/pkg/kv/kvserver/queue.go
+++ b/pkg/kv/kvserver/queue.go
@@ -329,10 +329,6 @@ type queueConfig struct {
successes *metric.Counter
// failures is a counter of replicas which failed processing.
failures *metric.Counter
- // storeFailures is a counter of replicas that failed processing due to a
- // StoreBenignError. These errors must be counted independently of the above
- // failures metric.
- storeFailures *metric.Counter
// pending is a gauge measuring current replica count pending.
pending *metric.Gauge
// processingNanos is a counter measuring total nanoseconds spent processing
@@ -1170,17 +1166,12 @@ func (bq *baseQueue) finishProcessingReplica(
// Handle failures.
if err != nil {
benign := benignerror.IsBenign(err)
- storeBenign := benignerror.IsStoreBenign(err)
// Increment failures metric.
//
// TODO(tschottdorf): once we start asserting zero failures in tests
// (and production), move benign failures into a dedicated category.
bq.failures.Inc(1)
- if storeBenign {
- bq.storeFailures.Inc(1)
- requeue = true
- }
// Determine whether a failure is a purgatory error. If it is, add
// the failing replica to purgatory. Note that even if the item was
diff --git a/pkg/kv/kvserver/queue_concurrency_test.go b/pkg/kv/kvserver/queue_concurrency_test.go
index 63c0c0dad1d3..f224c6c4d5f0 100644
--- a/pkg/kv/kvserver/queue_concurrency_test.go
+++ b/pkg/kv/kvserver/queue_concurrency_test.go
@@ -58,7 +58,6 @@ func TestBaseQueueConcurrent(t *testing.T) {
// We don't care about these, but we don't want to crash.
successes: metric.NewCounter(metric.Metadata{Name: "processed"}),
failures: metric.NewCounter(metric.Metadata{Name: "failures"}),
- storeFailures: metric.NewCounter(metric.Metadata{Name: "store_failures"}),
pending: metric.NewGauge(metric.Metadata{Name: "pending"}),
processingNanos: metric.NewCounter(metric.Metadata{Name: "processingnanos"}),
purgatory: metric.NewGauge(metric.Metadata{Name: "purgatory"}),
diff --git a/pkg/kv/kvserver/queue_test.go b/pkg/kv/kvserver/queue_test.go
index c0274e8f7192..9443c2cb9e0e 100644
--- a/pkg/kv/kvserver/queue_test.go
+++ b/pkg/kv/kvserver/queue_test.go
@@ -18,7 +18,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/keys"
- "github.com/cockroachdb/cockroach/pkg/kv/kvserver/benignerror"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
@@ -108,7 +107,6 @@ func makeTestBaseQueue(name string, impl queueImpl, store *Store, cfg queueConfi
}
cfg.successes = metric.NewCounter(metric.Metadata{Name: "processed"})
cfg.failures = metric.NewCounter(metric.Metadata{Name: "failures"})
- cfg.storeFailures = metric.NewCounter(metric.Metadata{Name: "store_failures"})
cfg.pending = metric.NewGauge(metric.Metadata{Name: "pending"})
cfg.processingNanos = metric.NewCounter(metric.Metadata{Name: "processingnanos"})
cfg.purgatory = metric.NewGauge(metric.Metadata{Name: "purgatory"})
@@ -1559,18 +1557,4 @@ func TestBaseQueueRequeue(t *testing.T) {
bq.maybeAdd(ctx, r1, hlc.ClockTimestamp{})
assertShouldQueueCount(6)
assertProcessedAndProcessing(2, 0)
-
- // Reset shouldQueueCount so we actually process the replica. Then return
- // a StoreBenign error. It should requeue the replica.
- atomic.StoreInt64(&shouldQueueCount, 0)
- pQueue.err = benignerror.NewStoreBenign(errors.New("test"))
- bq.maybeAdd(ctx, r1, hlc.ClockTimestamp{})
- assertShouldQueueCount(1)
- assertProcessedAndProcessing(2, 1)
- // Let the first processing attempt finish. It should requeue.
- pQueue.processBlocker <- struct{}{}
- assertProcessedAndProcessing(3, 1)
- pQueue.err = nil
- pQueue.processBlocker <- struct{}{}
- assertProcessedAndProcessing(4, 0)
}
diff --git a/pkg/kv/kvserver/raft_log_queue.go b/pkg/kv/kvserver/raft_log_queue.go
index 5a42dbddd930..66bbfbb6aa26 100644
--- a/pkg/kv/kvserver/raft_log_queue.go
+++ b/pkg/kv/kvserver/raft_log_queue.go
@@ -175,7 +175,6 @@ func newRaftLogQueue(store *Store, db *kv.DB) *raftLogQueue {
acceptsUnsplitRanges: true,
successes: store.metrics.RaftLogQueueSuccesses,
failures: store.metrics.RaftLogQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.RaftLogQueuePending,
processingNanos: store.metrics.RaftLogQueueProcessingNanos,
disabledConfig: kvserverbase.RaftLogQueueEnabled,
diff --git a/pkg/kv/kvserver/raft_snapshot_queue.go b/pkg/kv/kvserver/raft_snapshot_queue.go
index 13a1cb6a2663..34267fd83a05 100644
--- a/pkg/kv/kvserver/raft_snapshot_queue.go
+++ b/pkg/kv/kvserver/raft_snapshot_queue.go
@@ -51,7 +51,6 @@ func newRaftSnapshotQueue(store *Store) *raftSnapshotQueue {
processTimeoutFunc: makeRateLimitedTimeoutFunc(rebalanceSnapshotRate),
successes: store.metrics.RaftSnapshotQueueSuccesses,
failures: store.metrics.RaftSnapshotQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.RaftSnapshotQueuePending,
processingNanos: store.metrics.RaftSnapshotQueueProcessingNanos,
disabledConfig: kvserverbase.RaftSnapshotQueueEnabled,
diff --git a/pkg/kv/kvserver/replica_gc_queue.go b/pkg/kv/kvserver/replica_gc_queue.go
index 24376896fcd2..9d6be6e639d5 100644
--- a/pkg/kv/kvserver/replica_gc_queue.go
+++ b/pkg/kv/kvserver/replica_gc_queue.go
@@ -101,7 +101,6 @@ func newReplicaGCQueue(store *Store, db *kv.DB) *replicaGCQueue {
processDestroyedReplicas: true,
successes: store.metrics.ReplicaGCQueueSuccesses,
failures: store.metrics.ReplicaGCQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.ReplicaGCQueuePending,
processingNanos: store.metrics.ReplicaGCQueueProcessingNanos,
disabledConfig: kvserverbase.ReplicaGCQueueEnabled,
diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go
index 69e433280913..5988442a3917 100644
--- a/pkg/kv/kvserver/replicate_queue.go
+++ b/pkg/kv/kvserver/replicate_queue.go
@@ -572,7 +572,6 @@ func newReplicateQueue(store *Store, allocator allocatorimpl.Allocator) *replica
processTimeoutFunc: makeRateLimitedTimeoutFunc(rebalanceSnapshotRate),
successes: store.metrics.ReplicateQueueSuccesses,
failures: store.metrics.ReplicateQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.ReplicateQueuePending,
processingNanos: store.metrics.ReplicateQueueProcessingNanos,
purgatory: store.metrics.ReplicateQueuePurgatory,
diff --git a/pkg/kv/kvserver/split_queue.go b/pkg/kv/kvserver/split_queue.go
index 170964f3d5cb..7b44caa6ab66 100644
--- a/pkg/kv/kvserver/split_queue.go
+++ b/pkg/kv/kvserver/split_queue.go
@@ -134,7 +134,6 @@ func newSplitQueue(store *Store, db *kv.DB) *splitQueue {
acceptsUnsplitRanges: true,
successes: store.metrics.SplitQueueSuccesses,
failures: store.metrics.SplitQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.SplitQueuePending,
processingNanos: store.metrics.SplitQueueProcessingNanos,
purgatory: store.metrics.SplitQueuePurgatory,
diff --git a/pkg/kv/kvserver/ts_maintenance_queue.go b/pkg/kv/kvserver/ts_maintenance_queue.go
index aa4807fdd931..b8e94aadac40 100644
--- a/pkg/kv/kvserver/ts_maintenance_queue.go
+++ b/pkg/kv/kvserver/ts_maintenance_queue.go
@@ -105,7 +105,6 @@ func newTimeSeriesMaintenanceQueue(
acceptsUnsplitRanges: true,
successes: store.metrics.TimeSeriesMaintenanceQueueSuccesses,
failures: store.metrics.TimeSeriesMaintenanceQueueFailures,
- storeFailures: store.metrics.StoreFailures,
pending: store.metrics.TimeSeriesMaintenanceQueuePending,
processingNanos: store.metrics.TimeSeriesMaintenanceQueueProcessingNanos,
disabledConfig: kvserverbase.TimeSeriesMaintenanceQueueEnabled,