kvserver: minor renames
Rename some variables to match the cluster setting controlling them.

Release note: None
andreimatei committed Oct 16, 2020
1 parent b0a1069 commit 63ad540
Showing 1 changed file with 8 additions and 8 deletions.
pkg/kv/kvserver/store.go
@@ -149,17 +149,17 @@ var queueAdditionOnSystemConfigUpdateBurst = settings.RegisterNonNegativeIntSett
   "the burst rate at which the store will add all replicas to the split and merge queue due to system config gossip",
   32)
 
-// raftLeadershipTransferTimeout limits the amount of time a drain command
-// waits for lease transfers.
-var raftLeadershipTransferWait = func() *settings.DurationSetting {
+// leaseTransferWait limits the amount of time a drain command waits for lease
+// and Raft leadership transfers.
+var leaseTransferWait = func() *settings.DurationSetting {
   s := settings.RegisterValidatedDurationSetting(
-    raftLeadershipTransferWaitKey,
+    leaseTransferWaitSettingName,
     "the amount of time a server waits to transfer range leases before proceeding with the rest of the shutdown process",
     5*time.Second,
     func(v time.Duration) error {
       if v < 0 {
         return errors.Errorf("cannot set %s to a negative duration: %s",
-          raftLeadershipTransferWaitKey, v)
+          leaseTransferWaitSettingName, v)
       }
       return nil
     },
@@ -168,7 +168,7 @@ var raftLeadershipTransferWait = func() *settings.DurationSetting {
   return s
 }()
 
-const raftLeadershipTransferWaitKey = "server.shutdown.lease_transfer_wait"
+const leaseTransferWaitSettingName = "server.shutdown.lease_transfer_wait"
 
 // ExportRequestsLimit is the number of Export requests that can run at once.
 // Each extracts data from RocksDB to a temp file and then uploads it to cloud
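
The hunks above register the setting through an immediately invoked function, so the package-level variable, its default, and its validation are built in one expression. Below is a minimal standalone sketch of that pattern; durationSetting stands in for CockroachDB's settings.DurationSetting, and all names here (exampleWait, exampleWaitSettingName) are hypothetical, not part of this change.

package main

import (
    "fmt"
    "time"
)

// durationSetting is a stand-in for settings.DurationSetting: a named knob
// with a current value and a validation function applied on every update.
type durationSetting struct {
    name     string
    value    time.Duration
    validate func(time.Duration) error
}

// set applies the validator before accepting a new value.
func (s *durationSetting) set(v time.Duration) error {
    if err := s.validate(v); err != nil {
        return err
    }
    s.value = v
    return nil
}

const exampleWaitSettingName = "server.shutdown.example_wait"

// exampleWait mirrors the pattern in the diff: an immediately invoked
// function constructs the setting and returns it, so the package-level
// variable is fully initialized in a single expression.
var exampleWait = func() *durationSetting {
    return &durationSetting{
        name:  exampleWaitSettingName,
        value: 5 * time.Second,
        validate: func(v time.Duration) error {
            // Reject negative durations; zero stays legal, as in the diff.
            if v < 0 {
                return fmt.Errorf("cannot set %s to a negative duration: %s",
                    exampleWaitSettingName, v)
            }
            return nil
        },
    }
}()

func main() {
    fmt.Println(exampleWait.set(-time.Second))     // validation error
    fmt.Println(exampleWait.set(10 * time.Second)) // nil; value updated
}

The rename in this commit makes the Go identifiers (leaseTransferWait, leaseTransferWaitSettingName) match the setting key they control, the same relationship the sketch's names are chosen to show.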
@@ -1218,7 +1218,7 @@ func (s *Store) SetDraining(drain bool, reporter func(int, redact.SafeString)) {
 
   // We've seen all the replicas once. Now we're going to iterate
   // until they're all gone, up to the configured timeout.
-  transferTimeout := raftLeadershipTransferWait.Get(&s.cfg.Settings.SV)
+  transferTimeout := leaseTransferWait.Get(&s.cfg.Settings.SV)
 
   drainLeasesOp := "transfer range leases"
   if err := contextutil.RunWithTimeout(ctx, drainLeasesOp, transferTimeout,
@@ -1258,7 +1258,7 @@ func (s *Store) SetDraining(drain bool, reporter func(int, redact.SafeString)) {
     // same time. If we see it on healthy ones, there's likely something to fix.
     log.Warningf(ctx, "unable to drain cleanly within %s (cluster setting %s), "+
       "service might briefly deteriorate if the node is terminated: %s",
-      transferTimeout, raftLeadershipTransferWaitKey, tErr.Cause())
+      transferTimeout, leaseTransferWaitSettingName, tErr.Cause())
   } else {
     log.Warningf(ctx, "drain error: %+v", err)
   }
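
The two hunks above read the setting at drain time and use it to bound the lease-transfer loop, warning with the setting's name when the deadline is hit. Below is a minimal standalone sketch of that flow; runWithTimeout stands in for CockroachDB's contextutil.RunWithTimeout, and a fixed duration stands in for the leaseTransferWait.Get(&s.cfg.Settings.SV) lookup.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// runWithTimeout is a stand-in for contextutil.RunWithTimeout: it bounds fn
// by the given timeout and wraps a deadline error so the caller can tell a
// timeout apart from other failures.
func runWithTimeout(
    ctx context.Context, op string, timeout time.Duration, fn func(context.Context) error,
) error {
    ctx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()
    if err := fn(ctx); err != nil {
        if errors.Is(err, context.DeadlineExceeded) {
            return fmt.Errorf("operation %q timed out after %s: %w", op, timeout, err)
        }
        return err
    }
    return nil
}

func main() {
    // transferTimeout plays the role of the setting lookup: the value is read
    // each time a drain starts, so changing the cluster setting takes effect
    // on the next drain without a restart.
    transferTimeout := 5 * time.Second

    err := runWithTimeout(context.Background(), "transfer range leases", transferTimeout,
        func(ctx context.Context) error {
            // Placeholder for the retry loop that drains leases; block until
            // the context expires to demonstrate the timeout path.
            <-ctx.Done()
            return ctx.Err()
        })
    if err != nil {
        // As in the diff, the warning names the cluster setting so an
        // operator knows which knob to raise.
        fmt.Printf("unable to drain cleanly within %s (cluster setting %s): %v\n",
            transferTimeout, "server.shutdown.lease_transfer_wait", err)
    }
}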
