storage: revert cluster setting to expire sticky bits
Reverts #37728. A cluster setting is too coarse to be useful: multiple
people may be working in the same cluster, and they would conflict on
when they want their sticky bits to expire.

Release note (general change): Remove the kv.range_merge.manual_split.ttl
setting.
jeffrey-xiao committed Jun 4, 2019
1 parent 9521979 commit 9f38ee4
Showing 3 changed files with 5 additions and 50 deletions.
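
For context, the reverted logic gated automatic merging of a manually split range on a single clusterwide TTL. Below is a minimal sketch of that check, with hypothetical stand-in types for the CockroachDB internals (the real logic lived in pkg/storage/merge_queue.go, shown in the last diff below):

package stickysketch

import "time"

// shouldSkipMerge mirrors the reverted check: a manually split range
// (stickyBit != nil) was skipped by the merge queue only until its sticky
// bit aged past the kv.range_merge.manual_split.ttl duration.
func shouldSkipMerge(stickyBit *time.Time, manualSplitTTL time.Duration, now time.Time) bool {
	if stickyBit == nil {
		return false // not a manual split; merging may proceed
	}
	// A zero TTL meant manual splits never expired.
	expired := manualSplitTTL != 0 && stickyBit.Add(manualSplitTTL).Before(now)
	return !expired // skip merging while the sticky bit is unexpired
}

Tests overrode the setting directly, as in the storage.ManualSplitTTL.Override(sv, manualSplitTTL) call in the removed test lines below.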
1 change: 0 additions & 1 deletion docs/generated/settings/settings.html
@@ -43,7 +43,6 @@
 <tr><td><code>kv.raft_log.disable_synchronization_unsafe</code></td><td>boolean</td><td><code>false</code></td><td>set to true to disable synchronization on Raft log writes to persistent storage. Setting to true risks data loss or data corruption on server crashes. The setting is meant for internal testing only and SHOULD NOT be used in production.</td></tr>
 <tr><td><code>kv.range.backpressure_range_size_multiplier</code></td><td>float</td><td><code>2</code></td><td>multiple of range_max_bytes that a range is allowed to grow to without splitting before writes to that range are blocked, or 0 to disable</td></tr>
 <tr><td><code>kv.range_descriptor_cache.size</code></td><td>integer</td><td><code>1000000</code></td><td>maximum number of entries in the range descriptor and leaseholder caches</td></tr>
-<tr><td><code>kv.range_merge.manual_split.ttl</code></td><td>duration</td><td><code>0s</code></td><td>if nonzero, manual splits older than this duration will be considered for automatic range merging</td></tr>
 <tr><td><code>kv.range_merge.queue_enabled</code></td><td>boolean</td><td><code>true</code></td><td>whether the automatic merge queue is enabled</td></tr>
 <tr><td><code>kv.range_merge.queue_interval</code></td><td>duration</td><td><code>1s</code></td><td>how long the merge queue waits between processing replicas (WARNING: may compromise cluster stability or correctness; do not edit without supervision)</td></tr>
 <tr><td><code>kv.range_split.by_load_enabled</code></td><td>boolean</td><td><code>true</code></td><td>allow automatic splits of ranges based on where load is concentrated</td></tr>
28 changes: 0 additions & 28 deletions pkg/storage/client_merge_test.go
@@ -3275,24 +3275,18 @@ func TestMergeQueue(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 
 	ctx := context.Background()
-	manualClock := hlc.NewManualClock(123)
-	clock := hlc.NewClock(manualClock.UnixNano, time.Nanosecond)
 	storeCfg := storage.TestStoreConfig(nil)
 	storeCfg.TestingKnobs.DisableSplitQueue = true
 	storeCfg.TestingKnobs.DisableScanner = true
 	sv := &storeCfg.Settings.SV
 	storagebase.MergeQueueEnabled.Override(sv, true)
 	storage.MergeQueueInterval.Override(sv, 0) // process greedily
-	manualSplitTTL := time.Millisecond * 200
-	storage.ManualSplitTTL.Override(sv, manualSplitTTL)
 	var mtc multiTestContext
 	// This test was written before the multiTestContext started creating many
 	// system ranges at startup, and hasn't been updated to take that into account.
 	mtc.startWithSingleRange = true
 
 	mtc.storeConfig = &storeCfg
-	// Inject clock for manipulation in tests
-	mtc.clock = clock
 	mtc.Start(t, 2)
 	defer mtc.Stop()
 	mtc.initGossipNetwork() // needed for the non-collocated case's rebalancing to work
@@ -3447,28 +3441,6 @@ func TestMergeQueue(t *testing.T) {
 		store.MustForceMergeScanAndProcess()
 		verifyMerged(t)
 	})
-
-	t.Run("sticky bit ttl", func(t *testing.T) {
-		reset(t)
-		store.MustForceMergeScanAndProcess()
-		verifyUnmerged(t)
-
-		// Perform manual split and verify that no merge occurred.
-		split(t, rhsStartKey.AsRawKey(), true /* manual */)
-		clearRange(t, lhsStartKey, rhsEndKey)
-		store.MustForceMergeScanAndProcess()
-		verifyUnmerged(t)
-
-		// Sticky bit is not expired yet.
-		manualClock.Set(manualSplitTTL.Nanoseconds())
-		store.MustForceMergeScanAndProcess()
-		verifyUnmerged(t)
-
-		// Sticky bit is expired.
-		manualClock.Set(manualSplitTTL.Nanoseconds() * 2)
-		store.MustForceMergeScanAndProcess()
-		verifyMerged(t)
-	})
 }
 
 func TestInvalidSubsumeRequest(t *testing.T) {
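The removed subtest drove this check by injecting a manual clock and jumping it past the TTL. A self-contained sketch of that testing pattern, reusing the hypothetical shouldSkipMerge above with a stand-in for hlc.ManualClock:

package stickysketch

import (
	"testing"
	"time"
)

// fakeClock stands in for the injected manual clock: tests set its time directly.
type fakeClock struct{ now time.Time }

func (c *fakeClock) Set(t time.Time) { c.now = t }

func TestStickyBitExpiry(t *testing.T) {
	clock := &fakeClock{now: time.Unix(0, 123)} // arbitrary start, as in hlc.NewManualClock(123)
	splitTime := clock.now                      // sticky bit recorded at split time
	ttl := 200 * time.Millisecond

	// Before the TTL elapses, the merge queue must skip the range.
	if !shouldSkipMerge(&splitTime, ttl, clock.now) {
		t.Fatal("expected merge to be skipped before the TTL expired")
	}

	// Jump the clock past the TTL; the sticky bit is now expired.
	clock.Set(splitTime.Add(2 * ttl))
	if shouldSkipMerge(&splitTime, ttl, clock.now) {
		t.Fatal("expected merge to proceed after the TTL expired")
	}
}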
26 changes: 5 additions & 21 deletions pkg/storage/merge_queue.go
@@ -56,17 +56,6 @@ var MergeQueueInterval = func() *settings.DurationSetting {
 	return s
 }()
 
-// ManualSplitTTL is the amount of time that the merge queue will not consider
-// a manually split range for merging if it is nonzero. If ManualSplitTTL is
-// zero, then no manual splits are considered for automatic merging. If a range
-// is split into a LHS and a RHS, then the merge queue will not consider merging
-// the RHS with the range to its left until ManualSplitTTL time has passed.
-var ManualSplitTTL = settings.RegisterNonNegativeDurationSetting(
-	"kv.range_merge.manual_split.ttl",
-	"if nonzero, manual splits older than this duration will be considered for automatic range merging",
-	0,
-)
-
 // mergeQueue manages a queue of ranges slated to be merged with their right-
 // hand neighbor.
 //
@@ -300,17 +289,12 @@ func (mq *mergeQueue) process(
 		}
 	}
 
-	// Range was manually split and not expired, so skip merging.
+	// Range was manually split, so skip merging.
 	if rhsDesc.StickyBit != nil {
-		manualSplitTTL := ManualSplitTTL.Get(&mq.store.ClusterSettings().SV)
-		manualSplitExpired := manualSplitTTL != 0 && rhsDesc.StickyBit.GoTime().Add(manualSplitTTL).Before(mq.store.Clock().PhysicalTime())
-		if !manualSplitExpired {
-			log.VEventf(ctx, 2, "skipping merge: ranges were manually split and sticky bit was not expired")
-			// TODO(jeffreyxiao): Consider returning a purgatory error to avoid
-			// repeatedly processing ranges that cannot be merged.
-			return nil
-		}
-		log.VEventf(ctx, 2, "ranges were manually split, but sticky bit was expired")
+		log.VEventf(ctx, 2, "skipping merge: ranges were manually split")
+		// TODO(jeffreyxiao): Consider returning a purgatory error to avoid
+		// repeatedly processing ranges that cannot be merged.
+		return nil
 	}
 
 	log.VEventf(ctx, 2, "merging to produce range: %s-%s", mergedDesc.StartKey, mergedDesc.EndKey)
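After the revert, the sticky-bit check collapses to a nil test: a manually split range is skipped unconditionally, and the sticky bit never expires on its own. Under the same hypothetical stand-in types as above, the post-revert behavior reduces to:

package stickysketch

import "time"

// Post-revert: any non-nil sticky bit skips merging; no TTL is involved.
func shouldSkipMergePostRevert(stickyBit *time.Time) bool {
	return stickyBit != nil
}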
