From 4c2ec63e8c5205c51d0aa587efcd6868c3d1b3c8 Mon Sep 17 00:00:00 2001 From: irfan sharif Date: Wed, 12 Jul 2023 00:24:31 -0400 Subject: [PATCH] admission: add l0 control metrics + settings Part of #82743. We add cluster settings to control: - smoothing alpha for byte token computations; - reduction factor for L0 compaction tokens, based on observed compactions; We've found these to be useful in internal experiments, and also when looking to paper over L0 compaction variability effects up in AC. While here, print out observed smoothed compaction bytes in io_load_listener logging and introduce metrics for - l0 compacted bytes; - generated l0 tokens; - l0 tokens returned. Release note: None --- pkg/util/admission/grant_coordinator.go | 27 ++++++-- pkg/util/admission/granter.go | 69 ++++++++++++++----- pkg/util/admission/granter_test.go | 3 + pkg/util/admission/io_load_listener.go | 39 +++++++++-- pkg/util/admission/io_load_listener_test.go | 18 ++++- .../testdata/format_adjust_tokens_stats.txt | 2 +- pkg/util/admission/testdata/io_load_listener | 40 +++++------ 7 files changed, 144 insertions(+), 54 deletions(-) diff --git a/pkg/util/admission/grant_coordinator.go b/pkg/util/admission/grant_coordinator.go index afa7dd4a2f33..ce2ccb6fc206 100644 --- a/pkg/util/admission/grant_coordinator.go +++ b/pkg/util/admission/grant_coordinator.go @@ -56,6 +56,9 @@ type StoreGrantCoordinators struct { kvElasticIOTokensAvailable *metric.Gauge kvIOTokensTookWithoutPermission *metric.Counter kvIOTotalTokensTaken *metric.Counter + kvIOTotalTokensReturned *metric.Counter + l0CompactedBytes *metric.Counter + l0TokensProduced *metric.Counter // These metrics are shared by WorkQueues across stores. workQueueMetrics *WorkQueueMetrics @@ -166,12 +169,13 @@ func (sgc *StoreGrantCoordinators) initGrantCoordinator(storeID roachpb.StoreID) // Setting tokens to unlimited is defensive. We expect that // pebbleMetricsTick and allocateIOTokensTick will get called during // initialization, which will also set these to unlimited. 
- startingIOTokens: unlimitedTokens / unloadedDuration.ticksInAdjustmentInterval(), - ioTokensExhaustedDurationMetric: sgc.kvIOTokensExhaustedDuration, - availableTokensMetrics: sgc.kvIOTokensAvailable, - availableElasticTokensMetric: sgc.kvElasticIOTokensAvailable, - tookWithoutPermissionMetric: sgc.kvIOTokensTookWithoutPermission, - totalTokensTaken: sgc.kvIOTotalTokensTaken, + startingIOTokens: unlimitedTokens / unloadedDuration.ticksInAdjustmentInterval(), + ioTokensExhaustedDurationMetric: sgc.kvIOTokensExhaustedDuration, + availableTokensMetric: sgc.kvIOTokensAvailable, + availableElasticTokensMetric: sgc.kvElasticIOTokensAvailable, + tokensTakenWithoutPermissionMetric: sgc.kvIOTokensTookWithoutPermission, + tokensTakenMetric: sgc.kvIOTotalTokensTaken, + tokensReturnedMetric: sgc.kvIOTotalTokensReturned, } kvg.coordMu.availableIOTokens = unlimitedTokens / unloadedDuration.ticksInAdjustmentInterval() kvg.coordMu.availableElasticIOTokens = kvg.coordMu.availableIOTokens @@ -215,6 +219,8 @@ func (sgc *StoreGrantCoordinators) initGrantCoordinator(storeID roachpb.StoreID) perWorkTokenEstimator: makeStorePerWorkTokenEstimator(), diskBandwidthLimiter: makeDiskBandwidthLimiter(), kvGranter: kvg, + l0CompactedBytes: sgc.l0CompactedBytes, + l0TokensProduced: sgc.l0TokensProduced, } return coord } @@ -468,8 +474,11 @@ func makeStoresGrantCoordinators( kvIOTokensExhaustedDuration: metrics.KVIOTokensExhaustedDuration, kvIOTokensTookWithoutPermission: metrics.KVIOTokensTookWithoutPermission, kvIOTotalTokensTaken: metrics.KVIOTotalTokensTaken, + kvIOTotalTokensReturned: metrics.KVIOTotalTokensReturned, kvIOTokensAvailable: metrics.KVIOTokensAvailable, kvElasticIOTokensAvailable: metrics.KVElasticIOTokensAvailable, + l0CompactedBytes: metrics.L0CompactedBytes, + l0TokensProduced: metrics.L0TokensProduced, workQueueMetrics: storeWorkQueueMetrics, onLogEntryAdmitted: onLogEntryAdmitted, knobs: knobs, @@ -1015,8 +1024,11 @@ type GrantCoordinatorMetrics struct { KVIOTokensExhaustedDuration *metric.Counter KVIOTokensTookWithoutPermission *metric.Counter KVIOTotalTokensTaken *metric.Counter + KVIOTotalTokensReturned *metric.Counter KVIOTokensAvailable *metric.Gauge KVElasticIOTokensAvailable *metric.Gauge + L0CompactedBytes *metric.Counter + L0TokensProduced *metric.Counter SQLLeafStartUsedSlots *metric.Gauge SQLRootStartUsedSlots *metric.Gauge } @@ -1038,8 +1050,11 @@ func makeGrantCoordinatorMetrics() GrantCoordinatorMetrics { SQLRootStartUsedSlots: metric.NewGauge(addName(workKindString(SQLStatementRootStartWork), usedSlots)), KVIOTokensTookWithoutPermission: metric.NewCounter(kvIONumIOTokensTookWithoutPermission), KVIOTotalTokensTaken: metric.NewCounter(kvIOTotalTokensTaken), + KVIOTotalTokensReturned: metric.NewCounter(kvIOTotalTokensReturned), KVIOTokensAvailable: metric.NewGauge(kvIOTokensAvailable), KVElasticIOTokensAvailable: metric.NewGauge(kvElasticIOTokensAvailable), + L0CompactedBytes: metric.NewCounter(l0CompactedBytes), + L0TokensProduced: metric.NewCounter(l0TokensProduced), } return m } diff --git a/pkg/util/admission/granter.go b/pkg/util/admission/granter.go index efb7471ec064..c31b1f38ba37 100644 --- a/pkg/util/admission/granter.go +++ b/pkg/util/admission/granter.go @@ -313,13 +313,15 @@ type kvStoreTokenGranter struct { // startingIOTokens is the number of tokens set by // setAvailableTokens. It is used to compute the tokens used, by // computing startingIOTokens-availableIOTokens. 
-	startingIOTokens                int64
-	ioTokensExhaustedDurationMetric *metric.Counter
-	availableTokensMetrics          *metric.Gauge
-	availableElasticTokensMetric    *metric.Gauge
-	tookWithoutPermissionMetric     *metric.Counter
-	totalTokensTaken                *metric.Counter
-	exhaustedStart                  time.Time
+	startingIOTokens                   int64
+	ioTokensExhaustedDurationMetric    *metric.Counter
+	availableTokensMetric              *metric.Gauge
+	availableElasticTokensMetric       *metric.Gauge
+	tokensReturnedMetric               *metric.Counter
+	tokensTakenMetric                  *metric.Counter
+	tokensTakenWithoutPermissionMetric *metric.Counter
+
+	exhaustedStart time.Time
 
 	// Estimation models.
 	l0WriteLM, l0IngestLM, ingestLM tokensLinearModel
@@ -374,8 +376,10 @@ func (cg *kvStoreTokenChildGranter) storeWriteDone(
 	// it. The one difference is that post token adjustments, if we observe the
 	// granter was previously exhausted but is no longer so, we're allowed to
 	// admit other waiting requests.
-	return cg.parent.storeReplicatedWorkAdmittedLocked(
+	additionalTokensTaken := cg.parent.storeReplicatedWorkAdmittedLocked(
 		cg.workClass, originalTokens, storeReplicatedWorkAdmittedInfo(doneInfo), true /* canGrantAnother */)
+	cg.parent.tokensTakenWithoutPermissionMetric.Inc(additionalTokensTaken)
+	return additionalTokensTaken
 }
 
 // storeReplicatedWorkAdmitted implements granterWithStoreReplicatedWorkAdmitted.
@@ -404,7 +408,6 @@ func (sg *kvStoreTokenGranter) tryGetLocked(count int64, demuxHandle int8) grant
 		if sg.coordMu.availableIOTokens > 0 {
 			sg.subtractTokensLocked(count, count, false)
 			sg.coordMu.diskBWTokensUsed[wc] += count
-			sg.totalTokensTaken.Inc(count)
 			return grantSuccess
 		}
 	case admissionpb.ElasticWorkClass:
@@ -414,7 +417,6 @@ func (sg *kvStoreTokenGranter) tryGetLocked(count int64, demuxHandle int8) grant
 			sg.subtractTokensLocked(count, count, false)
 			sg.coordMu.elasticIOTokensUsedByElastic += count
 			sg.coordMu.diskBWTokensUsed[wc] += count
-			sg.totalTokensTaken.Inc(count)
 			return grantSuccess
 		}
 	}
@@ -446,8 +448,7 @@ func (sg *kvStoreTokenGranter) tookWithoutPermission(workClass admissionpb.WorkC
 func (sg *kvStoreTokenGranter) tookWithoutPermissionLocked(count int64, demuxHandle int8) {
 	wc := admissionpb.WorkClass(demuxHandle)
 	sg.subtractTokensLocked(count, count, false)
-	sg.tookWithoutPermissionMetric.Inc(count)
-	sg.totalTokensTaken.Inc(count)
+	sg.tokensTakenWithoutPermissionMetric.Inc(count)
 	if wc == admissionpb.ElasticWorkClass {
 		sg.coordMu.elasticDiskBWTokensAvailable -= count
 		sg.coordMu.elasticIOTokensUsedByElastic += count
@@ -462,6 +463,19 @@ func (sg *kvStoreTokenGranter) subtractTokensLocked(
 ) {
 	avail := sg.coordMu.availableIOTokens
 	sg.coordMu.availableIOTokens -= count
+	sg.coordMu.availableElasticIOTokens -= elasticCount
+	// Only update when not unlimited. Keep it whatever it was last otherwise.
+	if sg.coordMu.availableIOTokens != unlimitedTokens {
+		sg.availableTokensMetric.Update(sg.coordMu.availableIOTokens)
+	}
+	if sg.coordMu.availableElasticIOTokens != unlimitedTokens {
+		sg.availableElasticTokensMetric.Update(sg.coordMu.availableElasticIOTokens)
+	}
+	if count > 0 {
+		sg.tokensTakenMetric.Inc(count)
+	} else {
+		sg.tokensReturnedMetric.Inc(-count) // count <= 0 here: record the magnitude returned
+	}
 	if count > 0 && avail > 0 && sg.coordMu.availableIOTokens <= 0 {
 		// Transition from > 0 to <= 0.
sg.exhaustedStart = timeutil.Now() @@ -477,9 +491,6 @@ func (sg *kvStoreTokenGranter) subtractTokensLocked( sg.exhaustedStart = now } } - sg.availableTokensMetrics.Update(sg.coordMu.availableIOTokens) - sg.coordMu.availableElasticIOTokens -= elasticCount - sg.availableElasticTokensMetric.Update(sg.coordMu.availableElasticIOTokens) } // requesterHasWaitingRequests implements granterWithLockedCalls. @@ -570,10 +581,14 @@ func (sg *kvStoreTokenGranter) setAvailableTokens( sg.coordMu.availableElasticIOTokens = min(sg.coordMu.availableElasticIOTokens, sg.coordMu.availableIOTokens) } - + // Only update when not unlimited. Keep it whatever it was last otherwise. + if sg.coordMu.availableIOTokens != unlimitedTokens { + sg.availableTokensMetric.Update(sg.coordMu.availableIOTokens) + } + if sg.coordMu.availableElasticIOTokens != unlimitedTokens { + sg.availableElasticTokensMetric.Update(sg.coordMu.availableElasticIOTokens) + } sg.startingIOTokens = sg.coordMu.availableIOTokens - sg.availableTokensMetrics.Update(sg.coordMu.availableIOTokens) - sg.availableElasticTokensMetric.Update(sg.coordMu.availableElasticIOTokens) sg.coordMu.elasticDiskBWTokensAvailable += elasticDiskBandwidthTokens if sg.coordMu.elasticDiskBWTokensAvailable > elasticDiskBandwidthTokensCapacity { @@ -748,6 +763,12 @@ var ( Measurement: "Tokens", Unit: metric.Unit_COUNT, } + kvIOTotalTokensReturned = metric.Metadata{ + Name: "admission.granter.io_tokens_returned.kv", + Help: "Total number of tokens returned", + Measurement: "Tokens", + Unit: metric.Unit_COUNT, + } kvIOTokensAvailable = metric.Metadata{ Name: "admission.granter.io_tokens_available.kv", Help: "Number of tokens available", @@ -760,6 +781,18 @@ var ( Measurement: "Tokens", Unit: metric.Unit_COUNT, } + l0CompactedBytes = metric.Metadata{ + Name: "admission.l0_compacted_bytes.kv", + Help: "Total bytes compacted out of L0 (used to generate IO tokens)", + Measurement: "Tokens", + Unit: metric.Unit_COUNT, + } + l0TokensProduced = metric.Metadata{ + Name: "admission.l0_tokens_produced.kv", + Help: "Total bytes produced for L0 writes", + Measurement: "Tokens", + Unit: metric.Unit_COUNT, + } ) // TODO(irfansharif): we are lacking metrics for IO tokens and load, including diff --git a/pkg/util/admission/granter_test.go b/pkg/util/admission/granter_test.go index 83700a801aff..f329f75f023e 100644 --- a/pkg/util/admission/granter_test.go +++ b/pkg/util/admission/granter_test.go @@ -142,6 +142,9 @@ func TestGranterBasic(t *testing.T) { kvElasticIOTokensAvailable: metrics.KVElasticIOTokensAvailable, kvIOTokensTookWithoutPermission: metrics.KVIOTokensTookWithoutPermission, kvIOTotalTokensTaken: metrics.KVIOTotalTokensTaken, + kvIOTotalTokensReturned: metrics.KVIOTotalTokensReturned, + l0CompactedBytes: metrics.L0CompactedBytes, + l0TokensProduced: metrics.L0TokensProduced, workQueueMetrics: workQueueMetrics, disableTickerForTesting: true, knobs: &TestingKnobs{}, diff --git a/pkg/util/admission/io_load_listener.go b/pkg/util/admission/io_load_listener.go index 5b29ba618bbf..b2b48d97e31d 100644 --- a/pkg/util/admission/io_load_listener.go +++ b/pkg/util/admission/io_load_listener.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" 
"github.com/cockroachdb/logtags" @@ -108,6 +109,23 @@ var L0MinimumSizePerSubLevel = settings.RegisterIntSetting( "when non-zero, this indicates the minimum size that is needed to count towards one sub-level", 5<<20, settings.NonNegativeInt) +// L0CompactionAlpha is the exponential smoothing term used when measuring L0 +// compactions, which in turn is used to generate IO tokens. +var L0CompactionAlpha = settings.RegisterFloatSetting( + settings.TenantWritable, + "admission.l0_compacted_alpha", + "exponential smoothing term used when measuring L0 compactions to generate IO tokens", + 0.5, settings.PositiveFloat) + +// L0ReductionFactor is the exponential smoothing term used when measuring L0 +// compactions, which in turn is used to generate IO tokens. +var L0ReductionFactor = settings.RegisterFloatSetting( + settings.TenantWritable, + "admission.l0_reduction_factor", + "once overloaded, factor by which we reduce L0 compaction tokens based on observed compactions", + 2.0, + settings.FloatWithMinimum(1.0)) + // Experimental observations: // - Sub-level count of ~40 caused a node heartbeat latency p90, p99 of 2.5s, // 4s. With a setting that limits sub-level count to 10, before the system @@ -188,6 +206,9 @@ type ioLoadListener struct { adjustTokensResult perWorkTokenEstimator storePerWorkTokenEstimator diskBandwidthLimiter diskBandwidthLimiter + + l0CompactedBytes *metric.Counter + l0TokensProduced *metric.Counter } type ioLoadListenerState struct { @@ -641,7 +662,7 @@ type adjustTokensAuxComputations struct { // adjustTokensInner is used for computing tokens based on compaction and // flush bottlenecks. -func (*ioLoadListener) adjustTokensInner( +func (io *ioLoadListener) adjustTokensInner( ctx context.Context, prev ioLoadListenerState, l0Metrics pebble.LevelMetrics, @@ -677,9 +698,11 @@ func (*ioLoadListener) adjustTokensInner( // bytes (gauge). intL0CompactedBytes = 0 } - const alpha = 0.5 + io.l0CompactedBytes.Inc(intL0CompactedBytes) + // Compaction scheduling can be uneven in prioritizing L0 for compactions, // so smooth out what is being removed by compactions. + alpha := L0CompactionAlpha.Get(&io.settings.SV) smoothedIntL0CompactedBytes := int64(alpha*float64(intL0CompactedBytes) + (1-alpha)*float64(prev.smoothedIntL0CompactedBytes)) // Flush tokens: @@ -868,6 +891,7 @@ func (*ioLoadListener) adjustTokensInner( // threshold. var totalNumByteTokens int64 var smoothedCompactionByteTokens float64 + l0ReductionFactor := L0ReductionFactor.Get(&io.settings.SV) score, _ := ioThreshold.Score() // Multiplying score by 2 for ease of calculation. @@ -909,7 +933,7 @@ func (*ioLoadListener) adjustTokensInner( // Don't admit more byte work than we can remove via compactions. // totalNumByteTokens tracks our goal for admission. Scale down // since we want to get under the thresholds over time. - fTotalNumByteTokens = float64(smoothedIntL0CompactedBytes / 2.0) + fTotalNumByteTokens = float64(smoothedIntL0CompactedBytes) / l0ReductionFactor } else if score >= 0.5 && score < 1 { // Low load. Score in [0.5, 1). Tokens should be // smoothedIntL0CompactedBytes at 1, and 2 * smoothedIntL0CompactedBytes @@ -919,8 +943,8 @@ func (*ioLoadListener) adjustTokensInner( // Medium load. Score in [1, 2). We use linear interpolation from // medium load to overload, to slowly give out fewer tokens as we // move towards overload. 
- halfSmoothedBytes := float64(smoothedIntL0CompactedBytes / 2.0) - fTotalNumByteTokens = -score*halfSmoothedBytes + 3*halfSmoothedBytes + reducedSmoothedBytes := float64(smoothedIntL0CompactedBytes) / l0ReductionFactor + fTotalNumByteTokens = -score*reducedSmoothedBytes + 3*reducedSmoothedBytes } smoothedCompactionByteTokens = alpha*fTotalNumByteTokens + (1-alpha)*prev.smoothedCompactionByteTokens if float64(math.MaxInt64) < smoothedCompactionByteTokens { @@ -958,6 +982,9 @@ func (*ioLoadListener) adjustTokensInner( if totalNumElasticByteTokens > totalNumByteTokens { totalNumElasticByteTokens = totalNumByteTokens } + + io.l0TokensProduced.Inc(totalNumByteTokens) + // Install the latest cumulative stats. return adjustTokensResult{ ioLoadListenerState: ioLoadListenerState{ @@ -1047,7 +1074,7 @@ func (res adjustTokensResult) SafeFormat(p redact.SafePrinter, _ rune) { ib(m/adjustmentInterval)) switch res.aux.tokenKind { case compactionTokenKind: - p.Printf(" due to L0 growth") + p.Printf(" due to L0 growth [≈%s]", ib(int64(res.smoothedCompactionByteTokens))) case flushTokenKind: p.Printf(" due to memtable flush (multiplier %.3f)", res.flushUtilTargetFraction) } diff --git a/pkg/util/admission/io_load_listener_test.go b/pkg/util/admission/io_load_listener_test.go index 334a14709ebc..a6bcf38a84eb 100644 --- a/pkg/util/admission/io_load_listener_test.go +++ b/pkg/util/admission/io_load_listener_test.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/echotest" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/datadriven" "github.com/cockroachdb/pebble" @@ -55,6 +56,8 @@ func TestIOLoadListener(t *testing.T) { kvRequester: req, perWorkTokenEstimator: makeStorePerWorkTokenEstimator(), diskBandwidthLimiter: makeDiskBandwidthLimiter(), + l0CompactedBytes: metric.NewCounter(l0CompactedBytes), + l0TokensProduced: metric.NewCounter(l0TokensProduced), } // The mutex is needed by ioLoadListener but is not useful in this // test -- the channels provide synchronization and prevent this @@ -214,8 +217,10 @@ func TestIOLoadListenerOverflow(t *testing.T) { ctx := context.Background() st := cluster.MakeTestingClusterSettings() ioll := ioLoadListener{ - settings: st, - kvRequester: req, + settings: st, + kvRequester: req, + l0CompactedBytes: metric.NewCounter(l0CompactedBytes), + l0TokensProduced: metric.NewCounter(l0TokensProduced), } ioll.kvGranter = kvGranter // Bug 1: overflow when totalNumByteTokens is too large. 
@@ -275,7 +280,12 @@ func TestAdjustTokensInnerAndLogging(t *testing.T) { var buf redact.StringBuilder for _, tt := range tests { buf.Printf("%s:\n", tt.name) - res := (*ioLoadListener)(nil).adjustTokensInner( + ioll := &ioLoadListener{ + settings: cluster.MakeTestingClusterSettings(), + l0CompactedBytes: metric.NewCounter(l0CompactedBytes), + l0TokensProduced: metric.NewCounter(l0TokensProduced), + } + res := ioll.adjustTokensInner( ctx, tt.prev, tt.l0Metrics, 12, pebble.ThroughputMetric{}, 100, 10, 0, 0.50) buf.Printf("%s\n", res) @@ -316,6 +326,8 @@ func TestBadIOLoadListenerStats(t *testing.T) { kvRequester: req, perWorkTokenEstimator: makeStorePerWorkTokenEstimator(), diskBandwidthLimiter: makeDiskBandwidthLimiter(), + l0CompactedBytes: metric.NewCounter(l0CompactedBytes), + l0TokensProduced: metric.NewCounter(l0TokensProduced), } ioll.kvGranter = kvGranter for i := 0; i < 100; i++ { diff --git a/pkg/util/admission/testdata/format_adjust_tokens_stats.txt b/pkg/util/admission/testdata/format_adjust_tokens_stats.txt index d709f52571ea..b4d6422854a4 100644 --- a/pkg/util/admission/testdata/format_adjust_tokens_stats.txt +++ b/pkg/util/admission/testdata/format_adjust_tokens_stats.txt @@ -3,4 +3,4 @@ echo zero: compaction score 0.000 (0 ssts, 0 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 0.00x+0 B) + ingested-model 0.00x+0 B (smoothed 0.00x+0 B) + at-admission-tokens 0 B, compacted 0 B [≈0 B], flushed 0 B [≈0 B]; admitting all; elastic-disk-bw tokens 0 B (used 0 B, regular used 0 B): write model 0.00x+0 B ingest model 0.00x+0 B, disk bw read 0 B write 0 B provisioned 0 B; write stalls 12 real-numbers: -compaction score 2.700[L0-overload] (195 ssts, 27 sub-levels), L0 growth 577 MiB (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 0.00x+0 B) + ingested-model 0.00x+0 B (smoothed 0.00x+0 B) + at-admission-tokens 0 B, compacted 77 MiB [≈62 MiB], flushed 0 B [≈0 B]; admitting 116 MiB (rate 7.7 MiB/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); elastic-disk-bw tokens 0 B (used 0 B, regular used 0 B): write model 0.00x+0 B ingest model 0.00x+0 B, disk bw read 0 B write 0 B provisioned 0 B; write stalls 2 +compaction score 2.700[L0-overload] (195 ssts, 27 sub-levels), L0 growth 577 MiB (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 0.00x+0 B) + ingested-model 0.00x+0 B (smoothed 0.00x+0 B) + at-admission-tokens 0 B, compacted 77 MiB [≈62 MiB], flushed 0 B [≈0 B]; admitting 116 MiB (rate 7.7 MiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈116 MiB] (used total: 0 B elastic 0 B); elastic-disk-bw tokens 0 B (used 0 B, regular used 0 B): write model 0.00x+0 B ingest model 0.00x+0 B, disk bw read 0 B write 0 B provisioned 0 B; write stalls 2 diff --git a/pkg/util/admission/testdata/io_load_listener b/pkg/util/admission/testdata/io_load_listener index 8f6ba8b86bf6..19ccf9ab9717 100644 --- a/pkg/util/admission/testdata/io_load_listener +++ b/pkg/util/admission/testdata/io_load_listener @@ -84,7 +84,7 @@ prep-admission-stats admitted=10000 write-bytes=40000 # removed), but smoothing it drops the tokens to 12,500. 
set-state l0-bytes=10000 l0-added-write=101000 l0-files=21 l0-sublevels=21 ---- -compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.00x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 5 B, compacted 98 KiB [≈49 KiB], flushed 0 B [≈0 B]; admitting 12 KiB (rate 833 B/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.00x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 5 B, compacted 98 KiB [≈49 KiB], flushed 0 B [≈0 B]; admitting 12 KiB (rate 833 B/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈12 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:101000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:101000} smoothedIntL0CompactedBytes:50000 smoothedCompactionByteTokens:12500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:12500 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:5} l0WriteLM:{multiplier:2 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:2.25 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 5 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.00x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 @@ -156,7 +156,7 @@ prep-admission-stats admitted=20000 write-bytes=80000 # Same delta as previous but smoothing bumps up the tokens to 25,000. 
set-state l0-bytes=10000 l0-added-write=201000 l0-files=21 l0-sublevels=21 ---- -compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.12x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 7 B, compacted 98 KiB [≈73 KiB], flushed 0 B [≈0 B]; admitting 24 KiB (rate 1.6 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.12x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 7 B, compacted 98 KiB [≈73 KiB], flushed 0 B [≈0 B]; admitting 24 KiB (rate 1.6 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈24 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:75000 smoothedCompactionByteTokens:25000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:25000 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:7} l0WriteLM:{multiplier:2.125 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:2.25 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 7 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.12x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 @@ -224,7 +224,7 @@ tick: 59, setAvailableTokens: io-tokens=416(elastic 0) elastic-disk-bw-tokens=un # No delta. This used to trigger an overflow bug. 
set-state l0-bytes=10000 l0-added-write=201000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- -compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 2.12x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 7 B, compacted 0 B [≈37 KiB], flushed 0 B [≈0 B]; admitting 21 KiB (rate 1.4 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 2.12x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 7 B, compacted 0 B [≈37 KiB], flushed 0 B [≈0 B]; admitting 21 KiB (rate 1.4 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈21 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:37500 smoothedCompactionByteTokens:21875 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:21875 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:7} l0WriteLM:{multiplier:2.125 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 7 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.12x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 @@ -238,7 +238,7 @@ prep-admission-stats admitted=30000 write-bytes=120000 # don't limit the tokens. 
set-state l0-bytes=10000 l0-added-write=501000 l0-files=21 l0-sublevels=9 print-only-first-tick=true ---- -compaction score 0.450 (21 ssts, 9 sub-levels), L0 growth 293 KiB (write 293 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 3.00x+18 B (smoothed 2.56x+9 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 18 B, compacted 293 KiB [≈165 KiB], flushed 0 B [≈0 B]; admitting 110 KiB (rate 7.3 KiB/s) (elastic 41 KiB rate 2.7 KiB/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 0.450 (21 ssts, 9 sub-levels), L0 growth 293 KiB (write 293 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 3.00x+18 B (smoothed 2.56x+9 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 18 B, compacted 293 KiB [≈165 KiB], flushed 0 B [≈0 B]; admitting 110 KiB (rate 7.3 KiB/s) (elastic 41 KiB rate 2.7 KiB/s) due to L0 growth [≈110 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:168750 smoothedCompactionByteTokens:112187.5 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:112187 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:42187 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:18} l0WriteLM:{multiplier:2.5625 constant:9} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:300000 intL0CompactedBytes:300000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:300000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:3 constant:18} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:300000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 18 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.56x+9 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 @@ -250,7 +250,7 @@ prep-admission-stats admitted=40000 write-bytes=160000 set-state l0-bytes=10000 l0-added-write=501000 l0-files=21 l0-sublevels=5 print-only-first-tick=true ---- -compaction score 0.250 (21 ssts, 5 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 2.56x+4 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 18 B, compacted 0 B [≈82 KiB], flushed 0 B [≈0 B]; admitting 137 KiB (rate 9.1 KiB/s) (elastic 62 KiB rate 4.1 KiB/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 0.250 (21 ssts, 5 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 
B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 2.56x+4 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 18 B, compacted 0 B [≈82 KiB], flushed 0 B [≈0 B]; admitting 137 KiB (rate 9.1 KiB/s) (elastic 62 KiB rate 4.1 KiB/s) due to L0 growth [≈137 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:84375 smoothedCompactionByteTokens:140468.75 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:140468 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:63281 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:18} l0WriteLM:{multiplier:2.5625 constant:4} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 18 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.56x+4 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 @@ -296,7 +296,7 @@ prep-admission-stats admitted=10 write-bytes=130000 ingested-bytes=20000 # of 1.12 and 1.25 respectively. 
set-state l0-bytes=1000 l0-added-write=171000 l0-added-ingested=30000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- -compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 195 KiB (write 166 KiB ingest 29 KiB ignored 0 B): requests 10 (0 bypassed) with 127 KiB acc-write (0 B bypassed) + 20 KiB acc-ingest (0 B bypassed) + write-model 1.31x+1 B (smoothed 1.53x+1 B) + ingested-model 1.50x+1 B (smoothed 1.12x+1 B) + at-admission-tokens 9.8 KiB, compacted 195 KiB [≈98 KiB], flushed 0 B [≈0 B]; admitting 24 KiB (rate 1.6 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 195 KiB (write 166 KiB ingest 29 KiB ignored 0 B): requests 10 (0 bypassed) with 127 KiB acc-write (0 B bypassed) + 20 KiB acc-ingest (0 B bypassed) + write-model 1.31x+1 B (smoothed 1.53x+1 B) + ingested-model 1.50x+1 B (smoothed 1.12x+1 B) + at-admission-tokens 9.8 KiB, compacted 195 KiB [≈98 KiB], flushed 0 B [≈0 B]; admitting 24 KiB (rate 1.6 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈24 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:1000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:100000 smoothedCompactionByteTokens:25000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:25000 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:10000} l0WriteLM:{multiplier:1.5288076923076923 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:200000 intL0CompactedBytes:200000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:170000 intL0IngestedBytes:30000 intLSMIngestedBytes:30000 intL0WriteAccountedBytes:130000 intIngestedAccountedBytes:20000 intL0WriteLinearModel:{multiplier:1.3076153846153846 constant:1} intL0IngestedLinearModel:{multiplier:1.4995 constant:1} intIngestedLinearModel:{multiplier:1.4995 constant:1} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:200000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 10000 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.53x+1 l0-ingest-lm: 1.12x+1 ingest-lm: 1.25x+1 @@ -311,7 +311,7 @@ prep-admission-stats admitted=20 write-bytes=150000 ingested-bytes=20000 set-state l0-bytes=1000 l0-added-write=191000 l0-added-ingested=30000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- -compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 20 KiB (write 20 KiB ingest 0 B ignored 0 B): requests 10 (0 bypassed) with 20 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 1.00x+1 B (smoothed 1.26x+1 B) + ingested-model 0.00x+0 B (smoothed 1.12x+1 B) + at-admission-tokens 5.9 KiB, compacted 20 KiB [≈59 KiB], flushed 0 B [≈0 B]; admitting 27 KiB (rate 1.8 KiB/s) (elastic 1 B rate 0 
B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 20 KiB (write 20 KiB ingest 0 B ignored 0 B): requests 10 (0 bypassed) with 20 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 1.00x+1 B (smoothed 1.26x+1 B) + ingested-model 0.00x+0 B (smoothed 1.12x+1 B) + at-admission-tokens 5.9 KiB, compacted 20 KiB [≈59 KiB], flushed 0 B [≈0 B]; admitting 27 KiB (rate 1.8 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈27 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:221000 curL0Bytes:1000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:221000} smoothedIntL0CompactedBytes:60000 smoothedCompactionByteTokens:27500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:27500 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:6000} l0WriteLM:{multiplier:1.2641538461538462 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:20000 intL0CompactedBytes:20000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:20000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:20000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0.9995 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:20000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 6000 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.26x+1 l0-ingest-lm: 1.12x+1 ingest-lm: 1.25x+1 @@ -325,7 +325,7 @@ prep-admission-stats admitted=30 write-bytes=250000 ingested-bytes=20000 ingeste set-state l0-bytes=1000 l0-added-write=211000 l0-added-ingested=30000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- -compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 20 KiB (write 20 KiB ingest 0 B ignored 0 B): requests 10 (0 bypassed) with 98 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.50x+1 B (smoothed 0.88x+1 B) + ingested-model 0.00x+0 B (smoothed 1.12x+1 B) + at-admission-tokens 3.9 KiB, compacted 20 KiB [≈39 KiB], flushed 0 B [≈0 B]; admitting 23 KiB (rate 1.5 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 20 KiB (write 20 KiB ingest 0 B ignored 0 B): requests 10 (0 bypassed) with 98 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.50x+1 B (smoothed 0.88x+1 B) + ingested-model 0.00x+0 B (smoothed 1.12x+1 B) + at-admission-tokens 3.9 KiB, compacted 20 KiB [≈39 KiB], flushed 0 B [≈0 B]; admitting 23 KiB (rate 1.5 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈23 KiB] (used total: 0 B elastic 0 B); write stalls 0 
{ioLoadListenerState:{cumL0AddedBytes:241000 curL0Bytes:1000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:241000} smoothedIntL0CompactedBytes:40000 smoothedCompactionByteTokens:23750 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:23750 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:4000} l0WriteLM:{multiplier:0.8820769230769231 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:20000 intL0CompactedBytes:20000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:20000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:100000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0.5 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:20000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 4000 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 0.88x+1 l0-ingest-lm: 1.12x+1 ingest-lm: 1.25x+1 @@ -573,7 +573,7 @@ prep-admission-stats admitted=10000 write-bytes=40000 # removed), but smoothing it drops the tokens to 12,500. 
set-state l0-bytes=10000 l0-added-write=101000 l0-files=21 l0-sublevels=21 print-only-first-tick=true loaded=true ---- -compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.00x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 5 B, compacted 98 KiB [≈49 KiB], flushed 0 B [≈0 B]; admitting 12 KiB (rate 833 B/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 +compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.00x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 5 B, compacted 98 KiB [≈49 KiB], flushed 0 B [≈0 B]; admitting 12 KiB (rate 833 B/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈12 KiB] (used total: 0 B elastic 0 B); write stalls 0 {ioLoadListenerState:{cumL0AddedBytes:101000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:101000} smoothedIntL0CompactedBytes:50000 smoothedCompactionByteTokens:12500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:12500 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:5} l0WriteLM:{multiplier:2 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:2.25 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 5 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.00x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 @@ -634,8 +634,8 @@ setAvailableTokens: io-tokens=unlimited(elastic 21) elastic-disk-bw-tokens=unlim # generating different output on an M1 macbook and linux with Intel Xeon. 
set-state l0-bytes=10000 l0-added-write=501002 l0-files=10 l0-sublevels=10 print-only-first-tick=true loaded=true ---- -compaction score 0.500 (10 ssts, 10 sub-levels), L0 growth 2 B (write 2 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 2 B [≈122 KiB], flushed 0 B [≈0 B]; admitting 183 KiB (rate 12 KiB/s) (elastic 15 KiB rate 1.0 KiB/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 -{ioLoadListenerState:{cumL0AddedBytes:501002 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501002} smoothedIntL0CompactedBytes:125001 smoothedCompactionByteTokens:187500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:187500 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:15625 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:2 intL0CompactedBytes:2 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:2 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:2 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +compaction score 0.500 (10 ssts, 10 sub-levels), L0 growth 2 B (write 2 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 2 B [≈122 KiB], flushed 0 B [≈0 B]; admitting 183 KiB (rate 12 KiB/s) (elastic 15 KiB rate 1.0 KiB/s) due to L0 growth [≈183 KiB] (used total: 0 B elastic 0 B); write stalls 0 +{ioLoadListenerState:{cumL0AddedBytes:501002 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501002} smoothedIntL0CompactedBytes:125001 smoothedCompactionByteTokens:187500.5 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:187500 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:15625 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:2 intL0CompactedBytes:2 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:2 intL0IngestedBytes:0 intLSMIngestedBytes:0 
intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:2 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableTokens: io-tokens=13(elastic 2) elastic-disk-bw-tokens=unlimited max-byte-tokens=3125(elastic 261) max-disk-bw-tokens=unlimited lastTick=false @@ -647,8 +647,8 @@ setAvailableTokens: io-tokens=13(elastic 2) elastic-disk-bw-tokens=unlimited max # 187500 is the previous smoothedCompactionByteTokens. set-state l0-bytes=10000 l0-added-write=501000 l0-files=10 l0-sublevels=15 print-only-first-tick=true loaded=true ---- -compaction score 0.750 (10 ssts, 15 sub-levels), L0 growth 0 B (write -2 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈61 KiB], flushed 0 B [≈0 B]; admitting 114 KiB (rate 7.6 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0 -{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:62500 smoothedCompactionByteTokens:117187.5 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:117187 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:-2 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:-2 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +compaction score 0.750 (10 ssts, 15 sub-levels), L0 growth 0 B (write -2 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈61 KiB], flushed 0 B [≈0 B]; admitting 114 KiB (rate 7.6 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈114 KiB] (used total: 0 B elastic 0 B); write stalls 0 
+{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:62500 smoothedCompactionByteTokens:117187.75 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:117187 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:-2 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:-2 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableTokens: io-tokens=8(elastic 1) elastic-disk-bw-tokens=unlimited max-byte-tokens=1954(elastic 1) max-disk-bw-tokens=unlimited lastTick=false @@ -661,8 +661,8 @@ setAvailableTokens: io-tokens=8(elastic 1) elastic-disk-bw-tokens=unlimited max- # at 20 sublevels. 117187.5 is the previous smoothedCompactionByteTokens. 
 set-state l0-bytes=10000 l0-added-write=501000 l0-files=10 l0-sublevels=20 print-only-first-tick=true loaded=true
 ----
-compaction score 1.000 (10 ssts, 20 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈30 KiB], flushed 0 B [≈0 B]; admitting 65 KiB (rate 4.3 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0
-{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:31250 smoothedCompactionByteTokens:66406.25 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:66406 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
+compaction score 1.000 (10 ssts, 20 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈30 KiB], flushed 0 B [≈0 B]; admitting 65 KiB (rate 4.3 KiB/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈65 KiB] (used total: 0 B elastic 0 B); write stalls 0
+{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:31250 smoothedCompactionByteTokens:66406.375 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:66406 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableTokens: io-tokens=5(elastic 1) elastic-disk-bw-tokens=unlimited max-byte-tokens=1107(elastic 1) max-disk-bw-tokens=unlimited lastTick=false
@@ -673,8 +673,8 @@ setAvailableTokens: io-tokens=5(elastic 1) elastic-disk-bw-tokens=unlimited max-
 # 48828.125 = 0.5 * 66406.25 + 0.5 * 2 * 15625. Checks out.
 set-state l0-bytes=10000 l0-added-write=501000 l0-files=10 l0-sublevels=5 print-only-first-tick=true loaded=true
 ----
-compaction score 0.250 (10 ssts, 5 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈15 KiB], flushed 0 B [≈0 B]; admitting 48 KiB (rate 3.2 KiB/s) (elastic 11 KiB rate 781 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0
-{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:15625 smoothedCompactionByteTokens:48828.125 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:48828 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:11718 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
+compaction score 0.250 (10 ssts, 5 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈15 KiB], flushed 0 B [≈0 B]; admitting 48 KiB (rate 3.2 KiB/s) (elastic 11 KiB rate 781 B/s) due to L0 growth [≈48 KiB] (used total: 0 B elastic 0 B); write stalls 0
+{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:15625 smoothedCompactionByteTokens:48828.1875 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:48828 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:11718 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableTokens: io-tokens=4(elastic 1) elastic-disk-bw-tokens=unlimited max-byte-tokens=814(elastic 196) max-disk-bw-tokens=unlimited lastTick=false
@@ -696,7 +696,7 @@ tick: 0, setAvailableTokens: io-tokens=unlimited(elastic unlimited) elastic-disk
 # Score is 21/20.
 set-state l0-bytes=10000 l0-added-write=101000 l0-files=21 l0-sublevels=21 print-only-first-tick=true
 ----
-compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈49 KiB], flushed 0 B [≈0 B]; admitting 12 KiB (rate 833 B/s) (elastic 1 B rate 0 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0
+compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈49 KiB], flushed 0 B [≈0 B]; admitting 12 KiB (rate 833 B/s) (elastic 1 B rate 0 B/s) due to L0 growth [≈12 KiB] (used total: 0 B elastic 0 B); write stalls 0
 {ioLoadListenerState:{cumL0AddedBytes:101000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:101000} smoothedIntL0CompactedBytes:50000 smoothedCompactionByteTokens:12500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:12500 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:1 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
@@ -708,7 +708,7 @@ set-min-size-per-sub-level size=5000
 # Score is (21/3)/20 since min sub-levels is 21/3 and max is 10000/5000=2.
 set-state l0-bytes=10000 l0-added-write=201000 l0-files=21 l0-sublevels=21 print-only-first-tick=true
 ----
-compaction score 0.350 (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈73 KiB], flushed 0 B [≈0 B]; admitting 65 KiB (rate 4.3 KiB/s) (elastic 37 KiB rate 2.4 KiB/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0
+compaction score 0.350 (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈73 KiB], flushed 0 B [≈0 B]; admitting 65 KiB (rate 4.3 KiB/s) (elastic 37 KiB rate 2.4 KiB/s) due to L0 growth [≈65 KiB] (used total: 0 B elastic 0 B); write stalls 0
 {ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:10000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:75000 smoothedCompactionByteTokens:66250 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:66250 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:37500 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
@@ -717,7 +717,7 @@ setAvailableTokens: io-tokens=1105(elastic 625) elastic-disk-bw-tokens=unlimited
 # Score is decided by max sub-levels, which is 50000/5000=10. So score is 10/20=0.5.
 set-state l0-bytes=50000 l0-added-write=201000 l0-files=21 l0-sublevels=21 print-only-first-tick=true
 ----
-compaction score 0.500 (21 ssts, 21 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈37 KiB], flushed 0 B [≈0 B]; admitting 51 KiB (rate 3.4 KiB/s) (elastic 4.6 KiB rate 312 B/s) due to L0 growth (used total: 0 B elastic 0 B); write stalls 0
+compaction score 0.500 (21 ssts, 21 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈37 KiB], flushed 0 B [≈0 B]; admitting 51 KiB (rate 3.4 KiB/s) (elastic 4.6 KiB rate 312 B/s) due to L0 growth [≈51 KiB] (used total: 0 B elastic 0 B); write stalls 0
 {ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:50000 cumWriteStallCount:0 cumFlushWriteThroughput:{Bytes:0 WorkDuration:0 IdleDuration:0} diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:37500 smoothedCompactionByteTokens:51875 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:51875 byteTokensAllocated:0 byteTokensUsed:0 byteTokensUsedByElasticWork:0 totalNumElasticByteTokens:4687 elasticByteTokensAllocated:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 prevTokensUsedByElasticWork:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
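
Editor's note, outside the patch proper: the testdata comments above verify the token smoothing by hand (e.g. "# 48828.125 = 0.5 * 66406.25 + 0.5 * 2 * 15625. Checks out."). The following is a minimal illustrative Go sketch of that one arithmetic step only, assuming the 0.5 smoothing alpha and the 2x low-score factor shown in that comment; smooth, prev, and target are hypothetical names and are not part of the changed code.

package main

import "fmt"

// smooth applies exponential smoothing: alpha*cur + (1-alpha)*prev.
func smooth(alpha, prev, cur float64) float64 {
	return alpha*cur + (1-alpha)*prev
}

func main() {
	const alpha = 0.5       // smoothing alpha assumed from the test comment above
	prev := 66406.25        // previous smoothedCompactionByteTokens
	compacted := 15625.0    // smoothedIntL0CompactedBytes for the interval
	target := 2 * compacted // 2x factor used in this low-score test case, per the comment
	fmt.Println(smooth(alpha, prev, target)) // prints 48828.125, matching the test comment
}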