Skip to content

Commit

Permalink
compaction: incorporate L0 size into compaction priority
Browse files Browse the repository at this point in the history
Currently, L0's compaction picking score is determined only by the
number of sublevels. This doesn't account for scenarios where L0
consists of many non-overlapping files. Adjust the compaction heuristic
to use the maximum of the scores computed from the sublevel count and the
level size.

See #1623.
  • Loading branch information
jbowens committed Apr 6, 2022
1 parent c50a066 commit 5fa13db
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 22 deletions.
32 changes: 15 additions & 17 deletions compaction_picker.go
Original file line number Diff line number Diff line change
Expand Up @@ -689,6 +689,8 @@ func (p *compactionPickerByScore) initLevelMaxBytes(inProgressCompactions []comp
p.estimatedMaxWAmp = float64(numLevels-p.baseLevel) * (smoothedLevelMultiplier + 1)

levelSize := float64(baseBytesMax)
// L0 also uses LBaseMaxBytes as its max size.
p.levelMaxBytes[0] = baseBytesMax
for level := p.baseLevel; level < numLevels; level++ {
if level > p.baseLevel && levelSize > 0 {
levelSize *= smoothedLevelMultiplier
Expand Down Expand Up @@ -742,15 +744,25 @@ func (p *compactionPickerByScore) calculateScores(
scores[i].level = i
scores[i].outputLevel = i + 1
}
scores[0] = p.calculateL0Score(inProgressCompactions)

sizeAdjust := calculateSizeAdjust(inProgressCompactions)
for level := 1; level < numLevels; level++ {
for level := 0; level < numLevels; level++ {
levelSize := int64(levelCompensatedSize(p.vers.Levels[level])) + sizeAdjust[level]
scores[level].score = float64(levelSize) / float64(p.levelMaxBytes[level])
scores[level].origScore = scores[level].score
}

// Calculate a L0 score based on the sublevel count. The base vs intra-L0
// compaction determination happens in pickAuto, not here. If the
// sublevel-based score is higher than the size-based score, use the
// sublevel-based score.
sublevelScore := float64(2*p.vers.L0Sublevels.MaxDepthAfterOngoingCompactions()) /
float64(p.opts.L0CompactionThreshold)
if sublevelScore > scores[0].score {
scores[0].score = sublevelScore
scores[0].origScore = sublevelScore
}
scores[0].outputLevel = p.baseLevel

// Adjust each level's score by the score of the next level. If the next
// level has a high score, and is thus a priority for compaction, this
// reduces the priority for compacting the current level. If the next level
Expand Down Expand Up @@ -791,20 +803,6 @@ func (p *compactionPickerByScore) calculateScores(
return scores
}

// calculateL0Score returns the candidateLevelInfo used to prioritize an
// L0 compaction. Unlike the other levels, which are scored by size, L0's
// score is derived from its sublevel count, and L0's output level is the
// current base level rather than L1.
//
// NOTE(review): inProgressCompactions is currently unused here; ongoing
// compactions are instead accounted for inside
// MaxDepthAfterOngoingCompactions — presumably the parameter is kept for
// signature symmetry with other scoring helpers. TODO confirm.
func (p *compactionPickerByScore) calculateL0Score(
	inProgressCompactions []compactionInfo,
) candidateLevelInfo {
	var info candidateLevelInfo
	// L0 compacts into Lbase, not L1.
	info.outputLevel = p.baseLevel

	// If L0Sublevels are present, we use the sublevel count as opposed to
	// the L0 file count to score this level. The base vs intra-L0
	// compaction determination happens in pickAuto, not here.
	//
	// The score is twice the maximum sublevel depth (after subtracting the
	// effect of ongoing compactions), normalized by the configured
	// L0CompactionThreshold option.
	info.score = float64(2*p.vers.L0Sublevels.MaxDepthAfterOngoingCompactions()) /
		float64(p.opts.L0CompactionThreshold)
	return info
}

func (p *compactionPickerByScore) pickFile(
level, outputLevel int, earliestSnapshotSeqNum uint64,
) (manifest.LevelFile, bool) {
Expand Down
10 changes: 5 additions & 5 deletions testdata/compaction_picker_target_level
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ L4->L5: 7.7

pick ongoing=(5,6)
----
no compaction
L0->L4: 1.0

pick ongoing=(0,4)
----
Expand Down Expand Up @@ -224,11 +224,11 @@ base: 4

queue
----
L0->L4: 1000.0
L0->L4: 2000.0

pick
----
L0->L4: 1000.0
L0->L4: 2000.0

pick ongoing=(0,4)
----
Expand All @@ -249,15 +249,15 @@ base: 4

queue
----
L0->L4: 1000.0
L0->L4: 2000.0

pick ongoing=(0,4,4,5)
----
no compaction

pick ongoing=(4,5)
----
L0->L4: 1000.0
L0->L4: 2000.0

# Verify we can start concurrent Ln->Ln+1 compactions given sufficient
# priority.
Expand Down

0 comments on commit 5fa13db

Please sign in to comment.