scheduler: fix a bug where force GC wasn't respected #24456

Merged · 6 commits · Nov 21, 2024
3 changes: 3 additions & 0 deletions .changelog/24456.txt
@@ -0,0 +1,3 @@
```release-note:bug
scheduler: Fix bug where forced garbage collection does not ignore GC thresholds
```
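For orientation before the diff itself: the fix makes force GC temporarily install a minimal GC threshold for every object type through a shared map, and each per-object GC pass prefers a non-zero custom value over its configured default. Below is a minimal, self-contained sketch of that pattern using hypothetical, simplified names (gcThresholds, thresholdFor, collect); it is an illustration of the approach, not the actual Nomad types.

```go
package main

import (
	"fmt"
	"time"
)

// gcThresholds is a simplified stand-in for the scheduler's threshold state:
// defaults come from server config, and custom holds per-object overrides.
type gcThresholds struct {
	defaults map[string]time.Duration
	custom   map[string]time.Duration
}

// thresholdFor mirrors the lookup pattern in the diff: use the configured
// default unless a non-zero custom override has been set for this object.
func (g *gcThresholds) thresholdFor(object string) time.Duration {
	threshold := g.defaults[object]
	if val, ok := g.custom[object]; ok && val != 0 {
		threshold = val
	}
	return threshold
}

// forceGC installs a minimal override for every object type so that
// effectively all eligible objects are collected, then resets the overrides
// when it is done.
func (g *gcThresholds) forceGC(objects []string, collect func(object string, threshold time.Duration)) {
	for _, o := range objects {
		g.custom[o] = time.Millisecond
	}
	defer func() {
		for _, o := range objects {
			g.custom[o] = 0
		}
	}()
	for _, o := range objects {
		collect(o, g.thresholdFor(o))
	}
}

func main() {
	g := &gcThresholds{
		defaults: map[string]time.Duration{"job": 4 * time.Hour, "eval": time.Hour},
		custom:   make(map[string]time.Duration),
	}
	g.forceGC([]string{"job", "eval"}, func(object string, threshold time.Duration) {
		fmt.Printf("collecting %s with threshold %s\n", object, threshold)
	})
}
```

In the actual change the overrides are reset to zero rather than deleted, which the `val != 0` guard treats the same as "no override", and root keys are deliberately left out of the override list because, as the diff notes, they should never be force-GC'd.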
108 changes: 69 additions & 39 deletions nomad/core_sched.go
@@ -28,29 +28,42 @@ type CoreScheduler struct {
snap *state.StateSnapshot
logger log.Logger

// custom GC Threshold values can be used by unit tests to simulate time
// manipulation
customJobGCThreshold time.Duration
customEvalGCThreshold time.Duration
customBatchEvalGCThreshold time.Duration
customNodeGCThreshold time.Duration
customDeploymentGCThreshold time.Duration
customCSIVolumeClaimGCThreshold time.Duration
customCSIPluginGCThreshold time.Duration
customACLTokenExpirationGCThreshold time.Duration
customRootKeyGCThreshold time.Duration
// customGCThreshold is a map of object names to threshold values. It can be
// used by unit tests to simulate time manipulation, and is used by forceGC to
// set a minimal threshold so that virtually all eligible objects get GC'd
customGCThreshold map[string]time.Duration
}

// NewCoreScheduler is used to return a new system scheduler instance
func NewCoreScheduler(srv *Server, snap *state.StateSnapshot) scheduler.Scheduler {
s := &CoreScheduler{
srv: srv,
snap: snap,
logger: srv.logger.ResetNamed("core.sched"),
srv: srv,
snap: snap,
logger: srv.logger.ResetNamed("core.sched"),
customGCThreshold: make(map[string]time.Duration),
}
return s
}

func (c *CoreScheduler) setCustomThresholdForObject(objectName string, threshold time.Duration) {
c.customGCThreshold[objectName] = threshold
}

func (c *CoreScheduler) setCustomThresholdForAllObjects(threshold time.Duration) {
for _, objectName := range []string{
"job",
"eval",
"batchEval",
"deployment",
"csiPlugin",
"csiVolume",
"token",
"node",
} {
c.setCustomThresholdForObject(objectName, threshold)
}
}

// Process is used to implement the scheduler.Scheduler interface
func (c *CoreScheduler) Process(eval *structs.Evaluation) error {
job := strings.Split(eval.JobID, ":") // extra data can be smuggled in w/ JobID
@@ -86,6 +99,11 @@ func (c *CoreScheduler) Process(eval *structs.Evaluation) error {

// forceGC is used to garbage collect all eligible objects.
func (c *CoreScheduler) forceGC(eval *structs.Evaluation) error {
// set a minimal threshold for all objects to make force GC possible, and
// remember to reset it when we're done
c.setCustomThresholdForAllObjects(time.Millisecond)
defer c.setCustomThresholdForAllObjects(0)

if err := c.jobGC(eval); err != nil {
return err
}
@@ -113,6 +131,7 @@ func (c *CoreScheduler) forceGC(eval *structs.Evaluation) error {
if err := c.rootKeyGC(eval, time.Now()); err != nil {
return err
}

// Node GC must occur after the others to ensure the allocations are
// cleared.
return c.nodeGC(eval)
@@ -131,8 +150,10 @@ func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
threshold = c.srv.config.JobGCThreshold

// custom threshold override
if c.customJobGCThreshold != 0 {
threshold = c.customJobGCThreshold
if val, ok := c.customGCThreshold["job"]; ok {
if val != 0 {
threshold = val
}
}

cutoffTime := c.getCutoffTime(threshold)
@@ -276,11 +297,15 @@ func (c *CoreScheduler) evalGC() error {
batchThreshold = c.srv.config.BatchEvalGCThreshold

// custom threshold override
if c.customEvalGCThreshold != 0 {
threshold = c.customEvalGCThreshold
if val, ok := c.customGCThreshold["eval"]; ok {
if val != 0 {
threshold = val
}
}
if c.customBatchEvalGCThreshold != 0 {
batchThreshold = c.customBatchEvalGCThreshold
if val, ok := c.customGCThreshold["batchEval"]; ok {
if val != 0 {
batchThreshold = val
}
}

cutoffTime := c.getCutoffTime(threshold)
@@ -376,8 +401,7 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, cutoffTime time.Time, a
var gcAllocIDs []string
for _, alloc := range allocs {
if !allocGCEligible(alloc, job, time.Now().UTC(), cutoffTime) {
// Can't GC the evaluation since not all of the allocations are
// terminal
// Can't GC the evaluation since not all the allocations are terminal
gcEval = false
} else {
// The allocation is eligible to be GC'd
@@ -474,8 +498,10 @@ func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error {
threshold = c.srv.config.NodeGCThreshold

// custom threshold override
if c.customNodeGCThreshold != 0 {
threshold = c.customNodeGCThreshold
if val, ok := c.customGCThreshold["node"]; ok {
if val != 0 {
threshold = val
}
}
cutoffTime := c.getCutoffTime(threshold)

@@ -578,8 +604,10 @@ func (c *CoreScheduler) deploymentGC() error {
threshold = c.srv.config.DeploymentGCThreshold

// custom threshold override
if c.customDeploymentGCThreshold != 0 {
threshold = c.customDeploymentGCThreshold
if val, ok := c.customGCThreshold["deployment"]; ok {
if val != 0 {
threshold = val
}
}
cutoffTime := c.getCutoffTime(threshold)

@@ -778,8 +806,10 @@ func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error {
threshold = c.srv.config.CSIVolumeClaimGCThreshold

// custom threshold override
if c.customCSIVolumeClaimGCThreshold != 0 {
threshold = c.customCSIVolumeClaimGCThreshold
if val, ok := c.customGCThreshold["csiVolume"]; ok {
if val != 0 {
threshold = val
}
}
cutoffTime := c.getCutoffTime(threshold)

@@ -825,8 +855,10 @@ func (c *CoreScheduler) csiPluginGC(eval *structs.Evaluation) error {
threshold = c.srv.config.CSIPluginGCThreshold

// custom threshold override
if c.customCSIPluginGCThreshold != 0 {
threshold = c.customCSIPluginGCThreshold
if val, ok := c.customGCThreshold["csiPlugin"]; ok {
if val != 0 {
threshold = val
}
}
cutoffTime := c.getCutoffTime(threshold)

@@ -893,8 +925,10 @@ func (c *CoreScheduler) expiredACLTokenGC(eval *structs.Evaluation, global bool)
threshold = c.srv.config.ACLTokenExpirationGCThreshold

// custom threshold override
if c.customACLTokenExpirationGCThreshold != 0 {
threshold = c.customACLTokenExpirationGCThreshold
if val, ok := c.customGCThreshold["token"]; ok {
if val != 0 {
threshold = val
}
}
cutoffTime := c.getCutoffTime(threshold)

@@ -1003,13 +1037,9 @@ func (c *CoreScheduler) rootKeyGC(eval *structs.Evaluation, now time.Time) error
return err
}

var threshold time.Duration
threshold = c.srv.config.RootKeyGCThreshold

// custom threshold override
if c.customRootKeyGCThreshold != 0 {
threshold = c.customRootKeyGCThreshold
}
// we don't do custom overrides for root keys because they are never subject to
// force GC
threshold := c.srv.config.RootKeyGCThreshold

// the threshold is longer than we can support with the time table, and we
// never want to force-GC keys because that will orphan signed Workload
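Each per-object pass above follows the same shape: pick the configured threshold, substitute a non-zero custom override if present, derive a cutoff time, and collect anything older than that cutoff. getCutoffTime itself is not part of this diff, so the sketch below assumes it is roughly "now minus threshold"; under that assumption a ten-millisecond-old object (as in the updated system endpoint test further down) survives the default thresholds but becomes eligible under the one-millisecond override that forceGC installs.

```go
package main

import (
	"fmt"
	"time"
)

// getCutoffTime is assumed here (it is not shown in this diff) to be roughly
// "now minus threshold": anything last touched before the cutoff is eligible.
func getCutoffTime(threshold time.Duration) time.Time {
	return time.Now().UTC().Add(-threshold)
}

// eligible reports whether an object whose relevant timestamp is ts falls
// behind the cutoff and may therefore be garbage collected.
func eligible(ts time.Time, cutoff time.Time) bool {
	return ts.Before(cutoff)
}

func main() {
	// A job submitted ten milliseconds ago, as in the updated endpoint test.
	ts := time.Now().UTC().Add(-10 * time.Millisecond)

	// Under a default threshold of several hours the object is kept...
	fmt.Println(eligible(ts, getCutoffTime(4*time.Hour))) // false

	// ...but under the minimal threshold installed by force GC it is collected.
	fmt.Println(eligible(ts, getCutoffTime(time.Millisecond))) // true
}
```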
4 changes: 1 addition & 3 deletions nomad/core_sched_test.go
@@ -527,9 +527,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) {

// set a shorter GC threshold this time
gc = s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx*2)
core.(*CoreScheduler).customBatchEvalGCThreshold = time.Minute
//core.(*CoreScheduler).customEvalGCThreshold = time.Minute
//core.(*CoreScheduler).customJobGCThreshold = time.Minute
core.(*CoreScheduler).setCustomThresholdForObject("batchEval", time.Minute)
must.NoError(t, core.Process(gc))

// We expect the following:
8 changes: 4 additions & 4 deletions nomad/system_endpoint_test.go
@@ -34,15 +34,15 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) {
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Stop = true
// submit time must be older than default job GC
job.SubmitTime = time.Now().Add(-6 * time.Hour).UnixNano()
// set submit time older than now but still newer than default GC threshold
job.SubmitTime = time.Now().Add(-10 * time.Millisecond).UnixNano()
must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job))

eval := mock.Eval()
eval.Status = structs.EvalStatusComplete
eval.JobID = job.ID
// modify time must be older than default eval GC
eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano()
// set modify time older than now but still newer than default GC threshold
eval.ModifyTime = time.Now().Add(-10 * time.Millisecond).UnixNano()
must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval}))

// Make the GC request