From bc0e60ddb91b73675f129bba5c6a3d5fc931a149 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 11 Aug 2016 15:26:25 -0700 Subject: [PATCH] Plan on system scheduler doesn't count nodes that don't meet constraints --- command/plan.go | 12 +++++++++--- scheduler/system_sched.go | 13 +++++++++++-- scheduler/system_sched_test.go | 22 +++++++++++++++++----- 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/command/plan.go b/command/plan.go index d1844be1617..fc1bff4592a 100644 --- a/command/plan.go +++ b/command/plan.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/jobspec" + "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/scheduler" "github.com/mitchellh/colorstring" ) @@ -172,7 +173,7 @@ func (c *PlanCommand) Run(args []string) int { // Print the scheduler dry-run output c.Ui.Output(c.Colorize().Color("[bold]Scheduler dry-run:[reset]")) - c.Ui.Output(c.Colorize().Color(formatDryRun(resp))) + c.Ui.Output(c.Colorize().Color(formatDryRun(resp, job))) c.Ui.Output("") // Print the job index info @@ -203,7 +204,7 @@ func formatJobModifyIndex(jobModifyIndex uint64, jobName string) string { } // formatDryRun produces a string explaining the results of the dry run. 
-func formatDryRun(resp *api.JobPlanResponse) string { +func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string { var rolling *api.Evaluation for _, eval := range resp.CreatedEvals { if eval.TriggeredBy == "rolling-update" { @@ -215,7 +216,12 @@ func formatDryRun(resp *api.JobPlanResponse) string { if len(resp.FailedTGAllocs) == 0 { out = "[bold][green]- All tasks successfully allocated.[reset]\n" } else { - out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n" + // Change the output depending on whether we are a system job or not + if job.Type == "system" { + out = "[bold][yellow]- WARNING: Failed to place allocations on all nodes.[reset]\n" + } else { + out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n" + } sorted := sortedTaskGroupFromMetrics(resp.FailedTGAllocs) for _, tg := range sorted { metrics := resp.FailedTGAllocs[tg] diff --git a/scheduler/system_sched.go b/scheduler/system_sched.go index d999852f792..a46798f9390 100644 --- a/scheduler/system_sched.go +++ b/scheduler/system_sched.go @@ -277,13 +277,22 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error { option, _ := s.stack.Select(missing.TaskGroup) if option == nil { - // if nodes were filtered because of constain mismatches and we + // If nodes were filtered because of constraint mismatches and we // couldn't create an allocation then decrementing queued for that // task group if s.ctx.metrics.NodesFiltered > nodesFiltered { s.queuedAllocs[missing.TaskGroup.Name] -= 1 + + // If we are annotating the plan, then decrement the desired + // placements based on whether the node meets the constraints + if s.eval.AnnotatePlan && s.plan.Annotations != nil && + s.plan.Annotations.DesiredTGUpdates != nil { + desired := s.plan.Annotations.DesiredTGUpdates[missing.TaskGroup.Name] + desired.Place -= 1 + } } - // record the current number of nodes filtered in this iteration + + // Record the current number of nodes filtered in this iteration 
nodesFiltered = s.ctx.metrics.NodesFiltered // Check if this task group has already failed diff --git a/scheduler/system_sched_test.go b/scheduler/system_sched_test.go index 5ad28606a9a..0f8aa34f5e4 100644 --- a/scheduler/system_sched_test.go +++ b/scheduler/system_sched_test.go @@ -136,11 +136,23 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() + if i < 9 { + node.NodeClass = "foo" + } else { + node.NodeClass = "bar" + } + node.ComputeClass() noErr(t, h.State.UpsertNode(h.NextIndex(), node)) } - // Create a job + // Create a job constraining on node class job := mock.SystemJob() + fooConstraint := &structs.Constraint{ + LTarget: "${node.class}", + RTarget: "foo", + Operand: "==", + } + job.Constraints = append(job.Constraints, fooConstraint) noErr(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to deregister the job @@ -169,8 +181,8 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) } - if len(planned) != 10 { - t.Fatalf("bad: %#v", plan) + if len(planned) != 9 { + t.Fatalf("bad: %#v %d", planned, len(planned)) } // Lookup the allocations by JobID @@ -178,7 +190,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { noErr(t, err) // Ensure all allocations placed - if len(out) != 10 { + if len(out) != 9 { t.Fatalf("bad: %#v", out) } @@ -204,7 +216,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { t.Fatalf("expected task group web to have desired changes") } - expected := &structs.DesiredUpdates{Place: 10} + expected := &structs.DesiredUpdates{Place: 9} if !reflect.DeepEqual(desiredChanges, expected) { t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected) }