Plan queue apply overlaps plan verification with plan application to increase throughput #272
Changes from 10 commits
985ef83
5a6687b
1d2e813
28428e9
bb3e940
712393f
5348bd0
7464bcb
9c14964
c4cc21d
ad76822

@@ -7,12 +7,42 @@ import (
 	"github.com/armon/go-metrics"
 	"github.com/hashicorp/nomad/nomad/state"
 	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/hashicorp/raft"
 )
 
 // planApply is a long lived goroutine that reads plan allocations from
 // the plan queue, determines if they can be applied safely and applies
 // them via Raft.
+//
+// Naively, we could simply dequeue a plan, verify, apply and then respond.
+// However, the plan application is bounded by the Raft apply time and
+// subject to some latency. This creates a stall condition, where we are
+// not evaluating, but simply waiting for a transaction to complete.
+//
+// To avoid this, we overlap verification with apply. This means once
+// we've verified plan N we attempt to apply it. However, while waiting
+// for apply, we begin to verify plan N+1 under the assumption that plan
+// N has succeeded.
+//
+// In this sense, we track two parallel versions of the world. One is
+// the pessimistic one driven by the Raft log which is replicated. The
+// other is optimistic and assumes our transactions will succeed. In the
+// happy path, this lets us do productive work during the latency of
+// apply.
+//
+// In the unhappy path (Raft transaction fails), effectively we only
+// wasted work during a time we would have been waiting anyways. However,
+// in anticipation of this case we cannot respond to the plan until
+// the Raft log is updated. This means our schedulers will stall,
+// but there are many of those and only a single plan verifier.
+//
 func (s *Server) planApply() {
+	// waitCh is used to track an outstanding application
+	// while snap holds an optimistic state which includes
+	// that plan application.
Review comment thread:
- Why the oddly short comment length?
- Armon doesn't run a 80 character viewer in his vim setup, so he just guesses and does things like this. We just try to limit to less than 80 chars.
- 😆
+	var waitCh chan struct{}
+	var snap *state.StateSnapshot
+
 	for {
 		// Pull the next pending plan, exit if we are no longer leader
 		pending, err := s.planQueue.Dequeue(0)
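
The doc comment above describes the core idea: verification of plan N+1 overlaps with the Raft apply of plan N, so the verifier does useful work during commit latency. Below is a minimal, self-contained Go sketch of that pipelining shape; every name in it (plan, evaluate, raftApply) is a hypothetical stand-in rather than a Nomad or hashicorp/raft API.

```go
// Overlap sketch: verify the next plan while the previous apply is in flight.
package main

import (
	"fmt"
	"time"
)

type plan struct{ id int }

// evaluate is the cheap, CPU-bound verification step.
func evaluate(p plan) string { return fmt.Sprintf("result-%d", p.id) }

// raftApply simulates the slow, latency-bound commit and reports completion.
func raftApply(result string) <-chan error {
	done := make(chan error, 1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		done <- nil
	}()
	return done
}

func main() {
	var waitCh <-chan error // outstanding apply, if any

	for _, p := range []plan{{1}, {2}, {3}} {
		// Verify plan N+1 while plan N may still be applying.
		result := evaluate(p)

		// Before dispatching, make sure the previous apply has finished,
		// so at most one transaction is ever outstanding.
		if waitCh != nil {
			if err := <-waitCh; err != nil {
				fmt.Println("previous apply failed:", err)
			}
		}
		waitCh = raftApply(result)
	}
	if waitCh != nil {
		<-waitCh // drain the final apply before exiting
	}
	fmt.Println("all plans applied")
}
```

At any moment there is at most one apply plus one verification in flight, which is the same bound the real loop enforces with waitCh.
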
@@ -35,12 +65,23 @@ func (s *Server) planApply() {
 			continue
 		}
 
+		// Check if our last plan has completed
+		select {
+		case <-waitCh:
+			waitCh = nil
+			snap = nil
+		default:
+		}
+
 		// Snapshot the state so that we have a consistent view of the world
-		snap, err := s.fsm.State().Snapshot()
-		if err != nil {
-			s.logger.Printf("[ERR] nomad: failed to snapshot state: %v", err)
-			pending.respond(nil, err)
-			continue
-		}
+		// if no snapshot is available
+		if waitCh == nil || snap == nil {
+			snap, err = s.fsm.State().Snapshot()
+			if err != nil {
+				s.logger.Printf("[ERR] nomad: failed to snapshot state: %v", err)
+				pending.respond(nil, err)
+				continue
+			}
+		}
 
 		// Evaluate the plan
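
The select with an empty default above is a non-blocking poll: if the previous apply has finished, both waitCh and the optimistic snapshot are dropped so a fresh snapshot is taken; otherwise the loop keeps verifying against the optimistic view. Receiving from a nil channel never fires, so the check is also safe on the first iteration. A small stand-alone sketch of that check, with stand-in variables:

```go
// Non-blocking completion check, as a standalone sketch. waitCh, snap and
// haveSnap are illustrative stand-ins for the fields used in planApply.
package main

import "fmt"

func main() {
	waitCh := make(chan struct{})
	close(waitCh) // pretend the outstanding apply just finished

	snap := "optimistic snapshot"
	haveSnap := true

	select {
	case <-waitCh:
		// The Raft transaction committed, so the replicated state has
		// caught up with the optimistic view; drop both and re-snapshot
		// on the next iteration.
		waitCh = nil
		haveSnap = false
	default:
		// Apply still in flight (or waitCh is nil and never fires):
		// keep verifying against the optimistic snapshot.
	}

	fmt.Println("snapshot retained:", haveSnap, snap)
}
```
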
@@ -51,25 +92,41 @@
 			continue
 		}
 
-		// Apply the plan if there is anything to do
-		if !result.IsNoOp() {
-			allocIndex, err := s.applyPlan(result)
-			if err != nil {
-				s.logger.Printf("[ERR] nomad: failed to apply plan: %v", err)
-				pending.respond(nil, err)
-				continue
-			}
-			result.AllocIndex = allocIndex
-		}
-
-		// Respond to the plan
-		pending.respond(result, nil)
+		// Fast-path the response if there is nothing to do
+		if result.IsNoOp() {
+			pending.respond(result, nil)
+			continue
+		}
+
+		// Ensure any parallel apply is complete before
+		// starting the next one. This also limits how out
+		// of date our snapshot can be.
+		if waitCh != nil {
+			<-waitCh
+			snap, err = s.fsm.State().Snapshot()
+			if err != nil {
+				s.logger.Printf("[ERR] nomad: failed to snapshot state: %v", err)
+				pending.respond(nil, err)
+				continue
+			}
+		}
+
+		// Dispatch the Raft transaction for the plan
+		future, err := s.applyPlan(result, snap)
+		if err != nil {
+			s.logger.Printf("[ERR] nomad: failed to submit plan: %v", err)
+			pending.respond(nil, err)
+			continue
+		}
+
+		// Respond to the plan in async
+		waitCh = make(chan struct{})
+		go s.asyncPlanWait(waitCh, future, result, pending)
 	}
 }
 
 // applyPlan is used to apply the plan result and to return the alloc index
-func (s *Server) applyPlan(result *structs.PlanResult) (uint64, error) {
-	defer metrics.MeasureSince([]string{"nomad", "plan", "apply"}, time.Now())
+func (s *Server) applyPlan(result *structs.PlanResult, snap *state.StateSnapshot) (raft.ApplyFuture, error) {
 	req := structs.AllocUpdateRequest{}
 	for _, updateList := range result.NodeUpdate {
 		req.Alloc = append(req.Alloc, updateList...)
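
This hunk splits the old synchronous apply-and-respond into a dispatch followed by an asynchronous responder, while the receive on waitCh keeps at most one Raft transaction in flight. A rough, self-contained sketch of that shape follows; dispatch, asyncWait and respond are hypothetical helpers, not functions from this PR.

```go
// Dispatch/respond split, sketched with stand-in helpers.
package main

import (
	"fmt"
	"sync"
	"time"
)

// dispatch starts the slow commit and returns a channel reporting its result.
func dispatch(result string) <-chan error {
	done := make(chan error, 1)
	go func() {
		time.Sleep(20 * time.Millisecond)
		done <- nil
	}()
	return done
}

// asyncWait answers the caller once the apply really finishes and closes
// waitCh so the main loop can gate the next dispatch.
func asyncWait(waitCh chan struct{}, applied <-chan error, result string, respond func(string, error)) {
	defer close(waitCh)
	if err := <-applied; err != nil {
		respond("", err)
		return
	}
	respond(result, nil)
}

func main() {
	var wg sync.WaitGroup
	respond := func(result string, err error) {
		defer wg.Done()
		fmt.Println("responded:", result, err)
	}

	var waitCh chan struct{}
	for _, result := range []string{"plan-1", "plan-2", "plan-3"} {
		// Bound the pipeline: wait for the previous apply before dispatching.
		if waitCh != nil {
			<-waitCh
		}
		wg.Add(1)
		waitCh = make(chan struct{})
		go asyncWait(waitCh, dispatch(result), result, respond)
	}
	if waitCh != nil {
		<-waitCh
	}
	wg.Wait()
}
```

The caller is only answered once the transaction actually resolves, which matches the PR's rule that a plan response must wait for the Raft log even though verification ran ahead optimistically.
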
@@ -79,8 +136,38 @@ func (s *Server) applyPlan(result *structs.PlanResult) (uint64, error) {
 	}
 	req.Alloc = append(req.Alloc, result.FailedAllocs...)
 
-	_, index, err := s.raftApply(structs.AllocUpdateRequestType, &req)
-	return index, err
+	// Dispatch the Raft transaction
+	future, err := s.raftApplyFuture(structs.AllocUpdateRequestType, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Optimistically apply to our state view
+	if snap != nil {
+		nextIdx := s.raft.AppliedIndex() + 1
+		if err := snap.UpsertAllocs(nextIdx, req.Alloc); err != nil {
+			return future, err
+		}
+	}
+	return future, nil
 }
 
+// asyncPlanWait is used to apply and respond to a plan async
+func (s *Server) asyncPlanWait(waitCh chan struct{}, future raft.ApplyFuture,
+	result *structs.PlanResult, pending *pendingPlan) {
+	defer metrics.MeasureSince([]string{"nomad", "plan", "apply"}, time.Now())
+	defer close(waitCh)
+
+	// Wait for the plan to apply
+	if err := future.Error(); err != nil {
+		s.logger.Printf("[ERR] nomad: failed to apply plan: %v", err)
+		pending.respond(nil, err)
+		return
+	}
+
+	// Respond to the plan
+	result.AllocIndex = future.Index()
+	pending.respond(result, nil)
+}
+
 // evaluatePlan is used to determine what portions of a plan
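
applyPlan now returns the raft.ApplyFuture instead of blocking on it, and optimistically upserts the allocations into the snapshot at AppliedIndex()+1, a guess at the index the entry will commit at; asyncPlanWait later blocks on the future and responds with the real committed index. Below is a tiny sketch of that handshake using a fake future that models only the two methods the PR relies on (Error and Index).

```go
// Future-based wait, sketched with a fake future; fakeFuture is illustrative.
package main

import "fmt"

type applyFuture interface {
	Error() error  // blocks until the log entry commits or fails
	Index() uint64 // index the entry was committed at
}

type fakeFuture struct {
	err   error
	index uint64
}

func (f *fakeFuture) Error() error  { return f.err }
func (f *fakeFuture) Index() uint64 { return f.index }

func main() {
	appliedIndex := uint64(41) // what s.raft.AppliedIndex() might report

	// Optimistic guess used for the snapshot upsert: the very next index.
	// Other writes can land first, so the guess may be off; the main loop
	// repairs a failed or stale apply by taking a fresh snapshot.
	nextIdx := appliedIndex + 1

	var future applyFuture = &fakeFuture{index: 42}

	// What asyncPlanWait does once the transaction resolves.
	if err := future.Error(); err != nil {
		fmt.Println("apply failed, respond with the error:", err)
		return
	}
	fmt.Printf("optimistic index %d, committed index %d\n", nextIdx, future.Index())
}
```
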
Review comment:
- complete -> apply.