From 5741bc780ca0b9c8cc12126fe025d2259b6c4f59 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 2 Oct 2024 16:44:50 +0200 Subject: [PATCH 01/40] fsm: adjust timeTableLimit according to longest GC threshold --- nomad/core_sched.go | 2 +- nomad/core_sched_test.go | 2 +- nomad/fsm.go | 18 ++++++++++++++++-- nomad/server.go | 21 +++++++++++++++++++++ 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 3433daac7cc..0c008489de5 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -1300,7 +1300,7 @@ func (c *CoreScheduler) getThreshold(eval *structs.Evaluation, objectName, confi c.logger.Debug(fmt.Sprintf("forced %s GC", objectName)) } else { // Compute the old threshold limit for GC using the FSM - // time table. This is a rough mapping of a time to the + // time table. This is a rough mapping of a time to the // Raft index it belongs to. tt := c.srv.fsm.TimeTable() cutoff := time.Now().UTC().Add(-1 * configThreshold) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 1ad94d3b6c8..a637661fd6a 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -3073,7 +3073,7 @@ func TestCoreScheduler_ExpiredACLTokenGC(t *testing.T) { // Overwrite the timetable. The existing timetable has an entry due to the // ACL bootstrapping which makes witnessing a new index at a timestamp in // the past impossible. - tt := NewTimeTable(timeTableGranularity, timeTableLimit) + tt := NewTimeTable(timeTableGranularity, timeTableDefaultLimit) tt.Witness(20, time.Now().UTC().Add(-1*testServer.config.ACLTokenExpirationGCThreshold)) testServer.fsm.timetable = tt diff --git a/nomad/fsm.go b/nomad/fsm.go index 4e8494eac2d..9eb62714485 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -26,9 +26,11 @@ import ( const ( // timeTableGranularity is the granularity of index to time tracking timeTableGranularity = 5 * time.Minute +) - // timeTableLimit is the maximum limit of our tracking - timeTableLimit = 72 * time.Hour +var ( + // timeTableDefaultLimit is the default maximum limit of our tracking + timeTableDefaultLimit = 72 * time.Hour ) // SnapshotType is prefixed to a record in the FSM snapshot @@ -192,6 +194,11 @@ type FSMConfig struct { // JobTrackedVersions is the number of historic job versions that are kept. JobTrackedVersions int + + // LongestThreshold is the longest GC threshold that has been set in the server + // config. We use it to adjust timeTableDefaultLimit, which defaults to 72h, if + // necessary (users can have longer GC thresholds). + LongestThreshold *time.Duration } // NewFSM is used to construct a new FSM with a blank state. 
@@ -209,6 +216,13 @@ func NewFSM(config *FSMConfig) (*nomadFSM, error) { return nil, err } + // adjust the timeTableLimit if there's any configured GC threshold longer than + // the default 72h + timeTableLimit := timeTableDefaultLimit + if config.LongestThreshold != nil && *config.LongestThreshold > timeTableDefaultLimit { + timeTableLimit = *config.LongestThreshold * 2 + } + fsm := &nomadFSM{ evalBroker: config.EvalBroker, periodicDispatcher: config.Periodic, diff --git a/nomad/server.go b/nomad/server.go index 4e5a33d9c9c..2d35e1538d9 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -1388,6 +1388,13 @@ func (s *Server) setupRaft() error { EventBufferSize: s.config.EventBufferSize, JobTrackedVersions: s.config.JobTrackedVersions, } + + // Check for any GC thresholds that have been set + longestThreshold := s.findLongestThreshold() + if longestThreshold != 0 { + fsmConfig.LongestThreshold = &longestThreshold + } + var err error s.fsm, err = NewFSM(fsmConfig) if err != nil { @@ -1658,6 +1665,20 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string) ( return serf.Create(conf) } +func (s *Server) findLongestThreshold() time.Duration { + return max( + s.config.ACLTokenExpirationGCThreshold, + s.config.BatchEvalGCThreshold, + s.config.CSIPluginGCThreshold, + s.config.CSIVolumeClaimGCThreshold, + s.config.DeploymentGCThreshold, + s.config.EvalGCThreshold, + s.config.JobGCThreshold, + s.config.NodeGCThreshold, + s.config.RootKeyGCThreshold, + ) +} + // shouldReloadSchedulers checks the new config to determine if the scheduler worker pool // needs to be updated. If so, returns true and a pointer to a populated SchedulerWorkerPoolArgs func shouldReloadSchedulers(s *Server, newPoolArgs *SchedulerWorkerPoolArgs) (bool, *SchedulerWorkerPoolArgs) { From 807efc50517a15c0aadb669e4cdfdad8b2c8bf72 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 2 Oct 2024 16:47:52 +0200 Subject: [PATCH 02/40] simplify --- nomad/fsm.go | 6 +++--- nomad/server.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nomad/fsm.go b/nomad/fsm.go index 9eb62714485..7abfaee6dce 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -198,7 +198,7 @@ type FSMConfig struct { // LongestThreshold is the longest GC threshold that has been set in the server // config. We use it to adjust timeTableDefaultLimit, which defaults to 72h, if // necessary (users can have longer GC thresholds). - LongestThreshold *time.Duration + LongestThreshold time.Duration } // NewFSM is used to construct a new FSM with a blank state. 
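// Illustrative note (not part of the original patch): patches 01-02 widen the
// FSM time table whenever a configured GC threshold exceeds the 72h default,
// presumably doubling the longest threshold so the table still retains entries
// older than any GC cutoff:
//
//	timeTableLimit := timeTableDefaultLimit
//	if config.LongestThreshold > timeTableDefaultLimit {
//		timeTableLimit = config.LongestThreshold * 2
//	}
//
// Patches 03-05 then remove the time table altogether, superseding this change.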
@@ -219,8 +219,8 @@ func NewFSM(config *FSMConfig) (*nomadFSM, error) { // adjust the timeTableLimit if there's any configured GC threshold longer than // the default 72h timeTableLimit := timeTableDefaultLimit - if config.LongestThreshold != nil && *config.LongestThreshold > timeTableDefaultLimit { - timeTableLimit = *config.LongestThreshold * 2 + if config.LongestThreshold > timeTableDefaultLimit { + timeTableLimit = config.LongestThreshold * 2 } fsm := &nomadFSM{ diff --git a/nomad/server.go b/nomad/server.go index 2d35e1538d9..5d70872405b 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -1392,7 +1392,7 @@ func (s *Server) setupRaft() error { // Check for any GC thresholds that have been set longestThreshold := s.findLongestThreshold() if longestThreshold != 0 { - fsmConfig.LongestThreshold = &longestThreshold + fsmConfig.LongestThreshold = longestThreshold } var err error From 5e794528dfb50b6fb8b1b9f1325ff0168dcdea97 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Mon, 14 Oct 2024 09:48:26 +0200 Subject: [PATCH 03/40] remove timeTable from fsm --- nomad/fsm.go | 27 +-------------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/nomad/fsm.go b/nomad/fsm.go index 7abfaee6dce..61a8f6782a5 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -23,16 +23,6 @@ import ( "github.com/hashicorp/raft" ) -const ( - // timeTableGranularity is the granularity of index to time tracking - timeTableGranularity = 5 * time.Minute -) - -var ( - // timeTableDefaultLimit is the default maximum limit of our tracking - timeTableDefaultLimit = 72 * time.Hour -) - // SnapshotType is prefixed to a record in the FSM snapshot // so that we can determine the type for restore type SnapshotType byte @@ -133,7 +123,6 @@ type nomadFSM struct { encrypter *Encrypter logger hclog.Logger state *state.StateStore - timetable *TimeTable // config is the FSM config config *FSMConfig @@ -155,8 +144,7 @@ type nomadFSM struct { // state in a way that can be accessed concurrently with operations // that may modify the live state. type nomadSnapshot struct { - snap *state.StateSnapshot - timetable *TimeTable + snap *state.StateSnapshot } // SnapshotHeader is the first entry in our snapshot @@ -194,11 +182,6 @@ type FSMConfig struct { // JobTrackedVersions is the number of historic job versions that are kept. JobTrackedVersions int - - // LongestThreshold is the longest GC threshold that has been set in the server - // config. We use it to adjust timeTableDefaultLimit, which defaults to 72h, if - // necessary (users can have longer GC thresholds). - LongestThreshold time.Duration } // NewFSM is used to construct a new FSM with a blank state. 
@@ -216,13 +199,6 @@ func NewFSM(config *FSMConfig) (*nomadFSM, error) { return nil, err } - // adjust the timeTableLimit if there's any configured GC threshold longer than - // the default 72h - timeTableLimit := timeTableDefaultLimit - if config.LongestThreshold > timeTableDefaultLimit { - timeTableLimit = config.LongestThreshold * 2 - } - fsm := &nomadFSM{ evalBroker: config.EvalBroker, periodicDispatcher: config.Periodic, @@ -231,7 +207,6 @@ func NewFSM(config *FSMConfig) (*nomadFSM, error) { logger: config.Logger.Named("fsm"), config: config, state: state, - timetable: NewTimeTable(timeTableGranularity, timeTableLimit), enterpriseAppliers: make(map[structs.MessageType]LogApplier, 8), enterpriseRestorers: make(map[SnapshotType]SnapshotRestorer, 8), } From acd05b0cf87a21a66b8baa97deed74c8af490999 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:59:52 +0200 Subject: [PATCH 04/40] remove timetable --- nomad/timetable.go | 124 ------------------------------ nomad/timetable_test.go | 166 ---------------------------------------- 2 files changed, 290 deletions(-) delete mode 100644 nomad/timetable.go delete mode 100644 nomad/timetable_test.go diff --git a/nomad/timetable.go b/nomad/timetable.go deleted file mode 100644 index cc99d6f2c14..00000000000 --- a/nomad/timetable.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package nomad - -import ( - "sort" - "sync" - "time" - - "github.com/hashicorp/go-msgpack/v2/codec" -) - -// TimeTable is used to associate a Raft index with a timestamp. -// This is used so that we can quickly go from a timestamp to an -// index or visa versa. -type TimeTable struct { - granularity time.Duration - limit time.Duration - table []TimeTableEntry - l sync.RWMutex -} - -// TimeTableEntry is used to track a time and index -type TimeTableEntry struct { - Index uint64 - Time time.Time -} - -// NewTimeTable creates a new time table which stores entries -// at a given granularity for a maximum limit. The storage space -// required is (limit/granularity) -func NewTimeTable(granularity time.Duration, limit time.Duration) *TimeTable { - size := limit / granularity - if size < 1 { - size = 1 - } - t := &TimeTable{ - granularity: granularity, - limit: limit, - table: make([]TimeTableEntry, 1, size), - } - return t -} - -// Serialize is used to serialize the time table -func (t *TimeTable) Serialize(enc *codec.Encoder) error { - t.l.RLock() - defer t.l.RUnlock() - return enc.Encode(t.table) -} - -// Deserialize is used to deserialize the time table -// and restore the state -func (t *TimeTable) Deserialize(dec *codec.Decoder) error { - // Decode the table - var table []TimeTableEntry - if err := dec.Decode(&table); err != nil { - return err - } - - // Witness from oldest to newest - n := len(table) - for i := n - 1; i >= 0; i-- { - t.Witness(table[i].Index, table[i].Time) - } - return nil -} - -// Witness is used to witness a new index and time. 
-func (t *TimeTable) Witness(index uint64, when time.Time) { - t.l.Lock() - defer t.l.Unlock() - - // Ensure monotonic indexes - if t.table[0].Index > index { - return - } - - // Skip if we already have a recent enough entry - if when.Sub(t.table[0].Time) < t.granularity { - return - } - - // Grow the table if we haven't reached the size - if len(t.table) < cap(t.table) { - t.table = append(t.table, TimeTableEntry{}) - } - - // Add this entry - copy(t.table[1:], t.table[:len(t.table)-1]) - t.table[0].Index = index - t.table[0].Time = when -} - -// NearestIndex returns the nearest index older than the given time -func (t *TimeTable) NearestIndex(when time.Time) uint64 { - t.l.RLock() - defer t.l.RUnlock() - - n := len(t.table) - idx := sort.Search(n, func(i int) bool { - return !t.table[i].Time.After(when) - }) - if idx < n && idx >= 0 { - return t.table[idx].Index - } - return 0 -} - -// NearestTime returns the nearest time older than the given index -func (t *TimeTable) NearestTime(index uint64) time.Time { - t.l.RLock() - defer t.l.RUnlock() - - n := len(t.table) - idx := sort.Search(n, func(i int) bool { - return t.table[i].Index <= index - }) - if idx < n && idx >= 0 { - return t.table[idx].Time - } - return time.Time{} -} diff --git a/nomad/timetable_test.go b/nomad/timetable_test.go deleted file mode 100644 index 5396218fab9..00000000000 --- a/nomad/timetable_test.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package nomad - -import ( - "bytes" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/go-msgpack/v2/codec" - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/nomad/structs" -) - -func TestTimeTable(t *testing.T) { - ci.Parallel(t) - tt := NewTimeTable(time.Second, time.Minute) - - index := tt.NearestIndex(time.Now()) - if index != 0 { - t.Fatalf("bad: %v", index) - } - - when := tt.NearestTime(1000) - if !when.IsZero() { - t.Fatalf("bad: %v", when) - } - - // Witness some data - start := time.Now() - plusOne := start.Add(time.Minute) - plusTwo := start.Add(2 * time.Minute) - plusFive := start.Add(5 * time.Minute) - plusThirty := start.Add(30 * time.Minute) - plusHour := start.Add(60 * time.Minute) - plusHourHalf := start.Add(90 * time.Minute) - - tt.Witness(2, start) - tt.Witness(2, start) - - tt.Witness(10, plusOne) - tt.Witness(10, plusOne) - - tt.Witness(20, plusTwo) - tt.Witness(20, plusTwo) - - tt.Witness(30, plusFive) - tt.Witness(30, plusFive) - - tt.Witness(40, plusThirty) - tt.Witness(40, plusThirty) - - tt.Witness(50, plusHour) - tt.Witness(50, plusHour) - - type tcase struct { - when time.Time - expectIndex uint64 - - index uint64 - expectWhen time.Time - } - cases := []tcase{ - // Exact match - {start, 2, 2, start}, - {plusOne, 10, 10, plusOne}, - {plusHour, 50, 50, plusHour}, - - // Before the newest entry - {plusHourHalf, 50, 51, plusHour}, - - // After the oldest entry - {time.Time{}, 0, 1, time.Time{}}, - - // Mid range - {start.Add(3 * time.Minute), 20, 25, plusTwo}, - } - - for _, tc := range cases { - index := tt.NearestIndex(tc.when) - if index != tc.expectIndex { - t.Fatalf("bad: %v %v", index, tc.expectIndex) - } - - when := tt.NearestTime(tc.index) - if when != tc.expectWhen { - t.Fatalf("bad: for %d %v %v", tc.index, when, tc.expectWhen) - } - } -} - -func TestTimeTable_SerializeDeserialize(t *testing.T) { - ci.Parallel(t) - tt := NewTimeTable(time.Second, 
time.Minute) - - // Witness some data - start := time.Now() - plusOne := start.Add(time.Minute) - plusTwo := start.Add(2 * time.Minute) - plusFive := start.Add(5 * time.Minute) - plusThirty := start.Add(30 * time.Minute) - plusHour := start.Add(60 * time.Minute) - - tt.Witness(2, start) - tt.Witness(10, plusOne) - tt.Witness(20, plusTwo) - tt.Witness(30, plusFive) - tt.Witness(40, plusThirty) - tt.Witness(50, plusHour) - - var buf bytes.Buffer - enc := codec.NewEncoder(&buf, structs.MsgpackHandle) - - err := tt.Serialize(enc) - if err != nil { - t.Fatalf("err: %v", err) - } - - dec := codec.NewDecoder(&buf, structs.MsgpackHandle) - - tt2 := NewTimeTable(time.Second, time.Minute) - err = tt2.Deserialize(dec) - if err != nil { - t.Fatalf("err: %v", err) - } - - o := cmp.AllowUnexported(TimeTable{}) - o2 := cmpopts.IgnoreTypes(sync.RWMutex{}) - if !cmp.Equal(tt.table, tt2.table, o, o2) { - t.Fatalf("bad: %s", cmp.Diff(tt, tt2, o, o2)) - } -} - -func TestTimeTable_Overflow(t *testing.T) { - ci.Parallel(t) - tt := NewTimeTable(time.Second, 3*time.Second) - - // Witness some data - start := time.Now() - plusOne := start.Add(time.Second) - plusTwo := start.Add(2 * time.Second) - plusThree := start.Add(3 * time.Second) - - tt.Witness(10, start) - tt.Witness(20, plusOne) - tt.Witness(30, plusTwo) - tt.Witness(40, plusThree) - - if len(tt.table) != 3 { - t.Fatalf("bad") - } - - index := tt.NearestIndex(start) - if index != 0 { - t.Fatalf("bad: %v %v", index, 0) - } - - when := tt.NearestTime(15) - if !when.IsZero() { - t.Fatalf("bad: %v", when) - } -} From 811cde51cb6a566d1242941fe759923848b669bb Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 19:06:16 +0200 Subject: [PATCH 05/40] remove tt completely --- nomad/blocked_evals.go | 39 ++++-------- nomad/core_sched_test.go | 132 --------------------------------------- nomad/fsm.go | 23 +------ nomad/fsm_test.go | 48 -------------- 4 files changed, 13 insertions(+), 229 deletions(-) diff --git a/nomad/blocked_evals.go b/nomad/blocked_evals.go index f227a3c7cfc..95917a5ab78 100644 --- a/nomad/blocked_evals.go +++ b/nomad/blocked_evals.go @@ -76,10 +76,6 @@ type BlockedEvals struct { // duplicates. duplicateCh chan struct{} - // timetable is used to correlate indexes with their insertion time. This - // allows us to prune based on time. - timetable *TimeTable - // stopCh is used to stop any created goroutines. stopCh chan struct{} } @@ -143,12 +139,6 @@ func (b *BlockedEvals) SetEnabled(enabled bool) { } } -func (b *BlockedEvals) SetTimetable(timetable *TimeTable) { - b.l.Lock() - b.timetable = timetable - b.l.Unlock() -} - // Block tracks the passed evaluation and enqueues it into the eval broker when // a suitable node calls unblock. 
func (b *BlockedEvals) Block(eval *structs.Evaluation) { @@ -700,7 +690,6 @@ func (b *BlockedEvals) Flush() { b.escaped = make(map[string]wrappedEval) b.jobs = make(map[structs.NamespacedID]string) b.unblockIndexes = make(map[string]uint64) - b.timetable = nil b.duplicates = nil b.capacityChangeCh = make(chan *capacityUpdate, unblockBuffer) b.stopCh = make(chan struct{}) @@ -774,7 +763,7 @@ func (b *BlockedEvals) prune(stopCh <-chan struct{}) { return case t := <-ticker.C: cutoff := t.UTC().Add(-1 * pruneThreshold) - b.pruneUnblockIndexes(cutoff) + // b.pruneUnblockIndexes(cutoff) b.pruneStats(cutoff) } } @@ -782,21 +771,17 @@ func (b *BlockedEvals) prune(stopCh <-chan struct{}) { // pruneUnblockIndexes is used to prune any tracked entry that is excessively // old. This protects againsts unbounded growth of the map. -func (b *BlockedEvals) pruneUnblockIndexes(cutoff time.Time) { - b.l.Lock() - defer b.l.Unlock() - - if b.timetable == nil { - return - } - - oldThreshold := b.timetable.NearestIndex(cutoff) - for key, index := range b.unblockIndexes { - if index < oldThreshold { - delete(b.unblockIndexes, key) - } - } -} +// func (b *BlockedEvals) pruneUnblockIndexes(cutoff time.Time) { +// b.l.Lock() +// defer b.l.Unlock() + +// oldThreshold := b.timetable.NearestIndex(cutoff) +// for key, index := range b.unblockIndexes { +// if index < oldThreshold { +// delete(b.unblockIndexes, key) +// } +// } +// } // pruneStats is used to prune any zero value stats that are excessively old. func (b *BlockedEvals) pruneStats(cutoff time.Time) { diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index a637661fd6a..31f5d71a12c 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -31,9 +31,6 @@ func TestCoreScheduler_EvalGC(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" eval store := s1.fsm.State() eval := mock.Eval() @@ -81,10 +78,6 @@ func TestCoreScheduler_EvalGC(t *testing.T) { must.NoError(t, store.UpsertServiceRegistrations( structs.MsgTypeTestSetup, 1002, []*structs.ServiceRegistration{service})) - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() must.NoError(t, err) @@ -121,9 +114,6 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" eval store := s1.fsm.State() eval := mock.Eval() @@ -184,10 +174,6 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2}) require.Nil(t, err) - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -225,9 +211,6 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" eval 
store := s1.fsm.State() eval := mock.Eval() @@ -263,10 +246,6 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) require.Nil(t, err) - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -305,9 +284,6 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 2, 10) - var jobModifyIdx uint64 = 1000 // A "stopped" job containing one "complete" eval with one terminal allocation. @@ -549,11 +525,6 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { []*structs.Allocation{}, ) - // Update the time tables by half of the BatchEvalGCThreshold which is too - // small to GC anything. - tt := s1.fsm.TimeTable() - tt.Witness(2*jobModifyIdx, time.Now().UTC().Add((-1)*s1.config.BatchEvalGCThreshold/2)) - gc = s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx*2) err = core.Process(gc) must.NoError(t, err) @@ -578,11 +549,6 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { []*structs.Allocation{}, ) - // Update the time tables so that BatchEvalGCThreshold has elapsed. - s1.fsm.timetable.table = make([]TimeTableEntry, 2, 10) - tt = s1.fsm.TimeTable() - tt.Witness(2*jobModifyIdx, time.Now().UTC().Add(-1*s1.config.BatchEvalGCThreshold)) - gc = s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx*2) err = core.Process(gc) must.NoError(t, err) @@ -705,9 +671,6 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" eval store := s1.fsm.State() eval := mock.Eval() @@ -761,10 +724,6 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) require.Nil(t, err) - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -829,9 +788,6 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) { defer cleanup() testutil.WaitForLeader(t, server.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - server.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" eval store := server.fsm.State() eval := mock.Eval() @@ -912,9 +868,6 @@ func TestCoreScheduler_NodeGC(t *testing.T) { defer cleanup() testutil.WaitForLeader(t, server.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - server.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" node store := server.fsm.State() node := mock.Node() @@ -924,10 +877,6 @@ func TestCoreScheduler_NodeGC(t *testing.T) { t.Fatalf("err: %v", err) } - // Update the time tables to make this work - tt := server.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*server.config.NodeGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -962,9 +911,6 @@ func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { defer cleanupS1() 
testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" node store := s1.fsm.State() node := mock.Node() @@ -982,10 +928,6 @@ func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { t.Fatalf("err: %v", err) } - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -1018,9 +960,6 @@ func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" node store := s1.fsm.State() node := mock.Node() @@ -1040,10 +979,6 @@ func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { t.Fatalf("err: %v", err) } - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -1076,9 +1011,6 @@ func TestCoreScheduler_NodeGC_Force(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert "dead" node store := s1.fsm.State() node := mock.Node() @@ -1120,9 +1052,6 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert job. store := s1.fsm.State() job := mock.Job() @@ -1146,10 +1075,6 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { t.Fatalf("err: %v", err) } - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -1244,9 +1169,6 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert job. store := s1.fsm.State() job := mock.Job() @@ -1290,10 +1212,6 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { t.Fatalf("err: %v", err) } - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -1390,9 +1308,6 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert job. 
store := s1.fsm.State() job := mock.Job() @@ -1435,10 +1350,6 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) { // Force the jobs state to dead job.Status = structs.JobStatusDead - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -1503,9 +1414,6 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert job. store := s1.fsm.State() job := mock.Job() @@ -1544,10 +1452,6 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { t.Fatalf("err: %v", err) } - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() if err != nil { @@ -1611,9 +1515,6 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) { defer cleanup() testutil.WaitForLeader(t, server.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - server.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert job. store := server.fsm.State() job := mock.Job() @@ -1676,9 +1577,6 @@ func TestCoreScheduler_JobGC_Parameterized(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert a parameterized job. store := s1.fsm.State() job := mock.Job() @@ -1756,9 +1654,6 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert a parameterized job. 
store := s1.fsm.State() job := mock.PeriodicJob() @@ -1957,9 +1852,6 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { testutil.WaitForLeader(t, s1.RPC) assert := assert.New(t) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert an active, terminal, and terminal with allocations deployment store := s1.fsm.State() d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() @@ -1974,10 +1866,6 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { a.DeploymentID = d3.ID assert.Nil(store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}), "UpsertAllocs") - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.DeploymentGCThreshold)) - // Create a core scheduler snap, err := store.Snapshot() assert.Nil(err, "Snapshot") @@ -2015,9 +1903,6 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { testutil.WaitForLeader(t, server.RPC) assert := assert.New(t) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - server.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Insert terminal and active deployment store := server.fsm.State() d1, d2 := mock.Deployment(), mock.Deployment() @@ -2053,9 +1938,6 @@ func TestCoreScheduler_PartitionEvalReap(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Create a core scheduler snap, err := s1.fsm.State().Snapshot() if err != nil { @@ -2095,9 +1977,6 @@ func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) { defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - // Create a core scheduler snap, err := s1.fsm.State().Snapshot() if err != nil { @@ -2424,16 +2303,12 @@ func TestCoreScheduler_CSIPluginGC(t *testing.T) { defer cleanupSRV() testutil.WaitForLeader(t, srv.RPC) - srv.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - deleteNodes := state.CreateTestCSIPlugin(srv.fsm.State(), "foo") defer deleteNodes() store := srv.fsm.State() // Update the time tables to make this work - tt := srv.fsm.TimeTable() index := uint64(2000) - tt.Witness(index, time.Now().UTC().Add(-1*srv.config.CSIPluginGCThreshold)) // Create a core scheduler snap, err := store.Snapshot() @@ -3070,13 +2945,6 @@ func TestCoreScheduler_ExpiredACLTokenGC(t *testing.T) { }) require.NoError(t, err) - // Overwrite the timetable. The existing timetable has an entry due to the - // ACL bootstrapping which makes witnessing a new index at a timestamp in - // the past impossible. - tt := NewTimeTable(timeTableGranularity, timeTableDefaultLimit) - tt.Witness(20, time.Now().UTC().Add(-1*testServer.config.ACLTokenExpirationGCThreshold)) - testServer.fsm.timetable = tt - // Generate the core scheduler. 
snap, err := testServer.State().Snapshot() require.NoError(t, err) diff --git a/nomad/fsm.go b/nomad/fsm.go index 61a8f6782a5..de16e67ed9f 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -233,18 +233,10 @@ func (n *nomadFSM) State() *state.StateStore { return n.state } -// TimeTable returns the time table of transactions -func (n *nomadFSM) TimeTable() *TimeTable { - return n.timetable -} - func (n *nomadFSM) Apply(log *raft.Log) interface{} { buf := log.Data msgType := structs.MessageType(buf[0]) - // Witness this write - n.timetable.Witness(log.Index, time.Now().UTC()) - // Check if this message type should be ignored when unknown. This is // used so that new commands can be added with developer control if older // versions can safely ignore the command, or if they should crash. @@ -1507,8 +1499,7 @@ func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) { } ns := &nomadSnapshot{ - snap: snap, - timetable: n.timetable, + snap: snap, } return ns, nil } @@ -1572,11 +1563,6 @@ func (n *nomadFSM) restoreImpl(old io.ReadCloser, filter *FSMFilter) error { // Decode snapType := SnapshotType(msgType[0]) switch snapType { - case TimeTableSnapshot: - if err := n.timetable.Deserialize(dec); err != nil { - return fmt.Errorf("time table deserialize failed: %v", err) - } - case NodeSnapshot: node := new(structs.Node) if err := dec.Decode(node); err != nil { @@ -2415,13 +2401,6 @@ func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error { return err } - // Write the time table - sink.Write([]byte{byte(TimeTableSnapshot)}) - if err := s.timetable.Serialize(encoder); err != nil { - sink.Cancel() - return err - } - // Write all the data out if err := s.persistIndexes(sink, encoder); err != nil { sink.Cancel() diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 335b7c81451..1d5375cbb1e 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -169,12 +169,6 @@ func TestFSM_UpsertNode(t *testing.T) { t.Fatalf("bad index: %d", node.CreateIndex) } - tt := fsm.TimeTable() - index := tt.NearestIndex(time.Now().UTC()) - if index != 1 { - t.Fatalf("bad: %d", index) - } - // Verify the eval was unblocked. 
testutil.WaitForResult(func() (bool, error) { bStats := fsm.blockedEvals.Stats() @@ -1600,12 +1594,6 @@ func TestFSM_UpsertVaultAccessor(t *testing.T) { if out1.CreateIndex != 1 { t.Fatalf("bad index: %d", out2.CreateIndex) } - - tt := fsm.TimeTable() - index := tt.NearestIndex(time.Now().UTC()) - if index != 1 { - t.Fatalf("bad: %d", index) - } } func TestFSM_DeregisterVaultAccessor(t *testing.T) { @@ -1643,12 +1631,6 @@ func TestFSM_DeregisterVaultAccessor(t *testing.T) { if out1 != nil { t.Fatalf("not deleted!") } - - tt := fsm.TimeTable() - index := tt.NearestIndex(time.Now().UTC()) - if index != 1 { - t.Fatalf("bad: %d", index) - } } func TestFSM_UpsertSITokenAccessor(t *testing.T) { @@ -1680,10 +1662,6 @@ func TestFSM_UpsertSITokenAccessor(t *testing.T) { r.NoError(err) r.NotNil(result2) r.Equal(uint64(1), result2.CreateIndex) - - tt := fsm.TimeTable() - latestIndex := tt.NearestIndex(time.Now()) - r.Equal(uint64(1), latestIndex) } func TestFSM_DeregisterSITokenAccessor(t *testing.T) { @@ -1718,10 +1696,6 @@ func TestFSM_DeregisterSITokenAccessor(t *testing.T) { result2, err := fsm.State().SITokenAccessor(ws, a2.AccessorID) r.NoError(err) r.Nil(result2) // should have been deleted - - tt := fsm.TimeTable() - latestIndex := tt.NearestIndex(time.Now()) - r.Equal(uint64(1), latestIndex) } func TestFSM_ApplyPlanResults(t *testing.T) { @@ -2567,28 +2541,6 @@ func TestFSM_SnapshotRestore_Indexes(t *testing.T) { } } -func TestFSM_SnapshotRestore_TimeTable(t *testing.T) { - ci.Parallel(t) - // Add some state - fsm := testFSM(t) - - tt := fsm.TimeTable() - start := time.Now().UTC() - tt.Witness(1000, start) - tt.Witness(2000, start.Add(10*time.Minute)) - - // Verify the contents - fsm2 := testSnapshotRestore(t, fsm) - - tt2 := fsm2.TimeTable() - if tt2.NearestTime(1500) != start { - t.Fatalf("bad") - } - if tt2.NearestIndex(start.Add(15*time.Minute)) != 2000 { - t.Fatalf("bad") - } -} - func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) { ci.Parallel(t) // Add some state From 97631106882dadd9f91ab50dcf97d5a1e028217b Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 19:06:38 +0200 Subject: [PATCH 06/40] adjust core sched for jobs, nodes and deployments --- nomad/core_sched.go | 56 ++++++++++++++------------------------------- nomad/server.go | 20 ---------------- 2 files changed, 17 insertions(+), 59 deletions(-) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 0c008489de5..0f76131ea12 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -7,7 +7,6 @@ import ( "context" "encoding/json" "fmt" - "math" "strings" "time" @@ -116,8 +115,7 @@ func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error { return err } - oldThreshold := c.getThreshold(eval, "job", - "job_gc_threshold", c.srv.config.JobGCThreshold) + cutoffTime := c.getCutoffTime(c.srv.config.JobGCThreshold) // Collect the allocations, evaluations and jobs to GC var gcAlloc, gcEval []string @@ -128,7 +126,8 @@ OUTER: job := i.(*structs.Job) // Ignore new jobs. 
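// Illustrative note (not part of the original patch): from this patch onward the
// core scheduler decides GC eligibility directly from each object's own
// timestamp instead of translating a cutoff time into a Raft index via the FSM
// TimeTable. The pattern shared by the GC paths below is roughly:
//
//	cutoffTime := time.Now().UTC().Add(-1 * configThreshold) // getCutoffTime
//	if objTime.After(cutoffTime) {
//		// object is newer than the threshold; skip collection
//	}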
- if job.CreateIndex > oldThreshold { + st := time.Unix(job.SubmitTime, 0) + if st.After(cutoffTime) { continue } @@ -252,22 +251,20 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error { return err } - oldThreshold := c.getThreshold(eval, "eval", - "eval_gc_threshold", c.srv.config.EvalGCThreshold) - batchOldThreshold := c.getThreshold(eval, "eval", - "batch_eval_gc_threshold", c.srv.config.BatchEvalGCThreshold) + cutoffTime := c.getCutoffTime(c.srv.config.EvalGCThreshold) + batchCutoffTime := c.getCutoffTime(c.srv.config.BatchEvalGCThreshold) // Collect the allocations and evaluations to GC var gcAlloc, gcEval []string for raw := iter.Next(); raw != nil; raw = iter.Next() { eval := raw.(*structs.Evaluation) - gcThreshold := oldThreshold + gcCutoffTime := cutoffTime if eval.Type == structs.JobTypeBatch { - gcThreshold = batchOldThreshold + gcCutoffTime = batchCutoffTime } - gc, allocs, err := c.gcEval(eval, gcThreshold, false) + gc, allocs, err := c.gcEval(eval, gcCutoffTime, false) if err != nil { return err } @@ -439,8 +436,7 @@ func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error { return err } - oldThreshold := c.getThreshold(eval, "node", - "node_gc_threshold", c.srv.config.NodeGCThreshold) + cutoffTime := c.getCutoffTime(c.srv.config.NodeGCThreshold) // Collect the nodes to GC var gcNode []string @@ -453,7 +449,8 @@ OUTER: node := raw.(*structs.Node) // Ignore non-terminal and new nodes - if !node.TerminalStatus() || node.ModifyIndex > oldThreshold { + st := time.Unix(node.StatusUpdatedAt, 0) + if !node.TerminalStatus() || st.After(cutoffTime) { continue } @@ -536,8 +533,7 @@ func (c *CoreScheduler) deploymentGC(eval *structs.Evaluation) error { return err } - oldThreshold := c.getThreshold(eval, "deployment", - "deployment_gc_threshold", c.srv.config.DeploymentGCThreshold) + cutoffTime := c.getCutoffTime(c.srv.config.DeploymentGCThreshold) // Collect the deployments to GC var gcDeployment []string @@ -551,7 +547,8 @@ OUTER: deploy := raw.(*structs.Deployment) // Ignore non-terminal and new deployments - if deploy.Active() || deploy.ModifyIndex > oldThreshold { + mt := time.Unix(deploy.ModifyTime, 0) + if deploy.Active() || mt.After(cutoffTime) { continue } @@ -1289,26 +1286,7 @@ func (c *CoreScheduler) rotateVariables(iter memdb.ResultIterator, eval *structs return nil } -// getThreshold returns the index threshold for determining whether an -// object is old enough to GC -func (c *CoreScheduler) getThreshold(eval *structs.Evaluation, objectName, configName string, configThreshold time.Duration) uint64 { - var oldThreshold uint64 - if eval.JobID == structs.CoreJobForceGC { - // The GC was forced, so set the threshold to its maximum so - // everything will GC. - oldThreshold = math.MaxUint64 - c.logger.Debug(fmt.Sprintf("forced %s GC", objectName)) - } else { - // Compute the old threshold limit for GC using the FSM - // time table. This is a rough mapping of a time to the - // Raft index it belongs to. 
- tt := c.srv.fsm.TimeTable() - cutoff := time.Now().UTC().Add(-1 * configThreshold) - oldThreshold = tt.NearestIndex(cutoff) - c.logger.Debug( - fmt.Sprintf("%s GC scanning before cutoff index", objectName), - "index", oldThreshold, - configName, configThreshold) - } - return oldThreshold +// getCutoffTime returns a time.Time of the latest object that should be GCd +func (c *CoreScheduler) getCutoffTime(configThreshold time.Duration) time.Time { + return time.Now().UTC().Add(-1 * configThreshold) } diff --git a/nomad/server.go b/nomad/server.go index 5d70872405b..d69cb2b8fc7 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -1389,12 +1389,6 @@ func (s *Server) setupRaft() error { JobTrackedVersions: s.config.JobTrackedVersions, } - // Check for any GC thresholds that have been set - longestThreshold := s.findLongestThreshold() - if longestThreshold != 0 { - fsmConfig.LongestThreshold = longestThreshold - } - var err error s.fsm, err = NewFSM(fsmConfig) if err != nil { @@ -1665,20 +1659,6 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string) ( return serf.Create(conf) } -func (s *Server) findLongestThreshold() time.Duration { - return max( - s.config.ACLTokenExpirationGCThreshold, - s.config.BatchEvalGCThreshold, - s.config.CSIPluginGCThreshold, - s.config.CSIVolumeClaimGCThreshold, - s.config.DeploymentGCThreshold, - s.config.EvalGCThreshold, - s.config.JobGCThreshold, - s.config.NodeGCThreshold, - s.config.RootKeyGCThreshold, - ) -} - // shouldReloadSchedulers checks the new config to determine if the scheduler worker pool // needs to be updated. If so, returns true and a pointer to a populated SchedulerWorkerPoolArgs func shouldReloadSchedulers(s *Server, newPoolArgs *SchedulerWorkerPoolArgs) (bool, *SchedulerWorkerPoolArgs) { From e79d38fac80cf6d68c2e6d8b8c037beb41f4ea17 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 19:06:47 +0200 Subject: [PATCH 07/40] add create and modify time to deployments --- nomad/structs/structs.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index ee505b46049..e41c3baba7b 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -10638,6 +10638,9 @@ type Deployment struct { CreateIndex uint64 ModifyIndex uint64 + + CreateTime int64 + ModifyTime int64 } // NewDeployment creates a new deployment given the job. From 065f03e0186416c1d27cdd3830f6a0dff6eac007 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 19:48:21 +0200 Subject: [PATCH 08/40] remove threshold index from other objects in the core scheduler --- nomad/core_sched.go | 46 ++++++++++++++++++++------------------------ nomad/structs/csi.go | 6 ++++++ 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 0f76131ea12..c82b919e520 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -141,7 +141,7 @@ OUTER: allEvalsGC := true var jobAlloc, jobEval []string for _, eval := range evals { - gc, allocs, err := c.gcEval(eval, oldThreshold, true) + gc, allocs, err := c.gcEval(eval, cutoffTime, true) if err != nil { continue OUTER } else if gc { @@ -290,10 +290,12 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error { // allocs are not older than the threshold. 
If the eval should be garbage // collected, the associated alloc ids that should also be removed are also // returned -func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64, allowBatch bool) ( +func (c *CoreScheduler) gcEval(eval *structs.Evaluation, cutoffTime time.Time, allowBatch bool) ( bool, []string, error) { + // Ignore non-terminal and new evaluations - if !eval.TerminalStatus() || eval.ModifyIndex > thresholdIndex { + mt := time.Unix(eval.ModifyTime, 0) + if !eval.TerminalStatus() || mt.After(cutoffTime) { return false, nil, nil } @@ -332,7 +334,7 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64, // If we cannot collect outright, check if a partial GC may occur collect := job == nil || job.Status == structs.JobStatusDead && (job.Stop || allowBatch) if !collect { - oldAllocs := olderVersionTerminalAllocs(allocs, job, thresholdIndex) + oldAllocs := olderVersionTerminalAllocs(allocs, job, cutoffTime) gcEval := (len(oldAllocs) == len(allocs)) return gcEval, oldAllocs, nil } @@ -342,7 +344,7 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64, gcEval := true var gcAllocIDs []string for _, alloc := range allocs { - if !allocGCEligible(alloc, job, time.Now(), thresholdIndex) { + if !allocGCEligible(alloc, job, time.Now(), cutoffTime) { // Can't GC the evaluation since not all of the allocations are // terminal gcEval = false @@ -357,10 +359,11 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64, // olderVersionTerminalAllocs returns a list of terminal allocations that belong to the evaluation and may be // GCed. -func olderVersionTerminalAllocs(allocs []*structs.Allocation, job *structs.Job, thresholdIndex uint64) []string { +func olderVersionTerminalAllocs(allocs []*structs.Allocation, job *structs.Job, cutoffTime time.Time) []string { var ret []string for _, alloc := range allocs { - if alloc.CreateIndex < job.JobModifyIndex && alloc.ModifyIndex < thresholdIndex && alloc.TerminalStatus() { + mi := time.Unix(alloc.ModifyTime, 0) + if alloc.CreateIndex < job.JobModifyIndex && mi.Before(cutoffTime) && alloc.TerminalStatus() { ret = append(ret, alloc.ID) } } @@ -625,9 +628,10 @@ func (c *CoreScheduler) partitionDeploymentReap(deployments []string, batchSize // allocGCEligible returns if the allocation is eligible to be garbage collected // according to its terminal status and its reschedule trackers -func allocGCEligible(a *structs.Allocation, job *structs.Job, gcTime time.Time, thresholdIndex uint64) bool { +func allocGCEligible(a *structs.Allocation, job *structs.Job, gcTime, cutoffTime time.Time) bool { // Not in a terminal status and old enough - if !a.TerminalStatus() || a.ModifyIndex > thresholdIndex { + mt := time.Unix(a.ModifyTime, 0) + if !a.TerminalStatus() || mt.After(cutoffTime) { return false } @@ -725,14 +729,14 @@ func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error { return err } - oldThreshold := c.getThreshold(eval, "CSI volume claim", - "csi_volume_claim_gc_threshold", c.srv.config.CSIVolumeClaimGCThreshold) + cutoffTime := c.getCutoffTime(c.srv.config.CSIVolumeClaimGCThreshold) for i := iter.Next(); i != nil; i = iter.Next() { vol := i.(*structs.CSIVolume) // Ignore new volumes - if vol.CreateIndex > oldThreshold { + mt := time.Unix(vol.ModifyTime, 0) + if mt.After(cutoffTime) { continue } @@ -765,14 +769,14 @@ func (c *CoreScheduler) csiPluginGC(eval *structs.Evaluation) error { return err } - oldThreshold := c.getThreshold(eval, 
"CSI plugin", - "csi_plugin_gc_threshold", c.srv.config.CSIPluginGCThreshold) + cutoffTime := c.getCutoffTime(c.srv.config.CSIPluginGCThreshold) for i := iter.Next(); i != nil; i = iter.Next() { plugin := i.(*structs.CSIPlugin) // Ignore new plugins - if plugin.CreateIndex > oldThreshold { + mt := time.Unix(plugin.ModifyTime, 0) + if mt.After(cutoffTime) { continue } @@ -826,15 +830,7 @@ func (c *CoreScheduler) expiredACLTokenGC(eval *structs.Evaluation, global bool) return nil } - // The object name is logged within the getThreshold function, therefore we - // want to be clear what token type this trigger is for. - tokenScope := "local" - if global { - tokenScope = "global" - } - - expiryThresholdIdx := c.getThreshold(eval, tokenScope+" expired ACL tokens", - "acl_token_expiration_gc_threshold", c.srv.config.ACLTokenExpirationGCThreshold) + cutoffTime := c.getCutoffTime(c.srv.config.ACLTokenExpirationGCThreshold) expiredIter, err := c.snap.ACLTokensByExpired(global) if err != nil { @@ -865,7 +861,7 @@ func (c *CoreScheduler) expiredACLTokenGC(eval *structs.Evaluation, global bool) // Check if the token is recent enough to skip, otherwise we'll delete // it. - if token.CreateIndex > expiryThresholdIdx { + if token.CreateTime.After(cutoffTime) { continue } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index a0eac38a6c5..e0714148778 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -313,6 +313,9 @@ type CSIVolume struct { CreateIndex uint64 ModifyIndex uint64 + + CreateTime int64 + ModifyTime int64 } // GetID implements the IDGetter interface, required for pagination. @@ -1097,6 +1100,9 @@ type CSIPlugin struct { CreateIndex uint64 ModifyIndex uint64 + + CreateTime int64 + ModifyTime int64 } // NewCSIPlugin creates the plugin struct. No side-effects From fdead8e5993a7ddf4a6bd5bf2ff5190f52633f99 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 20:00:00 +0200 Subject: [PATCH 09/40] i love that we mix unix and unixnano --- nomad/core_sched.go | 14 +++++++------- nomad/structs/structs.go | 8 +++++--- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index c82b919e520..81c9ded458a 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -126,7 +126,7 @@ OUTER: job := i.(*structs.Job) // Ignore new jobs. 
- st := time.Unix(job.SubmitTime, 0) + st := time.Unix(0, job.SubmitTime) if st.After(cutoffTime) { continue } @@ -294,7 +294,7 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, cutoffTime time.Time, a bool, []string, error) { // Ignore non-terminal and new evaluations - mt := time.Unix(eval.ModifyTime, 0) + mt := time.Unix(0, eval.ModifyTime) if !eval.TerminalStatus() || mt.After(cutoffTime) { return false, nil, nil } @@ -362,7 +362,7 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, cutoffTime time.Time, a func olderVersionTerminalAllocs(allocs []*structs.Allocation, job *structs.Job, cutoffTime time.Time) []string { var ret []string for _, alloc := range allocs { - mi := time.Unix(alloc.ModifyTime, 0) + mi := time.Unix(0, alloc.ModifyTime) if alloc.CreateIndex < job.JobModifyIndex && mi.Before(cutoffTime) && alloc.TerminalStatus() { ret = append(ret, alloc.ID) } @@ -550,7 +550,7 @@ OUTER: deploy := raw.(*structs.Deployment) // Ignore non-terminal and new deployments - mt := time.Unix(deploy.ModifyTime, 0) + mt := time.Unix(0, deploy.ModifyTime) if deploy.Active() || mt.After(cutoffTime) { continue } @@ -630,7 +630,7 @@ func (c *CoreScheduler) partitionDeploymentReap(deployments []string, batchSize // according to its terminal status and its reschedule trackers func allocGCEligible(a *structs.Allocation, job *structs.Job, gcTime, cutoffTime time.Time) bool { // Not in a terminal status and old enough - mt := time.Unix(a.ModifyTime, 0) + mt := time.Unix(0, a.ModifyTime) if !a.TerminalStatus() || mt.After(cutoffTime) { return false } @@ -735,7 +735,7 @@ func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error { vol := i.(*structs.CSIVolume) // Ignore new volumes - mt := time.Unix(vol.ModifyTime, 0) + mt := time.Unix(0, vol.ModifyTime) if mt.After(cutoffTime) { continue } @@ -775,7 +775,7 @@ func (c *CoreScheduler) csiPluginGC(eval *structs.Evaluation) error { plugin := i.(*structs.CSIPlugin) // Ignore new plugins - mt := time.Unix(plugin.ModifyTime, 0) + mt := time.Unix(0, plugin.ModifyTime) if mt.After(cutoffTime) { continue } diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index e41c3baba7b..b23477246f5 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -2169,7 +2169,7 @@ type Node struct { StatusDescription string // StatusUpdatedAt is the time stamp at which the state of the node was - // updated + // updated, stored as Unix (no nano seconds!) StatusUpdatedAt int64 // Events is the most recent set of events generated for the node, @@ -10639,6 +10639,7 @@ type Deployment struct { CreateIndex uint64 ModifyIndex uint64 + // Creation and modification times, stored as UnixNano CreateTime int64 ModifyTime int64 } @@ -11166,10 +11167,10 @@ type Allocation struct { AllocModifyIndex uint64 // CreateTime is the time the allocation has finished scheduling and been - // verified by the plan applier. + // verified by the plan applier, stored as UnixNano. CreateTime int64 - // ModifyTime is the time the allocation was last updated. + // ModifyTime is the time the allocation was last updated stored as UnixNano. 
ModifyTime int64 } @@ -12555,6 +12556,7 @@ type Evaluation struct { CreateIndex uint64 ModifyIndex uint64 + // Creation and modification times stored as UnixNano CreateTime int64 ModifyTime int64 } From 4583cb648af17e03a752c85feccedcd4c8e62e0e Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 20:00:16 +0200 Subject: [PATCH 10/40] csi volumes create/modify time --- nomad/state/state_store.go | 6 +++--- nomad/structs/csi.go | 9 +++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index f24fb7a84a4..8bb142b4fea 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -2583,8 +2583,10 @@ func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) } } else { v.CreateIndex = index + v.CreateTime = time.Now().UnixNano() } v.ModifyIndex = index + v.ModifyTime = time.Now().UnixNano() // Allocations are copy on write, so we want to keep the Allocation ID // but we need to clear the pointer so that we don't store it when we @@ -2805,9 +2807,6 @@ func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, claim *s } if alloc == nil { s.logger.Error("AllocByID failed to find alloc", "alloc_id", claim.AllocationID) - if err != nil { - return fmt.Errorf(structs.ErrUnknownAllocationPrefix) - } } } @@ -2831,6 +2830,7 @@ func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, claim *s } volume.ModifyIndex = index + volume.ModifyTime = time.Now().UnixNano() // Allocations are copy on write, so we want to keep the Allocation ID // but we need to clear the pointer so that we don't store it when we diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index e0714148778..32663d53e7b 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -314,6 +314,7 @@ type CSIVolume struct { CreateIndex uint64 ModifyIndex uint64 + // Creation and modification times stored as UnixNano CreateTime int64 ModifyTime int64 } @@ -367,6 +368,8 @@ type CSIVolListStub struct { CreateIndex uint64 ModifyIndex uint64 + CreateTime int64 + ModifyTime int64 } // NewCSIVolume creates the volume struct. 
No side-effects @@ -375,6 +378,8 @@ func NewCSIVolume(volumeID string, index uint64) *CSIVolume { ID: volumeID, CreateIndex: index, ModifyIndex: index, + CreateTime: time.Now().UnixNano(), + ModifyTime: time.Now().UnixNano(), } out.newStructs() @@ -424,6 +429,8 @@ func (v *CSIVolume) Stub() *CSIVolListStub { ResourceExhausted: v.ResourceExhausted, CreateIndex: v.CreateIndex, ModifyIndex: v.ModifyIndex, + CreateTime: v.CreateTime, + ModifyTime: v.ModifyTime, } } @@ -1111,6 +1118,8 @@ func NewCSIPlugin(id string, index uint64) *CSIPlugin { ID: id, CreateIndex: index, ModifyIndex: index, + CreateTime: time.Now().UnixNano(), + ModifyTime: time.Now().UnixNano(), } out.newStructs() From 11e29a05a0584606bcfda47e5a5abd3e92ae5e53 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 20:12:41 +0200 Subject: [PATCH 11/40] deployment create/modify times --- nomad/leader.go | 4 ---- nomad/state/state_store.go | 11 ++++++++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/nomad/leader.go b/nomad/leader.go index e17cc74ef1a..271635b01ad 100644 --- a/nomad/leader.go +++ b/nomad/leader.go @@ -2872,10 +2872,6 @@ func (s *Server) handleEvalBrokerStateChange(schedConfig *structs.SchedulerConfi s.logger.Info("blocked evals status modified", "paused", !enableBrokers) s.blockedEvals.SetEnabled(enableBrokers) restoreEvals = enableBrokers - - if enableBrokers { - s.blockedEvals.SetTimetable(s.fsm.TimeTable()) - } } return restoreEvals diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 8bb142b4fea..c359359161e 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -591,13 +591,19 @@ func (s *StateStore) upsertDeploymentImpl(index uint64, deployment *structs.Depl return fmt.Errorf("deployment lookup failed: %v", err) } - // Setup the indexes correctly + now := time.Now().UnixNano() + + // Setup the indexes and timestamps correctly if existing != nil { deployment.CreateIndex = existing.(*structs.Deployment).CreateIndex deployment.ModifyIndex = index + deployment.CreateTime = existing.(*structs.Deployment).CreateTime + deployment.ModifyTime = now } else { deployment.CreateIndex = index deployment.ModifyIndex = index + deployment.CreateTime = now + deployment.ModifyTime = now } // Insert the deployment @@ -4866,6 +4872,7 @@ func (s *StateStore) updateDeploymentStatusImpl(index uint64, u *structs.Deploym copy.Status = u.Status copy.StatusDescription = u.StatusDescription copy.ModifyIndex = index + copy.ModifyTime = time.Now().UnixNano() // Insert the deployment if err := txn.Insert("deployment", copy); err != nil { @@ -5107,6 +5114,7 @@ func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, inde // Update deployment copy := deployment.Copy() copy.ModifyIndex = index + copy.ModifyTime = time.Now().UnixNano() for tg, status := range copy.TaskGroups { _, ok := groupIndex[tg] if !req.All && !ok { @@ -5971,6 +5979,7 @@ func (s *StateStore) updateDeploymentWithAlloc(index uint64, alloc, existing *st // Create a copy of the deployment object deploymentCopy := deployment.Copy() deploymentCopy.ModifyIndex = index + deploymentCopy.ModifyTime = time.Now().UnixNano() dstate := deploymentCopy.TaskGroups[alloc.TaskGroup] dstate.PlacedAllocs += placed From a97b72bec6e6cf4b5c08b703092caabef2090c64 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 24 Oct 2024 20:15:21 +0200 Subject: [PATCH 12/40] csi plugin create/modify time on upsert 
--- nomad/state/state_store.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index c359359161e..cfff64bafd1 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -3178,8 +3178,10 @@ func (s *StateStore) UpsertCSIPlugin(index uint64, plug *structs.CSIPlugin) erro } plug.ModifyIndex = index + plug.ModifyTime = time.Now().UnixNano() if existing != nil { plug.CreateIndex = existing.(*structs.CSIPlugin).CreateIndex + plug.CreateTime = existing.(*structs.CSIPlugin).CreateTime } err = txn.Insert("csi_plugins", plug) From b3fc1cbf2083ffe30a40fbb01b5af818a8d1038c Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:46:26 +0100 Subject: [PATCH 13/40] oh this is tedious --- client/client_test.go | 16 +- client/gc_test.go | 11 +- command/agent/alloc_endpoint_test.go | 16 +- command/agent/eval_endpoint_test.go | 3 +- command/agent/event_endpoint_test.go | 2 +- command/agent/fs_endpoint_test.go | 2 +- command/agent/search_endpoint_test.go | 9 +- command/alloc_exec_test.go | 3 +- command/alloc_logs_test.go | 3 +- command/alloc_signal_test.go | 3 +- command/deployment_fail_test.go | 3 +- command/deployment_promote_test.go | 3 +- command/job_allocs_test.go | 9 +- command/job_promote_test.go | 3 +- command/status_test.go | 5 +- nomad/alloc_endpoint_test.go | 56 +- .../deployments_watcher_test.go | 502 ++++++++-------- nomad/deploymentwatcher/testutil_test.go | 7 +- nomad/drainer/watch_nodes_test.go | 4 +- nomad/drainer_int_test.go | 2 +- nomad/fsm_test.go | 36 +- nomad/job_endpoint_statuses_test.go | 4 +- nomad/job_endpoint_test.go | 56 +- nomad/node_endpoint_test.go | 56 +- nomad/operator_endpoint_test.go | 2 +- nomad/periodic_test.go | 2 +- nomad/search_endpoint_test.go | 539 +++++++++--------- nomad/service_registration_endpoint_test.go | 4 +- nomad/state/state_store.go | 76 ++- nomad/state/testing.go | 4 +- scheduler/feasible_test.go | 20 +- scheduler/preemption_test.go | 5 +- scheduler/scheduler_sysbatch_test.go | 29 +- scheduler/testing.go | 2 +- scheduler/util_test.go | 12 +- 35 files changed, 757 insertions(+), 752 deletions(-) diff --git a/client/client_test.go b/client/client_test.go index 92914004208..36d144a7e43 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -603,7 +603,7 @@ func TestClient_WatchAllocs(t *testing.T) { if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil { t.Fatal(err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -628,7 +628,7 @@ func TestClient_WatchAllocs(t *testing.T) { // alloc runner. 
alloc2_2 := alloc2.Copy() alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 104, []*structs.Allocation{alloc2_2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 104, time.Now().UnixNano(), []*structs.Allocation{alloc2_2}); err != nil { t.Fatalf("err upserting stopped alloc: %v", err) } @@ -992,7 +992,7 @@ func TestClient_AddAllocError(t *testing.T) { err = state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)) require.Nil(err) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc1}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc1}) require.Nil(err) // Push this alloc update to the client @@ -1092,7 +1092,7 @@ func TestClient_BlockedAllocations(t *testing.T) { } state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) // Wait until the client downloads and starts the allocation testutil.WaitForResult(func() (bool, error) { @@ -1115,7 +1115,7 @@ func TestClient_BlockedAllocations(t *testing.T) { alloc2.Job = alloc.Job alloc2.JobID = alloc.JobID alloc2.PreviousAllocation = alloc.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}); err != nil { t.Fatalf("err: %v", err) } @@ -1136,7 +1136,7 @@ func TestClient_BlockedAllocations(t *testing.T) { // Change the desired state of the parent alloc to stop alloc1 := alloc.Copy() alloc1.DesiredStatus = structs.AllocDesiredStatusStop - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 300, []*structs.Allocation{alloc1}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 300, time.Now().UnixNano(), []*structs.Allocation{alloc1}); err != nil { t.Fatalf("err: %v", err) } @@ -2047,7 +2047,7 @@ func TestClient_ReconnectAllocs(t *testing.T) { err = state.UpsertJobSummary(101, mock.JobSummary(runningAlloc.JobID)) require.NoError(t, err) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{runningAlloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{runningAlloc}) require.NoError(t, err) // Ensure allocation gets upserted with desired status. 
@@ -2065,7 +2065,7 @@ func TestClient_ReconnectAllocs(t *testing.T) { require.NoError(t, err) unknownAlloc.ClientStatus = structs.AllocClientStatusUnknown unknownAlloc.AppendState(structs.AllocStateFieldClientStatus, structs.AllocClientStatusUnknown) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, runningAlloc.AllocModifyIndex+1, []*structs.Allocation{unknownAlloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, runningAlloc.AllocModifyIndex+1, time.Now().UnixNano(), []*structs.Allocation{unknownAlloc}) require.NoError(t, err) updates := &allocUpdates{ diff --git a/client/gc_test.go b/client/gc_test.go index fa7ebdffb2b..ca61bb366ae 100644 --- a/client/gc_test.go +++ b/client/gc_test.go @@ -18,7 +18,7 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func gcConfig() *GCConfig { @@ -364,7 +364,6 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { ci.Parallel(t) const maxAllocs = 6 - require := require.New(t) server, serverAddr, cleanupS := testServer(t, nil) defer cleanupS() @@ -398,8 +397,8 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { upsertJobFn := func(server *nomad.Server, j *structs.Job) { state := server.State() - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), nil, j)) - require.NoError(state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID))) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), nil, j)) + must.NoError(t, state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID))) } // Insert the Job @@ -407,7 +406,7 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { upsertAllocFn := func(server *nomad.Server, a *structs.Allocation) { state := server.State() - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a})) } upsertNewAllocFn := func(server *nomad.Server, j *structs.Job) *structs.Allocation { @@ -504,7 +503,7 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { } return true, nil }, func(err error) { - require.NoError(err) + must.NoError(t, err) }) } diff --git a/command/agent/alloc_endpoint_test.go b/command/agent/alloc_endpoint_test.go index fa5014dcdea..5043b2c9448 100644 --- a/command/agent/alloc_endpoint_test.go +++ b/command/agent/alloc_endpoint_test.go @@ -53,7 +53,7 @@ func TestHTTP_AllocsList(t *testing.T) { state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -121,7 +121,7 @@ func TestHTTP_AllocsPrefixList(t *testing.T) { if err := state.UpsertJobSummary(999, summary2); err != nil { t.Fatal(err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}); err != nil { t.Fatalf("err: %v", err) } @@ -174,7 +174,7 @@ func TestHTTP_AllocQuery(t *testing.T) { 
state := s.Agent.server.State() alloc := mock.Alloc() require.NoError(state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/allocation/"+alloc.ID, nil) @@ -222,7 +222,7 @@ func TestHTTP_AllocQuery_Payload(t *testing.T) { compressed := snappy.Encode(nil, expected) alloc.Job.Payload = compressed - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -406,7 +406,7 @@ func TestHTTP_AllocStop(t *testing.T) { require := require.New(t) require.NoError(state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Test that the happy path works { @@ -459,7 +459,7 @@ func TestHTTP_allocServiceRegistrations(t *testing.T) { // Generate an alloc and upsert this. alloc := mock.Alloc() require.NoError(t, testState.UpsertAllocs( - structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Generate a service registration, assigned the allocID to the // mocked allocation ID, and upsert this. @@ -494,7 +494,7 @@ func TestHTTP_allocServiceRegistrations(t *testing.T) { // Generate an alloc and upsert this. alloc := mock.Alloc() require.NoError(t, testState.UpsertAllocs( - structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Build the HTTP request. 
path := fmt.Sprintf("/v1/allocation/%s/services", alloc.ID) @@ -744,7 +744,7 @@ func TestHTTP_AllocSnapshot_Atomic(t *testing.T) { } alloc.NodeID = s.client.NodeID() state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc.Copy()}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc.Copy()}); err != nil { t.Fatalf("error upserting alloc: %v", err) } diff --git a/command/agent/eval_endpoint_test.go b/command/agent/eval_endpoint_test.go index 506ba5a2cc4..5bb49b7c41b 100644 --- a/command/agent/eval_endpoint_test.go +++ b/command/agent/eval_endpoint_test.go @@ -9,6 +9,7 @@ import ( "net/http/httptest" "net/url" "testing" + "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -258,7 +259,7 @@ func TestHTTP_EvalAllocations(t *testing.T) { alloc2.EvalID = alloc1.EvalID state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/command/agent/event_endpoint_test.go b/command/agent/event_endpoint_test.go index 66415b5f46a..bea4105e61b 100644 --- a/command/agent/event_endpoint_test.go +++ b/command/agent/event_endpoint_test.go @@ -231,7 +231,7 @@ func TestHTTP_Alloc_Port_Response(t *testing.T) { alloc.NodeID = srv.client.NodeID() require.Nil(t, srv.server.State().UpsertJobSummary(101, mock.JobSummary(alloc.JobID))) - require.Nil(t, srv.server.State().UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) + require.Nil(t, srv.server.State().UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc})) running := false testutil.WaitForResult(func() (bool, error) { diff --git a/command/agent/fs_endpoint_test.go b/command/agent/fs_endpoint_test.go index 1a07e8165a5..2ba5b434c90 100644 --- a/command/agent/fs_endpoint_test.go +++ b/command/agent/fs_endpoint_test.go @@ -67,7 +67,7 @@ func addAllocToClient(agent *TestAgent, alloc *structs.Allocation, wait clientAl // Upsert the allocation state := agent.server.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc})) if wait == noWaitClientAlloc { return diff --git a/command/agent/search_endpoint_test.go b/command/agent/search_endpoint_test.go index 491ae159dd5..8f506ca2344 100644 --- a/command/agent/search_endpoint_test.go +++ b/command/agent/search_endpoint_test.go @@ -8,6 +8,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" @@ -344,7 +345,7 @@ func TestHTTP_PrefixSearch_Allocations(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() alloc := mockAlloc() - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, time.Now().UnixNano(), []*structs.Allocation{alloc}) require.NoError(t, err) prefix := 
alloc.ID[:len(alloc.ID)-2] @@ -375,7 +376,7 @@ func TestHTTP_FuzzySearch_Allocations(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() alloc := mockAlloc() - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, time.Now().UnixNano(), []*structs.Allocation{alloc}) require.NoError(t, err) data := structs.FuzzySearchRequest{Text: "-job", Context: structs.Allocs} @@ -466,7 +467,7 @@ func TestHTTP_PrefixSearch_Deployments(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() deployment := mock.Deployment() - require.NoError(t, state.UpsertDeployment(999, deployment), "UpsertDeployment") + require.NoError(t, state.UpsertDeployment(999, time.Now().UnixNano(), deployment), "UpsertDeployment") prefix := deployment.ID[:len(deployment.ID)-2] data := structs.SearchRequest{Prefix: prefix, Context: structs.Deployments} @@ -494,7 +495,7 @@ func TestHTTP_FuzzySearch_Deployments(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() deployment := mock.Deployment() - require.NoError(t, state.UpsertDeployment(999, deployment), "UpsertDeployment") + require.NoError(t, state.UpsertDeployment(999, time.Now().UnixNano(), deployment), "UpsertDeployment") // fuzzy search of deployments are prefix searches prefix := deployment.ID[:len(deployment.ID)-2] diff --git a/command/alloc_exec_test.go b/command/alloc_exec_test.go index 3c730d65f02..d4383f29716 100644 --- a/command/alloc_exec_test.go +++ b/command/alloc_exec_test.go @@ -8,6 +8,7 @@ import ( "fmt" "strings" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" @@ -153,7 +154,7 @@ func TestAllocExecCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_logs_test.go b/command/alloc_logs_test.go index 5cee74972ec..88e262cb0c0 100644 --- a/command/alloc_logs_test.go +++ b/command/alloc_logs_test.go @@ -5,6 +5,7 @@ package command import ( "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -80,7 +81,7 @@ func TestLogsCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_signal_test.go b/command/alloc_signal_test.go index f27bb64dc06..40addffe6a0 100644 --- a/command/alloc_signal_test.go +++ b/command/alloc_signal_test.go @@ -5,6 +5,7 @@ package command import ( "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -85,7 +86,7 @@ func TestAllocSignalCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{All: []string{"signal", prefix}, Last: prefix} diff --git a/command/deployment_fail_test.go b/command/deployment_fail_test.go index c6d3c72330d..bceae5584e9 100644 --- a/command/deployment_fail_test.go +++ b/command/deployment_fail_test.go @@ -6,6 +6,7 @@ package command import ( "strings" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -54,7 +55,7 @@ func TestDeploymentFailCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.Nil(t, state.UpsertDeployment(1000, d)) + must.Nil(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_promote_test.go b/command/deployment_promote_test.go index 38aefcddb9e..37107f9b792 100644 --- a/command/deployment_promote_test.go +++ b/command/deployment_promote_test.go @@ -6,6 +6,7 @@ package command import ( "strings" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -54,7 +55,7 @@ func TestDeploymentPromoteCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, d)) + must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/job_allocs_test.go b/command/job_allocs_test.go index 4f51eec6fdd..b4d1a08e1a7 100644 --- a/command/job_allocs_test.go +++ b/command/job_allocs_test.go @@ -5,6 +5,7 @@ package command import ( "testing" + "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -83,7 +84,7 @@ func TestJobAllocsCommand_Run(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{a})) // Should now display the alloc code = cmd.Run([]string{"-address=" + url, "-verbose", job.ID}) @@ -118,7 +119,7 @@ func TestJobAllocsCommand_Template(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{a})) // Inject a pending allocation b := mock.Alloc() @@ -128,7 +129,7 @@ func TestJobAllocsCommand_Template(t *testing.T) { b.Metrics = &structs.AllocMetric{} b.DesiredStatus = structs.AllocDesiredStatusRun b.ClientStatus = structs.AllocClientStatusPending - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 300, []*structs.Allocation{b})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 300, time.Now().UnixNano(), []*structs.Allocation{b})) // Should display an AllocacitonListStub object code := cmd.Run([]string{"-address=" + url, "-t", "'{{printf \"%#+v\" .}}'", job.ID}) @@ -200,7 +201,7 @@ func TestJobAllocsCommand_ACL(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = 
structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{a}) must.NoError(t, err) testCases := []struct { diff --git a/command/job_promote_test.go b/command/job_promote_test.go index 299bb370438..86891c567db 100644 --- a/command/job_promote_test.go +++ b/command/job_promote_test.go @@ -6,6 +6,7 @@ package command import ( "strings" "testing" + "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -154,7 +155,7 @@ namespace "default" { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - err = state.UpsertDeployment(uint64(301+i), d) + err = state.UpsertDeployment(uint64(301+i), time.Now().UnixNano(), d) must.NoError(t, err) if tc.aclPolicy != "" { diff --git a/command/status_test.go b/command/status_test.go index cce94977b4e..c9233552ad8 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -7,6 +7,7 @@ import ( "fmt" "regexp" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" @@ -145,7 +146,7 @@ func TestStatusCommand_Run_AllocStatus(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() alloc := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) code := cmd.Run([]string{"-address=" + url, alloc.ID}) must.Zero(t, code) @@ -168,7 +169,7 @@ func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() deployment := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, deployment)) + must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), deployment)) // Query to check the deployment status code := cmd.Run([]string{"-address=" + url, deployment.ID}) diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index 715f311150d..b2063c74799 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -38,7 +38,7 @@ func TestAllocEndpoint_List(t *testing.T) { if err := state.UpsertJobSummary(999, summary); err != nil { t.Fatalf("err: %v", err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -151,7 +151,7 @@ func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { } // other fields index := 1000 + uint64(i) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, allocsInTx)) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), allocsInTx)) } aclToken := mock.CreatePolicyAndToken(t, @@ -337,17 +337,17 @@ func TestAllocEndpoint_List_order(t *testing.T) { alloc3 := mock.Alloc() alloc3.ID = uuid3 - err := s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) + err := s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) require.NoError(t, err) - err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}) + err = 
s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc2}) require.NoError(t, err) - err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc3}) + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc3}) require.NoError(t, err) // update alloc2 again so we can later assert create index order did not change - err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2}) + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc2}) require.NoError(t, err) t.Run("default", func(t *testing.T) { @@ -426,7 +426,7 @@ func TestAllocEndpoint_List_Fields(t *testing.T) { state := s1.fsm.State() require.NoError(t, state.UpsertJobSummary(999, summary)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) cases := []struct { Name string @@ -521,7 +521,7 @@ func TestAllocEndpoint_List_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs), "UpsertAllocs") stubAllocs := []*structs.AllocListStub{alloc.Stub(nil)} stubAllocs[0].CreateIndex = 1000 @@ -580,7 +580,7 @@ func TestAllocEndpoint_List_Blocking(t *testing.T) { } // Upsert alloc triggers watches time.AfterFunc(100*time.Millisecond, func() { - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } }) @@ -614,7 +614,7 @@ func TestAllocEndpoint_List_Blocking(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusRunning time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(3, mock.JobSummary(alloc2.JobID)) - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 4, []*structs.Allocation{alloc2}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 4, time.Now().UnixNano(), []*structs.Allocation{alloc2}); err != nil { t.Fatalf("err: %v", err) } }) @@ -670,8 +670,8 @@ func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { require.NoError(t, state.UpsertJobSummary(1000, summary1)) require.NoError(t, state.UpsertJobSummary(1001, summary2)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc1})) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc1})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc2})) t.Run("looking up all allocations", func(t *testing.T) { get := &structs.AllocListRequest{ @@ -739,7 +739,7 @@ func TestAllocEndpoint_GetAlloc(t *testing.T) { } state := s1.fsm.State() state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err := 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -778,7 +778,7 @@ func TestAllocEndpoint_GetAlloc_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -889,7 +889,7 @@ func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { // First create an unrelated alloc time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -898,7 +898,7 @@ func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { // Create the alloc we are watching later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -943,7 +943,7 @@ func TestAllocEndpoint_GetAllocs(t *testing.T) { state := s1.fsm.State() state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1000,7 +1000,7 @@ func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { // First create an unrelated alloc time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -1009,7 +1009,7 @@ func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { // Create the alloc we are watching later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1056,7 +1056,7 @@ func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { state := s1.fsm.State() require.Nil(state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID))) require.Nil(state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) t1 := &structs.DesiredTransition{ Migrate: pointer.Of(true), @@ -1140,7 +1140,7 @@ func 
TestAllocEndpoint_Stop_ACL(t *testing.T) { state := s1.fsm.State() require.Nil(state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID))) require.Nil(state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) req := &structs.AllocStopRequest{ AllocID: alloc.ID, @@ -1213,7 +1213,7 @@ func TestAllocEndpoint_List_AllNamespaces_ACL_OSS(t *testing.T) { require.NoError(t, state.UpsertJobSummary(999, summary1)) require.NoError(t, state.UpsertJobSummary(999, summary2)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) alloc1.CreateIndex = 1000 alloc1.ModifyIndex = 1000 alloc2.CreateIndex = 1000 @@ -1382,7 +1382,7 @@ func TestAlloc_GetServiceRegistrations(t *testing.T) { correctSetupFn := func(s *Server) (error, string, *structs.ServiceRegistration) { // Generate an upsert an allocation. alloc := mock.Alloc() - err := s.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc}) + err := s.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { return nil, "", nil } @@ -1502,7 +1502,7 @@ func TestAlloc_GetServiceRegistrations(t *testing.T) { // Generate an upsert an allocation. alloc := mock.Alloc() require.NoError(t, s.State().UpsertAllocs( - structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Perform a lookup using the allocation information. 
serviceRegReq := &structs.AllocServiceRegistrationsRequest{ @@ -1733,7 +1733,7 @@ func TestAlloc_SignIdentities_Bad(t *testing.T) { summary := mock.JobSummary(alloc.JobID) state := s1.fsm.State() must.NoError(t, state.UpsertJobSummary(100, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 101, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 101, time.Now().UnixNano(), []*structs.Allocation{alloc})) // A valid alloc and invalid TaskName is an error req.Identities[0].AllocID = alloc.ID @@ -1801,7 +1801,7 @@ func TestAlloc_SignIdentities_Blocking(t *testing.T) { otherAlloc := mock.Alloc() otherSummary := mock.JobSummary(otherAlloc.JobID) must.NoError(t, state.UpsertJobSummary(999, otherSummary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{otherAlloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{otherAlloc})) type resultT struct { Err error @@ -1848,7 +1848,7 @@ func TestAlloc_SignIdentities_Blocking(t *testing.T) { otherAlloc = mock.Alloc() otherSummary = mock.JobSummary(otherAlloc.JobID) must.NoError(t, state.UpsertJobSummary(1997, otherSummary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1998, []*structs.Allocation{otherAlloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1998, time.Now().UnixNano(), []*structs.Allocation{otherAlloc})) select { case result := <-resultCh: @@ -1858,7 +1858,7 @@ func TestAlloc_SignIdentities_Blocking(t *testing.T) { // Finally add the alloc we're waiting for must.NoError(t, state.UpsertJobSummary(1999, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2000, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2000, time.Now().UnixNano(), []*structs.Allocation{alloc})) select { case result := <-resultCh: diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index a6b880441f7..f2cb2e6ac2f 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -20,7 +20,6 @@ import ( "github.com/shoenig/test/wait" "github.com/stretchr/testify/assert" mocker "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" ) func testDeploymentWatcher(t *testing.T, qps float64, batchDur time.Duration) (*Watcher, *mockBackend) { @@ -36,7 +35,6 @@ func defaultTestDeploymentWatcher(t *testing.T) (*Watcher, *mockBackend) { // Tests that the watcher properly watches for deployments and reconciles them func TestWatcher_WatchDeployments(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -45,9 +43,9 @@ func TestWatcher_WatchDeployments(t *testing.T) { // Create three jobs j1, j2, j3 := mock.Job(), mock.Job(), mock.Job() - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, j1)) - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, j2)) - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 102, nil, j3)) + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, j1)) + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, j2)) + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, 102, nil, j3)) // Create three deployments 
all running d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() @@ -56,14 +54,14 @@ func TestWatcher_WatchDeployments(t *testing.T) { d3.JobID = j3.ID // Upsert the first deployment - require.Nil(m.state.UpsertDeployment(103, d1)) + must.Nil(t, m.state.UpsertDeployment(103, time.Now().UnixNano(), d1)) // Next list 3 block1 := make(chan time.Time) go func() { <-block1 - require.Nil(m.state.UpsertDeployment(104, d2)) - require.Nil(m.state.UpsertDeployment(105, d3)) + must.Nil(t, m.state.UpsertDeployment(104, time.Now().UnixNano(), d2)) + must.Nil(t, m.state.UpsertDeployment(105, time.Now().UnixNano(), d3)) }() //// Next list 3 but have one be terminal @@ -72,27 +70,25 @@ func TestWatcher_WatchDeployments(t *testing.T) { d3terminal.Status = structs.DeploymentStatusFailed go func() { <-block2 - require.Nil(m.state.UpsertDeployment(106, d3terminal)) + must.Nil(t, m.state.UpsertDeployment(106, time.Now().UnixNano(), d3terminal)) }() w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "1 deployment returned") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("1 deployment returned")) }) close(block1) testutil.WaitForResult(func() (bool, error) { return 3 == watchersCount(w), nil }, - func(err error) { require.Equal(3, watchersCount(w), "3 deployment returned") }) + func(err error) { must.Eq(t, 3, watchersCount(w), must.Sprint("3 deployment returned")) }) close(block2) testutil.WaitForResult(func() (bool, error) { return 2 == watchersCount(w), nil }, - func(err error) { require.Equal(3, watchersCount(w), "3 deployment returned - 1 terminal") }) + func(err error) { must.Eq(t, 3, watchersCount(w), must.Sprint("3 deployment returned - 1 terminal")) }) } // Tests that calls against an unknown deployment fail func TestWatcher_UnknownDeployment(t *testing.T) { ci.Parallel(t) - assert := assert.New(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) w.SetEnabled(true, m.state) @@ -111,9 +107,8 @@ func TestWatcher_UnknownDeployment(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - if assert.NotNil(err, "should have error for unknown deployment") { - require.Contains(err.Error(), expected) - } + must.Error(t, err, must.Sprint("should have error for unknown deployment")) + must.ErrorContains(t, err, expected) // Request promoting against an unknown deployment req2 := &structs.DeploymentPromoteRequest{ @@ -121,9 +116,8 @@ func TestWatcher_UnknownDeployment(t *testing.T) { All: true, } err = w.PromoteDeployment(req2, &resp) - if assert.NotNil(err, "should have error for unknown deployment") { - require.Contains(err.Error(), expected) - } + must.Error(t, err, must.Sprint("should have error for unknown deployment")) + must.ErrorContains(t, err, expected) // Request pausing against an unknown deployment req3 := &structs.DeploymentPauseRequest{ @@ -131,25 +125,21 @@ func TestWatcher_UnknownDeployment(t *testing.T) { Pause: true, } err = w.PauseDeployment(req3, &resp) - if assert.NotNil(err, "should have error for unknown deployment") { - require.Contains(err.Error(), expected) - } + must.Error(t, err, must.Sprint("should have error for unknown deployment")) + must.ErrorContains(t, err, expected) // Request failing against an unknown deployment req4 := &structs.DeploymentFailRequest{ DeploymentID: dID, } err = w.FailDeployment(req4, &resp) - if assert.NotNil(err, "should have error for unknown 
deployment") { - require.Contains(err.Error(), expected) - } + must.Error(t, err, must.Sprint("should have error for unknown deployment")) + must.ErrorContains(t, err, expected) } // Test setting an unknown allocation's health func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { ci.Parallel(t) - assert := assert.New(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -160,8 +150,8 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // require that we get a call to UpsertDeploymentAllocHealth a := mock.Alloc() @@ -175,7 +165,7 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -184,16 +174,14 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - if assert.NotNil(err, "Set health of unknown allocation") { - require.Contains(err.Error(), "unknown") - } - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Error(t, err, must.Sprint("Set health of unknown allocation")) + must.ErrorContains(t, err, "unknown") + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) } // Test setting allocation health func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -206,9 +194,10 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { d.JobID = j.ID a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -221,7 +210,7 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + 
func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -230,15 +219,14 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - require.Nil(err, "SetAllocHealth") - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Nil(t, err, must.Sprint("SetAllocHealth")) + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentAllocHealth", mocker.MatchedBy(matcher)) } // Test setting allocation unhealthy func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -251,9 +239,10 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { d.JobID = j.ID a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -271,7 +260,7 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -280,17 +269,16 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - require.Nil(err, "SetAllocHealth") + must.Nil(t, err, must.Sprint("SetAllocHealth")) testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) + func(err error) { must.Eq(t, 0, watchersCount(w), must.Sprint("Should have no deployment")) }) m.AssertNumberOfCalls(t, "UpdateDeploymentAllocHealth", 1) } // Test setting allocation unhealthy and that there should be a rollback func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -309,9 +297,10 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - 
require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // Upsert the job again to get a new version j2 := j.Copy() @@ -319,7 +308,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { // Modify the job to make its specification different j2.Meta["foo"] = "bar" - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob2")) // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -338,7 +327,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -347,17 +336,16 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - require.Nil(err, "SetAllocHealth") + must.Nil(t, err, must.Sprint("SetAllocHealth")) testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) + func(err error) { must.Eq(t, 0, watchersCount(w), must.Sprint("Should have no deployment")) }) m.AssertNumberOfCalls(t, "UpdateDeploymentAllocHealth", 1) } // Test setting allocation unhealthy on job with identical spec and there should be no rollback func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -376,15 +364,16 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // Upsert the job again to get a new version j2 := j.Copy() j2.Stable = false - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), 
must.Sprint("UpsertJob2")) // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -403,7 +392,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -412,17 +401,16 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - require.Nil(err, "SetAllocHealth") + must.Nil(t, err, must.Sprint("SetAllocHealth")) testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) + func(err error) { must.Eq(t, 0, watchersCount(w), must.Sprint("Should have no deployment")) }) m.AssertNumberOfCalls(t, "UpdateDeploymentAllocHealth", 1) } // Test promoting a deployment func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -444,9 +432,10 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { Healthy: pointer.Of(true), } a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we get a call to UpsertDeploymentPromotion matchConfig := &matchDeploymentPromoteRequestConfig{ @@ -465,7 +454,7 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call PromoteDeployment req := &structs.DeploymentPromoteRequest{ @@ -474,15 +463,14 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PromoteDeployment(req, &resp) - require.Nil(err, "PromoteDeployment") - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Nil(t, err, must.Sprint("PromoteDeployment")) + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher)) } // Test promoting a deployment with unhealthy canaries func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) 
m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -501,9 +489,10 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} d.TaskGroups[a.TaskGroup].DesiredCanaries = 2 a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we get a call to UpsertDeploymentPromotion matchConfig := &matchDeploymentPromoteRequestConfig{ @@ -518,7 +507,7 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call SetAllocHealth req := &structs.DeploymentPromoteRequest{ @@ -529,10 +518,10 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { err := w.PromoteDeployment(req, &resp) if assert.NotNil(t, err, "PromoteDeployment") { // 0/2 because the old version has been stopped but the canary isn't marked healthy yet - require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`, "Should error because canary isn't marked healthy") + must.ErrorContains(t, err, `Task group "web" has 0/2 healthy allocations`, must.Sprint("Should error because canary isn't marked healthy")) } - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher)) } @@ -612,9 +601,10 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { d.TaskGroups[ca1.TaskGroup].PlacedCanaries = []string{ca1.ID, ca2.ID} d.TaskGroups[ca1.TaskGroup].DesiredCanaries = 2 d.TaskGroups[ra1.TaskGroup].PlacedAllocs = 2 - require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.NoError(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{ca1, ca2, ra1, ra2}), "UpsertAllocs") + must.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.NoError(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{ca1, ca2, ra1, ra2}), must.Sprint("UpsertAllocs")) // ============================================================= // Support method calls @@ -660,7 +650,7 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { func(err error) { w.l.RLock() defer w.l.RUnlock() - require.Equal(t, 1, 
len(w.watchers), "Should have 1 deployment") + must.Eq(t, 1, len(w.watchers), must.Sprint("Should have 1 deployment")) }, ) @@ -673,7 +663,7 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { // Calls w.raft.UpdateDeploymentAllocHealth, which is implemented by StateStore in // state.UpdateDeploymentAllocHealth via a raft shim? err := w.SetAllocHealth(req, &resp) - require.NoError(t, err) + must.NoError(t, err) ws := memdb.NewWatchSet() @@ -683,22 +673,22 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { d = ds[0] return 2 == d.TaskGroups["web"].HealthyAllocs, nil }, - func(err error) { require.NoError(t, err) }, + func(err error) { must.NoError(t, err) }, ) - require.Equal(t, 1, len(w.watchers), "Deployment should still be active") + must.Eq(t, 1, len(w.watchers), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher2)) - require.Equal(t, "running", d.Status) - require.True(t, d.TaskGroups["web"].Promoted) + must.Eq(t, "running", d.Status) + must.True(t, d.TaskGroups["web"].Promoted) a1, _ := m.state.AllocByID(ws, ca1.ID) - require.False(t, a1.DeploymentStatus.Canary) - require.Equal(t, "pending", a1.ClientStatus) - require.Equal(t, "run", a1.DesiredStatus) + must.False(t, a1.DeploymentStatus.Canary) + must.Eq(t, "pending", a1.ClientStatus) + must.Eq(t, "run", a1.DesiredStatus) b1, _ := m.state.AllocByID(ws, ca2.ID) - require.False(t, b1.DeploymentStatus.Canary) + must.False(t, b1.DeploymentStatus.Canary) } func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { @@ -747,9 +737,10 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { d.TaskGroups[ca1.TaskGroup].PlacedCanaries = []string{ca1.ID, ca2.ID, ca3.ID} d.TaskGroups[ca1.TaskGroup].DesiredCanaries = 2 - require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.NoError(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{ca1, ca2, ca3}), "UpsertAllocs") + must.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.NoError(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{ca1, ca2, ca3}), must.Sprint("UpsertAllocs")) // ============================================================= // Support method calls @@ -795,7 +786,7 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { func(err error) { w.l.RLock() defer w.l.RUnlock() - require.Equal(t, 1, len(w.watchers), "Should have 1 deployment") + must.Eq(t, 1, len(w.watchers), must.Sprint("Should have 1 deployment")) }, ) @@ -808,7 +799,7 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { // Calls w.raft.UpdateDeploymentAllocHealth, which is implemented by StateStore in // state.UpdateDeploymentAllocHealth via a raft shim? 
err := w.SetAllocHealth(req, &resp) - require.NoError(t, err) + must.NoError(t, err) ws := memdb.NewWatchSet() @@ -818,29 +809,28 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { d = ds[0] return 2 == d.TaskGroups["web"].HealthyAllocs, nil }, - func(err error) { require.NoError(t, err) }, + func(err error) { must.NoError(t, err) }, ) // Verify that a promotion request was submitted. - require.Equal(t, 1, len(w.watchers), "Deployment should still be active") + must.Eq(t, 1, len(w.watchers), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher2)) - require.Equal(t, "running", d.Status) - require.True(t, d.TaskGroups["web"].Promoted) + must.Eq(t, "running", d.Status) + must.True(t, d.TaskGroups["web"].Promoted) a1, _ := m.state.AllocByID(ws, ca1.ID) - require.False(t, a1.DeploymentStatus.Canary) - require.Equal(t, "pending", a1.ClientStatus) - require.Equal(t, "run", a1.DesiredStatus) + must.False(t, a1.DeploymentStatus.Canary) + must.Eq(t, "pending", a1.ClientStatus) + must.Eq(t, "run", a1.DesiredStatus) b1, _ := m.state.AllocByID(ws, ca2.ID) - require.False(t, b1.DeploymentStatus.Canary) + must.False(t, b1.DeploymentStatus.Canary) } // Test pausing a deployment that is running func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // clear UpdateDeploymentStatus default expectation @@ -850,8 +840,8 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -864,7 +854,7 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -873,16 +863,15 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - require.Nil(err, "PauseDeployment") + must.Nil(t, err, must.Sprint("PauseDeployment")) - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test pausing a deployment that is paused func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // clear UpdateDeploymentStatus default expectation @@ -893,8 +882,8 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.Status = structs.DeploymentStatusPaused - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), 
"UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -907,7 +896,7 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -916,16 +905,15 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - require.Nil(err, "PauseDeployment") + must.Nil(t, err, must.Sprint("PauseDeployment")) - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test unpausing a deployment that is paused func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // Create a job and a deployment @@ -933,8 +921,8 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.Status = structs.DeploymentStatusPaused - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -948,7 +936,7 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -957,24 +945,23 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - require.Nil(err, "PauseDeployment") + must.Nil(t, err, must.Sprint("PauseDeployment")) - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test unpausing a deployment that is running func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // Create a job and a deployment j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - 
require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -988,7 +975,7 @@ func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -997,24 +984,23 @@ func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - require.Nil(err, "PauseDeployment") + must.Nil(t, err, must.Sprint("PauseDeployment")) - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test failing a deployment that is running func TestWatcher_FailDeployment_Running(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // Create a job and a deployment j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -1028,7 +1014,7 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Call PauseDeployment req := &structs.DeploymentFailRequest{ @@ -1036,9 +1022,9 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.FailDeployment(req, &resp) - require.Nil(err, "FailDeployment") + must.Nil(t, err, must.Sprint("FailDeployment")) - require.Equal(1, watchersCount(w), "Deployment should still be active") + must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } @@ -1046,7 +1032,6 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { // proper actions func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) // Create a job, alloc, and a deployment @@ -1061,16 +1046,17 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - 
require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // Upsert the job again to get a new version j2 := j.Copy() // Modify the job to make its specification different j2.Meta["foo"] = "bar" j2.Stable = false - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob2")) // require that we will get a update allocation call only once. This will // verify that the watcher is batching allocation changes @@ -1090,7 +1076,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Update the allocs health to healthy which should create an evaluation for i := 0; i < 5; i++ { @@ -1100,7 +1086,8 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { HealthyAllocationIDs: []string{a.ID}, }, } - require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req), "UpsertDeploymentAllocHealth") + must.Nil(t, m.state.UpdateDeploymentAllocHealth( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req), must.Sprint("UpsertDeploymentAllocHealth")) } // Wait for there to be one eval @@ -1128,7 +1115,8 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { UnhealthyAllocationIDs: []string{a.ID}, }, } - require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req2), "UpsertDeploymentAllocHealth") + must.Nil(t, m.state.UpdateDeploymentAllocHealth( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req2), must.Sprint("UpsertDeploymentAllocHealth")) // Wait for there to be one eval testutil.WaitForResult(func() (bool, error) { @@ -1161,12 +1149,11 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { m3 := matchDeploymentStatusUpdateRequest(c2) m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(m3)) testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) + func(err error) { must.Eq(t, 0, watchersCount(w), must.Sprint("Should have no deployment")) }) } func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) // Create a job, alloc, and a deployment @@ -1183,9 +1170,9 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { a.CreateTime = now.UnixNano() a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), 
"UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we get a call to UpsertDeploymentStatusUpdate c := &matchDeploymentStatusUpdateConfig{ @@ -1199,7 +1186,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Update the alloc to be unhealthy and require that nothing happens. a2 := a.Copy() @@ -1207,7 +1194,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { Healthy: pointer.Of(false), Timestamp: now, } - require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 100, []*structs.Allocation{a2})) + must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{a2})) // Wait for the deployment to be failed testutil.WaitForResult(func() (bool, error) { @@ -1242,7 +1229,6 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { // Test that progress deadline handling works when there are multiple groups func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1278,9 +1264,10 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { a2.ModifyTime = now.UnixNano() a2.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a, a2}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a, a2}), must.Sprint("UpsertAllocs")) // We may get an update for the desired transition. 
m1 := matchUpdateAllocDesiredTransitions([]string{d.ID}) @@ -1288,67 +1275,67 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) watcher, err := w.getOrCreateWatcher(d.ID) - require.NoError(err) - require.NotNil(watcher) + must.NoError(t, err) + must.NotNil(t, watcher) d1, err := m.state.DeploymentByID(nil, d.ID) - require.NoError(err) + must.NoError(t, err) done := watcher.doneGroups(d1) - require.Contains(done, "web") - require.False(done["web"]) - require.Contains(done, "foo") - require.False(done["foo"]) + must.MapContainsKey(t, done, "web") + must.False(t, done["web"]) + must.MapContainsKey(t, done, "foo") + must.False(t, done["foo"]) cutoff1 := watcher.getDeploymentProgressCutoff(d1) - require.False(cutoff1.IsZero()) + must.False(t, cutoff1.IsZero()) // Update the first allocation to be healthy a3 := a.Copy() a3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a3}), "UpsertAllocs") + must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a3}), must.Sprint("UpsertAllocs")) // Get the updated deployment d2, err := m.state.DeploymentByID(nil, d.ID) - require.NoError(err) + must.NoError(t, err) done = watcher.doneGroups(d2) - require.Contains(done, "web") - require.True(done["web"]) - require.Contains(done, "foo") - require.False(done["foo"]) + must.MapContainsKey(t, done, "web") + must.True(t, done["web"]) + must.MapContainsKey(t, done, "foo") + must.False(t, done["foo"]) cutoff2 := watcher.getDeploymentProgressCutoff(d2) - require.False(cutoff2.IsZero()) - require.True(cutoff1.UnixNano() < cutoff2.UnixNano()) + must.False(t, cutoff2.IsZero()) + must.True(t, cutoff1.UnixNano() < cutoff2.UnixNano()) // Update the second allocation to be healthy a4 := a2.Copy() a4.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a4}), "UpsertAllocs") + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a4}), must.Sprint("UpsertAllocs")) // Get the updated deployment d3, err := m.state.DeploymentByID(nil, d.ID) - require.NoError(err) + must.NoError(t, err) done = watcher.doneGroups(d3) - require.Contains(done, "web") - require.True(done["web"]) - require.Contains(done, "foo") - require.True(done["foo"]) + must.MapContainsKey(t, done, "web") + must.True(t, done["web"]) + must.MapContainsKey(t, done, "foo") + must.True(t, done["foo"]) cutoff3 := watcher.getDeploymentProgressCutoff(d2) - require.True(cutoff3.IsZero()) + must.True(t, cutoff3.IsZero()) } // Test that we will allow the progress deadline to be reached when the canaries // are healthy but we haven't promoted func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1372,9 +1359,10 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t 
*testing.T) { a.CreateTime = now.UnixNano() a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we will get a createEvaluation call only once. This will // verify that the watcher is batching allocation changes @@ -1383,7 +1371,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Update the alloc to be unhealthy and require that nothing happens. a2 := a.Copy() @@ -1391,22 +1379,22 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { Healthy: pointer.Of(true), Timestamp: now, } - require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) + must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a2})) // Wait for the deployment to cross the deadline dout, err := m.state.DeploymentByID(nil, d.ID) - require.NoError(err) - require.NotNil(dout) + must.NoError(t, err) + must.NotNil(t, dout) state := dout.TaskGroups["web"] - require.NotNil(state) + must.NotNil(t, state) time.Sleep(state.RequireProgressBy.Add(time.Second).Sub(now)) // Require the deployment is still running dout, err = m.state.DeploymentByID(nil, d.ID) - require.NoError(err) - require.NotNil(dout) - require.Equal(structs.DeploymentStatusRunning, dout.Status) - require.Equal(structs.DeploymentStatusDescriptionRunningNeedsPromotion, dout.StatusDescription) + must.NoError(t, err) + must.NotNil(t, dout) + must.Eq(t, structs.DeploymentStatusRunning, dout.Status) + must.Eq(t, structs.DeploymentStatusDescriptionRunningNeedsPromotion, dout.StatusDescription) // require there are is only one evaluation testutil.WaitForResult(func() (bool, error) { @@ -1430,7 +1418,6 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { // evals to move the deployment forward func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1464,13 +1451,14 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { Healthy: pointer.Of(true), Timestamp: now, } - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, 
m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) m1 := matchUpdateAllocDesiredTransitions([]string{d.ID}) m.On("UpdateAllocDesiredTransition", mocker.MatchedBy(m1)).Return(nil).Twice() @@ -1486,17 +1474,18 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { Timestamp: now, } d.TaskGroups["web"].RequireProgressBy = time.Now().Add(2 * time.Second) - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // Wait until batch eval period passes before updating another alloc time.Sleep(1 * time.Second) - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2}), "UpsertAllocs") + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a2}), must.Sprint("UpsertAllocs")) // Wait for the deployment to cross the deadline dout, err := m.state.DeploymentByID(nil, d.ID) - require.NoError(err) - require.NotNil(dout) + must.NoError(t, err) + must.NotNil(t, dout) state := dout.TaskGroups["web"] - require.NotNil(state) + must.NotNil(t, state) time.Sleep(state.RequireProgressBy.Add(time.Second).Sub(now)) // There should be two evals @@ -1519,7 +1508,6 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { ci.Parallel(t) - require := require.New(t) mtype := structs.MsgTypeTestSetup w, m := defaultTestDeploymentWatcher(t) @@ -1564,8 +1552,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { }, } - require.NoError(m.state.UpsertJob(mtype, m.nextIndex(), nil, j)) - require.NoError(m.state.UpsertDeployment(m.nextIndex(), d)) + must.NoError(t, m.state.UpsertJob(mtype, m.nextIndex(), nil, j)) + must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d)) // require that we get a call to UpsertDeploymentPromotion matchConfig := &matchDeploymentPromoteRequestConfig{ @@ -1599,8 +1587,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { canary2.ModifyTime = now.UnixNano() allocs := []*structs.Allocation{canary1, canary2} - err := m.state.UpsertAllocs(mtype, m.nextIndex(), allocs) - require.NoError(err) + err := m.state.UpsertAllocs(mtype, m.nextIndex(), time.Now().UnixNano(), allocs) + must.NoError(t, err) // 2nd group's canary becomes healthy @@ -1615,8 +1603,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { } allocs = []*structs.Allocation{canary2} - err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), allocs) - require.NoError(err) + err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), now.UnixNano(), allocs) + must.NoError(t, err) // wait for long enough to ensure we read deployment update channel // this sleep creates the race condition associated with 
#7058 @@ -1634,8 +1622,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { } allocs = []*structs.Allocation{canary1} - err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), allocs) - require.NoError(err) + err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), now.UnixNano(), allocs) + must.NoError(t, err) // ensure progress_deadline has definitely expired time.Sleep(progressTimeout) @@ -1647,7 +1635,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { All: true, } err = w.PromoteDeployment(req, &structs.DeploymentUpdateResponse{}) - require.NoError(err) + must.NoError(t, err) // wait for long enough to ensure we read deployment update channel time.Sleep(50 * time.Millisecond) @@ -1673,8 +1661,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { alloc1b.ModifyTime = now.UnixNano() allocs = []*structs.Allocation{alloc1a, alloc1b} - err = m.state.UpsertAllocs(mtype, m.nextIndex(), allocs) - require.NoError(err) + err = m.state.UpsertAllocs(mtype, m.nextIndex(), now.UnixNano(), allocs) + must.NoError(t, err) // allocs become healthy @@ -1699,8 +1687,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { } allocs = []*structs.Allocation{alloc1a, alloc1b} - err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), allocs) - require.NoError(err) + err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), now.UnixNano(), allocs) + must.NoError(t, err) // ensure any progress deadline has expired time.Sleep(progressTimeout) @@ -1708,26 +1696,25 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { // without a scheduler running we'll never mark the deployment as // successful, so test that healthy == desired and that we haven't failed deployment, err := m.state.DeploymentByID(nil, d.ID) - require.NoError(err) - require.Equal(structs.DeploymentStatusRunning, deployment.Status) + must.NoError(t, err) + must.Eq(t, structs.DeploymentStatusRunning, deployment.Status) group1 := deployment.TaskGroups["group1"] - require.Equal(group1.DesiredTotal, group1.HealthyAllocs, "not enough healthy") - require.Equal(group1.DesiredTotal, group1.PlacedAllocs, "not enough placed") - require.Equal(0, group1.UnhealthyAllocs) + must.Eq(t, group1.DesiredTotal, group1.HealthyAllocs, must.Sprint("not enough healthy")) + must.Eq(t, group1.DesiredTotal, group1.PlacedAllocs, must.Sprint("not enough placed")) + must.Eq(t, 0, group1.UnhealthyAllocs) group2 := deployment.TaskGroups["group2"] - require.Equal(group2.DesiredTotal, group2.HealthyAllocs, "not enough healthy") - require.Equal(group2.DesiredTotal, group2.PlacedAllocs, "not enough placed") - require.Equal(0, group2.UnhealthyAllocs) + must.Eq(t, group2.DesiredTotal, group2.HealthyAllocs, must.Sprint("not enough healthy")) + must.Eq(t, group2.DesiredTotal, group2.PlacedAllocs, must.Sprint("not enough placed")) + must.Eq(t, 0, group2.UnhealthyAllocs) } // Test scenario where deployment initially has no progress deadline // After the deployment is updated, a failed alloc's DesiredTransition should be set func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1743,18 +1730,19 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - 
require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) a := mock.Alloc() a.CreateTime = time.Now().UnixNano() a.DeploymentID = d.ID - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) d.TaskGroups["web"].ProgressDeadline = 500 * time.Millisecond // Update the deployment with a progress deadline - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // Match on DesiredTransition set to Reschedule for the failed alloc m1 := matchUpdateAllocDesiredTransitionReschedule([]string{a.ID}) @@ -1762,7 +1750,7 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Update the alloc to be unhealthy a2 := a.Copy() @@ -1770,7 +1758,8 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { Healthy: pointer.Of(false), Timestamp: time.Now(), } - require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) + must.Nil(t, m.state.UpdateAllocsFromClient( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a2})) // Wait for the alloc's DesiredState to set reschedule testutil.WaitForResult(func() (bool, error) { @@ -1811,8 +1800,8 @@ func TestDeploymentWatcher_Watch_FailEarly(t *testing.T) { a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), now.UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), now.UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we get a call to UpsertDeploymentStatusUpdate c := &matchDeploymentStatusUpdateConfig{ @@ -1834,7 +1823,7 @@ func TestDeploymentWatcher_Watch_FailEarly(t *testing.T) { Healthy: pointer.Of(false), Timestamp: now, } - must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) + must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), now.UnixNano(), []*structs.Allocation{a2})) // Wait for the deployment to be failed testutil.WaitForResult(func() (bool, error) { @@ -1873,7 +1862,6 @@ func TestDeploymentWatcher_Watch_FailEarly(t *testing.T) { // Tests that the watcher fails rollback when the spec 
hasn't changed func TestDeploymentWatcher_RollbackFailed(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) // Create a job, alloc, and a deployment @@ -1888,15 +1876,16 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // Upsert the job again to get a new version j2 := j.Copy() // Modify the job to make its specification different j2.Stable = false - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob2")) // require that we will get a createEvaluation call only once. This will // verify that the watcher is batching allocation changes @@ -1916,7 +1905,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) + func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) // Update the allocs health to healthy which should create an evaluation for i := 0; i < 5; i++ { @@ -1926,7 +1915,8 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { HealthyAllocationIDs: []string{a.ID}, }, } - require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req), "UpsertDeploymentAllocHealth") + must.Nil(t, m.state.UpdateDeploymentAllocHealth( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req), must.Sprint("UpsertDeploymentAllocHealth")) } // Wait for there to be one eval @@ -1954,7 +1944,8 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { UnhealthyAllocationIDs: []string{a.ID}, }, } - require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req2), "UpsertDeploymentAllocHealth") + must.Nil(t, m.state.UpdateDeploymentAllocHealth( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req2), must.Sprint("UpsertDeploymentAllocHealth")) // Wait for there to be one eval testutil.WaitForResult(func() (bool, error) { @@ -1977,13 +1968,12 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { // verify that the job version hasn't changed after upsert m.state.JobByID(nil, structs.DefaultNamespace, j.ID) - require.Equal(uint64(0), j.Version, "Expected job version 0 but got ", j.Version) + must.Eq(t, uint64(0), j.Version, must.Sprintf("Expected job version 0 but got ", j.Version)) } // Test allocation updates and evaluation creation is batched between watchers func TestWatcher_BatchAllocUpdates(t *testing.T) { ci.Parallel(t) - require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Second) 
m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -2011,12 +2001,14 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { a2.JobID = j2.ID a2.DeploymentID = d2.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j1), "UpsertJob") - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d1), "UpsertDeployment") - require.Nil(m.state.UpsertDeployment(m.nextIndex(), d2), "UpsertDeployment") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a1}), "UpsertAllocs") - require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2}), "UpsertAllocs") + now := time.Now().UnixNano() + + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j1), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), now, d1), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), now, d2), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), now, []*structs.Allocation{a1}), must.Sprint("UpsertAllocs")) + must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), now, []*structs.Allocation{a2}), must.Sprint("UpsertAllocs")) // require that we will get a createEvaluation call only once and it contains // both deployments. This will verify that the watcher is batching @@ -2026,7 +2018,7 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 2 == watchersCount(w), nil }, - func(err error) { require.Equal(2, watchersCount(w), "Should have 2 deployment") }) + func(err error) { must.Eq(t, 2, watchersCount(w), must.Sprint("Should have 2 deployment")) }) // Update the allocs health to healthy which should create an evaluation req := &structs.ApplyDeploymentAllocHealthRequest{ @@ -2035,7 +2027,8 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { HealthyAllocationIDs: []string{a1.ID}, }, } - require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req), "UpsertDeploymentAllocHealth") + must.Nil(t, m.state.UpdateDeploymentAllocHealth( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req), must.Sprint("UpsertDeploymentAllocHealth")) req2 := &structs.ApplyDeploymentAllocHealthRequest{ DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ @@ -2043,7 +2036,8 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { HealthyAllocationIDs: []string{a2.ID}, }, } - require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req2), "UpsertDeploymentAllocHealth") + must.Nil(t, m.state.UpdateDeploymentAllocHealth( + structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req2), must.Sprint("UpsertDeploymentAllocHealth")) // Wait for there to be one eval for each job testutil.WaitForResult(func() (bool, error) { @@ -2073,7 +2067,7 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { m.AssertCalled(t, "UpdateAllocDesiredTransition", mocker.MatchedBy(m1)) testutil.WaitForResult(func() (bool, error) { return 2 == watchersCount(w), nil }, - func(err error) { require.Equal(2, watchersCount(w), "Should have 2 deployment") }) + func(err error) { must.Eq(t, 2, 
watchersCount(w), must.Sprint("Should have 2 deployment")) }) } func watchersCount(w *Watcher) int { @@ -2096,7 +2090,7 @@ func TestWatcher_PurgeDeployment(t *testing.T) { d := mock.Deployment() d.JobID = j.ID must.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j)) - must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d)) + must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d)) // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ diff --git a/nomad/deploymentwatcher/testutil_test.go b/nomad/deploymentwatcher/testutil_test.go index 7af7df9cafd..5ae67b9ed29 100644 --- a/nomad/deploymentwatcher/testutil_test.go +++ b/nomad/deploymentwatcher/testutil_test.go @@ -8,6 +8,7 @@ import ( "strings" "sync" "testing" + "time" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -98,7 +99,7 @@ func (m *mockBackend) UpsertJob(job *structs.Job) (uint64, error) { func (m *mockBackend) UpdateDeploymentStatus(u *structs.DeploymentStatusUpdateRequest) (uint64, error) { m.Called(u) i := m.nextIndex() - return i, m.state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, i, u) + return i, m.state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, i, time.Now().UnixNano(), u) } // matchDeploymentStatusUpdateConfig is used to configure the matching @@ -152,7 +153,7 @@ func matchDeploymentStatusUpdateRequest(c *matchDeploymentStatusUpdateConfig) fu func (m *mockBackend) UpdateDeploymentPromotion(req *structs.ApplyDeploymentPromoteRequest) (uint64, error) { m.Called(req) i := m.nextIndex() - return i, m.state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, i, req) + return i, m.state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, i, time.Now().UnixNano(), req) } // matchDeploymentPromoteRequestConfig is used to configure the matching @@ -182,7 +183,7 @@ func matchDeploymentPromoteRequest(c *matchDeploymentPromoteRequestConfig) func( func (m *mockBackend) UpdateDeploymentAllocHealth(req *structs.ApplyDeploymentAllocHealthRequest) (uint64, error) { m.Called(req) i := m.nextIndex() - return i, m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, i, req) + return i, m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, i, time.Now().UnixNano(), req) } // matchDeploymentAllocHealthRequestConfig is used to configure the matching diff --git a/nomad/drainer/watch_nodes_test.go b/nomad/drainer/watch_nodes_test.go index 5e698e0b8b9..580c943cd0e 100644 --- a/nomad/drainer/watch_nodes_test.go +++ b/nomad/drainer/watch_nodes_test.go @@ -50,7 +50,7 @@ func TestNodeDrainWatcher_AddNodes(t *testing.T) { alloc2.NodeID = n2.ID must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc1, alloc2})) + structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 103, n1)) must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 104, n2)) @@ -228,7 +228,7 @@ func testNodeDrainWatcherSetup( alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} index++ must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc})) index++ must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go index 
02f4e314231..7f853423fc9 100644 --- a/nomad/drainer_int_test.go +++ b/nomad/drainer_int_test.go @@ -621,7 +621,7 @@ func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete badAllocs = append(badAllocs, alloc) } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1, badAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1, time.Now().UnixNano(), badAllocs)) // Create the second node n2 := mock.Node() diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 1d5375cbb1e..331e3929867 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -1387,7 +1387,7 @@ func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) { alloc2.NodeID = node.ID state.UpsertJobSummary(8, mock.JobSummary(alloc.JobID)) state.UpsertJobSummary(9, mock.JobSummary(alloc2.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc, alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) clientAlloc := new(structs.Allocation) *clientAlloc = *alloc @@ -1455,7 +1455,7 @@ func TestFSM_UpdateAllocFromClient(t *testing.T) { alloc := mock.Alloc() state.UpsertJobSummary(9, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc}) clientAlloc := new(structs.Allocation) *clientAlloc = *alloc @@ -1506,7 +1506,7 @@ func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { alloc2.Job = alloc.Job alloc2.JobID = alloc.JobID state.UpsertJobSummary(9, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc, alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) t1 := &structs.DesiredTransition{ Migrate: pointer.Of(true), @@ -1735,7 +1735,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) { alloc2.JobID = job2.ID alloc2.PreemptedByAllocation = alloc.ID - fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1, []*structs.Allocation{alloc1, alloc2}) + fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) // evals for preempted jobs eval1 := mock.Eval() @@ -1849,7 +1849,7 @@ func TestFSM_DeploymentStatusUpdate(t *testing.T) { // Upsert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, time.Now().UnixNano(), d); err != nil { t.Fatalf("bad: %v", err) } @@ -1980,7 +1980,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { DesiredCanaries: 1, }, } - if err := state.UpsertDeployment(2, d); err != nil { + if err := state.UpsertDeployment(2, time.Now().UnixNano(), d); err != nil { t.Fatalf("bad: %v", err) } @@ -2001,7 +2001,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { Healthy: pointer.Of(true), } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{c1, c2}); err != nil { t.Fatalf("err: %v", err) } @@ -2064,7 +2064,7 @@ func TestFSM_DeploymentAllocHealth(t *testing.T) { // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, time.Now().UnixNano(), d); err != nil { t.Fatalf("bad: %v", err) } @@ -2073,7 +2073,7 
@@ func TestFSM_DeploymentAllocHealth(t *testing.T) { a1.DeploymentID = d.ID a2 := mock.Alloc() a2.DeploymentID = d.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{a1, a2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{a1, a2}); err != nil { t.Fatalf("bad: %v", err) } @@ -2170,7 +2170,7 @@ func TestFSM_DeleteDeployment(t *testing.T) { // Upsert a deployments d := mock.Deployment() - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, time.Now().UnixNano(), d); err != nil { t.Fatalf("bad: %v", err) } @@ -2476,8 +2476,8 @@ func TestFSM_SnapshotRestore_Allocs(t *testing.T) { alloc2 := mock.Alloc() state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc2}) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) @@ -2504,7 +2504,7 @@ func TestFSM_SnapshotRestore_Allocs_Canonicalize(t *testing.T) { alloc.AllocatedResources = nil state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) @@ -2668,8 +2668,8 @@ func TestFSM_SnapshotRestore_Deployments(t *testing.T) { d2.JobID = j.ID state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j) - state.UpsertDeployment(1000, d1) - state.UpsertDeployment(1001, d2) + state.UpsertDeployment(1000, time.Now().UnixNano(), d1) + state.UpsertDeployment(1001, time.Now().UnixNano(), d2) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) @@ -2959,7 +2959,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Delete the summaries require.NoError(t, state.DeleteJobSummary(1030, job1.Namespace, job1.ID)) @@ -3056,7 +3056,7 @@ func TestFSM_ReconcileParentJobSummary(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusRunning state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, childJob) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc}) // Make the summary incorrect in the state store summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID) @@ -3098,7 +3098,7 @@ func TestFSM_LeakedDeployments(t *testing.T) { fsm := testFSM(t) state := fsm.State() d := mock.Deployment() - require.NoError(state.UpsertDeployment(1000, d)) + require.NoError(state.UpsertDeployment(1000, time.Now().UnixNano(), d)) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) diff --git a/nomad/job_endpoint_statuses_test.go 
b/nomad/job_endpoint_statuses_test.go index af43f34d241..f224e487da5 100644 --- a/nomad/job_endpoint_statuses_test.go +++ b/nomad/job_endpoint_statuses_test.go @@ -280,7 +280,7 @@ func TestJob_Statuses(t *testing.T) { t.Helper() a := mock.MinAllocForJob(job) must.NoError(t, - s.State().UpsertAllocs(structs.AllocUpdateRequestType, incIdx(t), []*structs.Allocation{a}), + s.State().UpsertAllocs(structs.AllocUpdateRequestType, incIdx(t), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprintf("error creating alloc for job %s", job.ID)) t.Cleanup(func() { test.NoError(t, s.State().DeleteEval(incIdx(t), []string{}, []string{a.ID}, false)) @@ -290,7 +290,7 @@ func TestJob_Statuses(t *testing.T) { t.Helper() deploy := mock.Deployment() deploy.JobID = job.ID - must.NoError(t, s.State().UpsertDeployment(incIdx(t), deploy)) + must.NoError(t, s.State().UpsertDeployment(incIdx(t), time.Now().UnixNano(), deploy)) t.Cleanup(func() { test.NoError(t, s.State().DeleteDeployment(incIdx(t), []string{deploy.ID})) }) diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index f0f8cef1fa2..72ed6ab52c8 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -3282,7 +3282,7 @@ func TestJobEndpoint_ForceRescheduleEvaluate(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.Namespace = job.Namespace alloc.ClientStatus = structs.AllocClientStatusFailed - err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, resp.Index+1, []*structs.Allocation{alloc}) + err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, resp.Index+1, time.Now().UnixNano(), []*structs.Allocation{alloc}) require.Nil(err) // Force a re-evaluation @@ -5131,7 +5131,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { alloc := mock.Alloc() alloc.JobID = job1.ID alloc.Job = job1 - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } }) @@ -5632,7 +5632,7 @@ func TestJobEndpoint_Allocations(t *testing.T) { state := s1.fsm.State() state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -5674,7 +5674,7 @@ func TestJobEndpoint_Allocations_ACL(t *testing.T) { alloc2.JobID = alloc1.JobID state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) require.Nil(err) // Look up allocations for that job @@ -5737,7 +5737,7 @@ func TestJobEndpoint_Allocations_Blocking(t *testing.T) { // First upsert an unrelated alloc time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -5746,7 +5746,7 @@ func 
TestJobEndpoint_Allocations_Blocking(t *testing.T) { // Upsert an alloc for the job we are interested in later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -5977,8 +5977,8 @@ func TestJobEndpoint_Deployments(t *testing.T) { d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6013,8 +6013,8 @@ func TestJobEndpoint_Deployments_ACL(t *testing.T) { require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6077,12 +6077,12 @@ func TestJobEndpoint_Deployments_Blocking(t *testing.T) { d2.JobCreateIndex = j.CreateIndex // First upsert an unrelated eval time.AfterFunc(100*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(100, time.Now().UnixNano(), d1), "UpsertDeployment") }) // Upsert an eval for the job we are interested in later time.AfterFunc(200*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(200, time.Now().UnixNano(), d2), "UpsertDeployment") }) // Lookup the jobs @@ -6126,8 +6126,8 @@ func TestJobEndpoint_LatestDeployment(t *testing.T) { require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6165,8 +6165,8 @@ func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) { require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6233,12 +6233,12 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) { // First upsert an unrelated eval 
time.AfterFunc(100*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(100, time.Now().UnixNano(), d1), "UpsertDeployment") }) // Upsert an eval for the job we are interested in later time.AfterFunc(200*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(200, time.Now().UnixNano(), d2), "UpsertDeployment") }) // Lookup the jobs @@ -7502,7 +7502,7 @@ func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { alloc.TaskGroup = dispatchedJob.TaskGroups[0].Name alloc.Namespace = dispatchedJob.Namespace alloc.ClientStatus = structs.AllocClientStatusPending - err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, nextIdx, []*structs.Allocation{alloc}) + err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, nextIdx, time.Now().UnixNano(), []*structs.Allocation{alloc}) require.NoError(t, err) require.Equal(t, &structs.JobChildrenSummary{Running: 1}, jobChildren()) require.Equal(t, structs.JobStatusRunning, dispatchedStatus()) @@ -7521,7 +7521,7 @@ func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { require.NoError(t, err) nalloc = nalloc.Copy() nalloc.ClientStatus = status - err = s1.State().UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIdx, []*structs.Allocation{nalloc}) + err = s1.State().UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIdx, time.Now().UnixNano(), []*structs.Allocation{nalloc}) require.NoError(t, err) } @@ -7700,13 +7700,13 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob d1.JobID = job.ID d1.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") d2 := mock.Deployment() d2.Status = structs.DeploymentStatusSuccessful d2.StatusDescription = structs.DeploymentStatusDescriptionSuccessful d2.JobID = job.ID d2.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") // add the latest deployment for the test case dLatest := mock.Deployment() @@ -7714,7 +7714,7 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { dLatest.StatusDescription = "description does not matter for this test" dLatest.JobID = job.ID dLatest.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1003, dLatest), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1003, time.Now().UnixNano(), dLatest), "UpsertDeployment") // attempt to scale originalCount := job.TaskGroups[0].Count @@ -7825,13 +7825,13 @@ func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob d1.JobID = job.ID d1.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") d2 := mock.Deployment() d2.Status = structs.DeploymentStatusSuccessful d2.StatusDescription = structs.DeploymentStatusDescriptionSuccessful d2.JobID = job.ID d2.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") // add the latest deployment for the test 
case dLatest := mock.Deployment() @@ -7839,7 +7839,7 @@ func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { dLatest.StatusDescription = "description does not matter for this test" dLatest.JobID = job.ID dLatest.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1003, dLatest), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1003, time.Now().UnixNano(), dLatest), "UpsertDeployment") // register informational scaling event groupName := job.TaskGroups[0].Name @@ -8387,7 +8387,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a0.Namespace = jobV1.Namespace a0.JobID = jobV1.ID a0.ClientStatus = structs.AllocClientStatusComplete - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1010, []*structs.Allocation{a0}), "UpsertAllocs") + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1010, time.Now().UnixNano(), []*structs.Allocation{a0}), "UpsertAllocs") jobV2 := jobV1.Copy() require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1100, nil, jobV2), "UpsertJob") @@ -8426,7 +8426,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a4.JobID = jobV2.ID a4.ClientStatus = structs.AllocClientStatusRunning // upsert allocations - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1110, []*structs.Allocation{a1, a2, a3, a4}), "UpsertAllocs") + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1110, time.Now().UnixNano(), []*structs.Allocation{a1, a2, a3, a4}), "UpsertAllocs") event := &structs.ScalingEvent{ Time: time.Now().Unix(), diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 3a3d3acc250..a9b15a7b64c 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -2426,7 +2426,7 @@ func TestClientEndpoint_GetAllocs(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -2478,7 +2478,7 @@ func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) { assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode") assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary") allocs := []*structs.Allocation{allocDefaultNS} - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 5, allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 5, time.Now().UnixNano(), allocs), "UpsertAllocs") // Create the namespace policy and tokens validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+ @@ -2568,7 +2568,7 @@ func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) { assert.Nil(state.UpsertJobSummary(4, mock.JobSummary(allocAltNS.JobID)), "UpsertJobSummary") assert.Nil(state.UpsertJobSummary(5, mock.JobSummary(allocOtherNS.JobID)), "UpsertJobSummary") allocs := []*structs.Allocation{allocDefaultNS, allocAltNS, allocOtherNS} - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 6, allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 6, time.Now().UnixNano(), allocs), "UpsertAllocs") // Create the namespace policy and tokens validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+ @@ -2653,7 +2653,7 @@ func 
TestClientEndpoint_GetClientAllocs(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -2744,7 +2744,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) start := time.Now() time.AfterFunc(100*time.Millisecond, func() { - err := store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + err := store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -2811,7 +2811,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { allocUpdate.ID = alloc.ID allocUpdate.ClientStatus = structs.AllocClientStatusRunning store.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID)) - err := store.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate}) + err := store.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{allocUpdate}) if err != nil { t.Fatalf("err: %v", err) } @@ -2865,7 +2865,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) start := time.Now() time.AfterFunc(100*time.Millisecond, func() { - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1, alloc2})) + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) }) // Lookup the allocs in a blocking query @@ -2944,7 +2944,7 @@ func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) { alloc.DesiredStatus = structs.AllocClientStatusComplete state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{prevAlloc, alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{prevAlloc, alloc}) assert.Nil(err) // Lookup the allocs @@ -2994,7 +2994,7 @@ func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) start := time.Now() time.AfterFunc(100*time.Millisecond, func() { - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -3034,7 +3034,7 @@ func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { allocUpdate.ID = alloc.ID allocUpdate.ClientStatus = structs.AllocClientStatusRunning state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID)) - err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate}) + err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{allocUpdate}) if err != nil { t.Fatalf("err: %v", err) } @@ -3101,7 +3101,7 @@ func TestNode_UpdateAlloc(t *testing.T) { must.NoError(t, store.UpsertJobSummary(99, mock.JobSummary(alloc2.JobID))) alloc2.TaskGroup = job.TaskGroups[0].Name - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, 
[]*structs.Allocation{alloc, alloc2})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) // Attempt updates of more than one alloc for the same job clientAlloc1 := new(structs.Allocation) @@ -3173,7 +3173,7 @@ func TestNode_UpdateAlloc_NodeNotReady(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusRunning must.NoError(t, store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))) - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Mark node as down. must.NoError(t, store.UpdateNodeStatus( @@ -3262,7 +3262,7 @@ func TestNode_UpdateAllocServiceRegistrations(t *testing.T) { alloc2.TaskGroup = job.TaskGroups[0].Name index++ - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc0, alloc1, alloc2})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc0, alloc1, alloc2})) serviceFor := func(allocID string, port int) *structs.ServiceRegistration { return &structs.ServiceRegistration{ @@ -3338,7 +3338,7 @@ func TestClientEndpoint_BatchUpdate(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -3400,7 +3400,7 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -3483,7 +3483,7 @@ func TestClientEndpoint_UpdateAlloc_VaultWorkloadIdentity(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) must.NoError(t, err) var accessors []*structs.VaultAccessor @@ -3544,13 +3544,13 @@ func TestClientEndpoint_CreateNodeEvals(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID state.UpsertJobSummary(1, mock.JobSummary(alloc.JobID)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, time.Now().UnixNano(), []*structs.Allocation{alloc})) idx++ sysBatchAlloc := mock.SysBatchAlloc() sysBatchAlloc.NodeID = node.ID state.UpsertJobSummary(1, mock.JobSummary(sysBatchAlloc.JobID)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, []*structs.Allocation{sysBatchAlloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, time.Now().UnixNano(), []*structs.Allocation{sysBatchAlloc})) idx++ // Inject a fake system job. 
@@ -3752,7 +3752,7 @@ func TestClientEndpoint_Evaluate(t *testing.T) { t.Fatalf("err: %v", err) } state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -3837,7 +3837,7 @@ func TestClientEndpoint_Evaluate_ACL(t *testing.T) { assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode") assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)), "UpsertJobSummary") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}), "UpsertAllocs") // Create the policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite)) @@ -4194,7 +4194,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] tasks := []string{task.Name} - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4230,7 +4230,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { // Update to be running on the node alloc.NodeID = node.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 4, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4244,7 +4244,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { // Update to be client-terminal alloc.ClientStatus = structs.AllocClientStatusFailed - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 5, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 5, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4288,7 +4288,7 @@ func TestClientEndpoint_DeriveVaultToken(t *testing.T) { task := alloc.Job.TaskGroups[0].Tasks[0] tasks := []string{task.Name} task.Vault = &structs.Vault{Policies: []string{"a", "b"}} - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4381,7 +4381,7 @@ func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) { task := alloc.Job.TaskGroups[0].Tasks[0] tasks := []string{task.Name} task.Vault = &structs.Vault{Policies: []string{"a", "b"}} - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4514,7 +4514,7 @@ func TestClientEndpoint_DeriveSIToken(t *testing.T) { mutateConnectJob(t, alloc.Job) // appends sidecar task sidecarTask := alloc.Job.TaskGroups[0].Tasks[1] - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), 
[]*structs.Allocation{alloc}) r.NoError(err) request := &structs.DeriveSITokenRequest{ @@ -4572,7 +4572,7 @@ func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { m := NewConsulACLsAPI(mockACLsAPI, s1.logger, nil) s1.consulACLs = m - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}) r.NoError(err) request := &structs.DeriveSITokenRequest{ @@ -4816,7 +4816,7 @@ func TestClientEndpoint_UpdateAlloc_Evals_ByTrigger(t *testing.T) { } if !tc.missingAlloc { - err = fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{serverAlloc}) + err = fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{serverAlloc}) require.NoError(t, err) } diff --git a/nomad/operator_endpoint_test.go b/nomad/operator_endpoint_test.go index 1afcb44b253..4a893637125 100644 --- a/nomad/operator_endpoint_test.go +++ b/nomad/operator_endpoint_test.go @@ -1353,7 +1353,7 @@ func TestOperator_UpgradeCheckRequest_VaultWorkloadIdentity(t *testing.T) { allocJobWithWID.JobID = jobWithWID.ID allocJobWithWID.NodeID = node.ID - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, []*structs.Allocation{allocJobNoWID, allocJobWithWID}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*structs.Allocation{allocJobNoWID, allocJobWithWID}) must.NoError(t, err) // Create Vault token accessor for job without Vault identity and one that diff --git a/nomad/periodic_test.go b/nomad/periodic_test.go index 1718c434ca9..df9b1b4f9b2 100644 --- a/nomad/periodic_test.go +++ b/nomad/periodic_test.go @@ -758,7 +758,7 @@ func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) { alloc.JobID = childjob.ID alloc.EvalID = eval.ID alloc.DesiredStatus = structs.AllocDesiredStatusRun - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("UpsertAllocs failed: %v", err) } diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index e06688ac927..6da307049e1 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. 
+// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package nomad @@ -9,6 +9,7 @@ import ( "strconv" "strings" "testing" + "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" "github.com/hashicorp/nomad/acl" @@ -33,7 +34,7 @@ func registerMockJob(s *Server, t *testing.T, prefix string, counter int) *struc func registerJob(s *Server, t *testing.T, job *structs.Job) { fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) } func mockAlloc() *structs.Allocation { @@ -70,9 +71,9 @@ func TestSearch_PrefixSearch_Job(t *testing.T) { t.Fatalf("err: %v", err) } - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) + must.Eq(t, uint64(jobIndex), resp.Index) } func TestSearch_PrefixSearch_ACL(t *testing.T) { @@ -102,7 +103,7 @@ func TestSearch_PrefixSearch_ACL(t *testing.T) { must.NoError(t, resp.Error) plugin := mock.CSIPlugin() - must.NoError(t, store.UpsertCSIPlugin(1002, plugin)) + must.NoError(t, store.UpsertCSIPlugin(1002, time.Now().UnixNano(), plugin)) node := mock.Node() must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1003, node)) @@ -282,8 +283,8 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJobSummary(999, summary)) - require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + must.NoError(t, fsmState.UpsertJobSummary(999, summary)) + must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) req := &structs.SearchRequest{ Context: structs.All, @@ -297,10 +298,10 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) { for i := 1; i < len(prefix); i++ { req.Prefix = prefix[:i] var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Equal(t, 1, len(resp.Matches[structs.Jobs])) - require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) - require.EqualValues(t, jobIndex, resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.Eq(t, 1, len(resp.Matches[structs.Jobs])) + must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) + must.Eq(t, jobIndex, resp.Index) } } @@ -323,8 +324,8 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJobSummary(999, summary)) - require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + must.NoError(t, fsmState.UpsertJobSummary(999, summary)) + must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) req := &structs.SearchRequest{ Prefix: prefix, @@ -336,11 +337,11 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job.ID, 
resp.Matches[structs.Jobs][0]) - require.EqualValues(t, jobIndex, resp.Index) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) + must.Eq(t, jobIndex, resp.Index) } // truncate should limit results to 20 @@ -370,11 +371,11 @@ func TestSearch_PrefixSearch_Truncate(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 20) - require.True(t, resp.Truncations[structs.Jobs]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.Len(t, 20, resp.Matches[structs.Jobs]) + must.True(t, resp.Truncations[structs.Jobs]) + must.Eq(t, uint64(jobIndex), resp.Index) } func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { @@ -393,7 +394,7 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { job := registerMockJob(s, t, prefix, 0) eval1 := mock.Eval() eval1.ID = job.ID - require.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) + must.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) req := &structs.SearchRequest{ Prefix: prefix, @@ -405,12 +406,12 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) - require.Len(t, resp.Matches[structs.Evals], 1) - require.Equal(t, eval1.ID, resp.Matches[structs.Evals][0]) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) + must.Len(t, 1, resp.Matches[structs.Evals]) + must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0]) } func TestSearch_PrefixSearch_Evals(t *testing.T) { @@ -424,7 +425,7 @@ func TestSearch_PrefixSearch_Evals(t *testing.T) { testutil.WaitForLeader(t, s.RPC) eval1 := mock.Eval() - require.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) + must.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) prefix := eval1.ID[:len(eval1.ID)-2] @@ -438,12 +439,12 @@ func TestSearch_PrefixSearch_Evals(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Evals], 1) - require.Equal(t, eval1.ID, resp.Matches[structs.Evals][0]) - require.False(t, resp.Truncations[structs.Evals]) - require.Equal(t, uint64(2000), resp.Index) + must.Len(t, 1, resp.Matches[structs.Evals]) + must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0]) + must.False(t, resp.Truncations[structs.Evals]) + must.Eq(t, uint64(2000), resp.Index) } func TestSearch_PrefixSearch_Allocation(t *testing.T) { @@ -460,8 +461,8 @@ func TestSearch_PrefixSearch_Allocation(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJobSummary(999, summary)) - require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, []*structs.Allocation{alloc})) + must.NoError(t, fsmState.UpsertJobSummary(999, summary)) + must.NoError(t, 
fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, time.Now().UnixNano(), []*structs.Allocation{alloc})) prefix := alloc.ID[:len(alloc.ID)-2] @@ -475,12 +476,12 @@ func TestSearch_PrefixSearch_Allocation(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Allocs], 1) - require.Equal(t, alloc.ID, resp.Matches[structs.Allocs][0]) - require.False(t, resp.Truncations[structs.Allocs]) - require.Equal(t, uint64(90), resp.Index) + must.Len(t, 1, resp.Matches[structs.Allocs]) + must.Eq(t, alloc.ID, resp.Matches[structs.Allocs][0]) + must.False(t, resp.Truncations[structs.Allocs]) + must.Eq(t, uint64(90), resp.Index) } func TestSearch_PrefixSearch_All_UUID(t *testing.T) { @@ -497,15 +498,15 @@ func TestSearch_PrefixSearch_All_UUID(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJobSummary(999, summary)) - require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + must.NoError(t, fsmState.UpsertJobSummary(999, summary)) + must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) node := mock.Node() - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, node)) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, node)) eval1 := mock.Eval() eval1.ID = node.ID - require.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1002, []*structs.Evaluation{eval1})) + must.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1002, []*structs.Evaluation{eval1})) req := &structs.SearchRequest{ Context: structs.All, @@ -518,11 +519,11 @@ func TestSearch_PrefixSearch_All_UUID(t *testing.T) { for i := 1; i < len(alloc.ID); i++ { req.Prefix = alloc.ID[:i] var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Allocs], 1) - require.Equal(t, alloc.ID, resp.Matches[structs.Allocs][0]) - require.False(t, resp.Truncations[structs.Allocs]) - require.EqualValues(t, 1002, resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Allocs]) + must.Eq(t, alloc.ID, resp.Matches[structs.Allocs][0]) + must.False(t, resp.Truncations[structs.Allocs]) + must.Eq(t, 1002, resp.Index) } } @@ -539,7 +540,7 @@ func TestSearch_PrefixSearch_Node(t *testing.T) { fsmState := s.fsm.State() node := mock.Node() - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) prefix := node.ID[:len(node.ID)-2] @@ -557,10 +558,10 @@ func TestSearch_PrefixSearch_Node(t *testing.T) { t.Fatalf("err: %v", err) } - require.Len(t, resp.Matches[structs.Nodes], 1) - require.Equal(t, node.ID, resp.Matches[structs.Nodes][0]) - require.False(t, resp.Truncations[structs.Nodes]) - require.Equal(t, uint64(100), resp.Index) + must.Len(t, 1, resp.Matches[structs.Nodes]) + must.Eq(t, node.ID, resp.Matches[structs.Nodes][0]) + must.False(t, resp.Truncations[structs.Nodes]) + must.Eq(t, uint64(100), resp.Index) } func TestSearch_PrefixSearch_NodePool(t *testing.T) { @@ -775,7 +776,7 @@ func TestSearch_PrefixSearch_Deployment(t *testing.T) { testutil.WaitForLeader(t, 
s.RPC) deployment := mock.Deployment() - require.NoError(t, s.fsm.State().UpsertDeployment(2000, deployment)) + must.NoError(t, s.fsm.State().UpsertDeployment(2000, time.Now().UnixNano(), deployment)) prefix := deployment.ID[:len(deployment.ID)-2] @@ -789,11 +790,11 @@ func TestSearch_PrefixSearch_Deployment(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Deployments], 1) - require.Equal(t, deployment.ID, resp.Matches[structs.Deployments][0]) - require.False(t, resp.Truncations[structs.Deployments]) - require.Equal(t, uint64(2000), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Deployments]) + must.Eq(t, deployment.ID, resp.Matches[structs.Deployments][0]) + must.False(t, resp.Truncations[structs.Deployments]) + must.Eq(t, uint64(2000), resp.Index) } func TestSearch_PrefixSearch_AllContext(t *testing.T) { @@ -809,11 +810,11 @@ func TestSearch_PrefixSearch_AllContext(t *testing.T) { fsmState := s.fsm.State() node := mock.Node() - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) eval1 := mock.Eval() eval1.ID = node.ID - require.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1})) + must.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1})) prefix := node.ID[:len(node.ID)-2] @@ -827,13 +828,13 @@ func TestSearch_PrefixSearch_AllContext(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Nodes], 1) - require.Len(t, resp.Matches[structs.Evals], 1) - require.Equal(t, node.ID, resp.Matches[structs.Nodes][0]) - require.Equal(t, eval1.ID, resp.Matches[structs.Evals][0]) - require.Equal(t, uint64(1000), resp.Index) + must.Len(t, 1, resp.Matches[structs.Nodes]) + must.Len(t, 1, resp.Matches[structs.Evals]) + must.Eq(t, node.ID, resp.Matches[structs.Nodes][0]) + must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0]) + must.Eq(t, uint64(1000), resp.Index) } // Tests that the top 20 matches are returned when no prefix is set @@ -861,10 +862,10 @@ func TestSearch_PrefixSearch_NoPrefix(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) + must.Eq(t, uint64(jobIndex), resp.Index) } // Tests that the zero matches are returned when a prefix has no matching @@ -891,9 +892,9 @@ func TestSearch_PrefixSearch_NoMatches(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) require.Empty(t, resp.Matches[structs.Jobs]) - require.Equal(t, uint64(0), resp.Index) + must.Eq(t, uint64(0), resp.Index) } // Prefixes can only be looked 
up if their length is a power of two. For @@ -925,9 +926,9 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) } func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { @@ -964,11 +965,11 @@ func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { codec := rpcClient(t, s2) var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) + must.Eq(t, uint64(jobIndex), resp.Index) } func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { @@ -995,11 +996,11 @@ func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Plugins], 1) - require.Equal(t, id, resp.Matches[structs.Plugins][0]) - require.False(t, resp.Truncations[structs.Plugins]) + must.Len(t, 1, resp.Matches[structs.Plugins]) + must.Eq(t, id, resp.Matches[structs.Plugins][0]) + must.False(t, resp.Truncations[structs.Plugins]) } func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { @@ -1013,12 +1014,12 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { testutil.WaitForLeader(t, s.RPC) id := uuid.Generate() - err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{ + err := s.fsm.State().UpsertCSIVolume(1000, time.Now().UnixNano(), []*structs.CSIVolume{{ ID: id, Namespace: structs.DefaultNamespace, PluginID: "glade", }}) - require.NoError(t, err) + must.NoError(t, err) prefix := id[:len(id)-2] @@ -1032,11 +1033,11 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Volumes], 1) - require.Equal(t, id, resp.Matches[structs.Volumes][0]) - require.False(t, resp.Truncations[structs.Volumes]) + must.Len(t, 1, resp.Matches[structs.Volumes]) + must.Eq(t, id, resp.Matches[structs.Volumes][0]) + must.False(t, resp.Truncations[structs.Volumes]) } func TestSearch_PrefixSearch_Namespace(t *testing.T) { @@ -1050,7 +1051,7 @@ func TestSearch_PrefixSearch_Namespace(t *testing.T) { testutil.WaitForLeader(t, s.RPC) ns := mock.Namespace() - require.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) + must.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) prefix := ns.Name[:len(ns.Name)-2] @@ -1063,12 +1064,12 @@ func TestSearch_PrefixSearch_Namespace(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + 
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.Namespaces], 1) - require.Equal(t, ns.Name, resp.Matches[structs.Namespaces][0]) - require.False(t, resp.Truncations[structs.Namespaces]) - require.Equal(t, uint64(2000), resp.Index) + must.Len(t, 1, resp.Matches[structs.Namespaces]) + must.Eq(t, ns.Name, resp.Matches[structs.Namespaces][0]) + must.False(t, resp.Truncations[structs.Namespaces]) + must.Eq(t, uint64(2000), resp.Index) } func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) { @@ -1216,7 +1217,7 @@ func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { prefix := policy.ID fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) req := &structs.SearchRequest{ Prefix: prefix, @@ -1228,16 +1229,16 @@ func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { } var resp structs.SearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.ScalingPolicies], 1) - require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) + must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) + must.Eq(t, uint64(jobIndex), resp.Index) req.Context = structs.All - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - require.Len(t, resp.Matches[structs.ScalingPolicies], 1) - require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) + must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) + must.Eq(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_ACL(t *testing.T) { @@ -1269,7 +1270,7 @@ func TestSearch_FuzzySearch_ACL(t *testing.T) { plugin := mock.CSIPlugin() plugin.ID = "mock.hashicorp.com" - must.NoError(t, store.UpsertCSIPlugin(1002, plugin)) + must.NoError(t, store.UpsertCSIPlugin(1002, time.Now().UnixNano(), plugin)) node := mock.Node() must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1003, node)) @@ -1343,7 +1344,7 @@ func TestSearch_FuzzySearch_ACL(t *testing.T) { req.Text = "jo" // mock job Name is my-job var resp structs.FuzzySearchResponse must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) + must.Len(t, 1, resp.Matches[structs.Jobs]) must.Eq(t, structs.FuzzyMatch{ ID: "my-job", Scope: []string{"default", job.ID}, @@ -1425,7 +1426,7 @@ func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { job := mock.Job() registerJob(s, t, job) - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "foo", // min set to 5 @@ -1434,7 +1435,7 @@ func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { } var resp structs.FuzzySearchResponse - require.EqualError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), + must.EqError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), 
"fuzzy search is not enabled") } @@ -1453,7 +1454,7 @@ func TestSearch_FuzzySearch_ShortText(t *testing.T) { job := mock.Job() registerJob(s, t, job) - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "foo", // min set to 5 @@ -1462,7 +1463,7 @@ func TestSearch_FuzzySearch_ShortText(t *testing.T) { } var resp structs.FuzzySearchResponse - require.EqualError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), + must.EqError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), "fuzzy search query must be at least 5 characters, got 3") } @@ -1477,7 +1478,7 @@ func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { testutil.WaitForLeader(t, s.RPC) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "job", @@ -1492,11 +1493,11 @@ func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 20) - require.True(t, resp.Truncations[structs.Jobs]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.Len(t, 20, resp.Matches[structs.Jobs]) + must.True(t, resp.Truncations[structs.Jobs]) + must.Eq(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { @@ -1512,7 +1513,7 @@ func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { testutil.WaitForLeader(t, s.RPC) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "job", @@ -1527,11 +1528,11 @@ func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 5) - require.True(t, resp.Truncations[structs.Jobs]) - require.Equal(t, uint64(jobIndex), resp.Index) + must.Len(t, 5, resp.Matches[structs.Jobs]) + must.True(t, resp.Truncations[structs.Jobs]) + must.Eq(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_Evals(t *testing.T) { @@ -1546,7 +1547,7 @@ func TestSearch_FuzzySearch_Evals(t *testing.T) { eval1 := mock.Eval() eval1.ID = "f7dee5a1-d2b0-2f6a-2e75-6c8e467a4b99" - require.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) + must.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) req := &structs.FuzzySearchRequest{ Text: "f7dee", // evals are prefix searched @@ -1558,12 +1559,12 @@ func TestSearch_FuzzySearch_Evals(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Evals], 1) - require.Equal(t, 
eval1.ID, resp.Matches[structs.Evals][0].ID) - require.False(t, resp.Truncations[structs.Evals]) - require.Equal(t, uint64(2000), resp.Index) + must.Len(t, 1, resp.Matches[structs.Evals]) + must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0].ID) + must.False(t, resp.Truncations[structs.Evals]) + must.Eq(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_Allocation(t *testing.T) { @@ -1580,8 +1581,8 @@ func TestSearch_FuzzySearch_Allocation(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJobSummary(999, summary)) - require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, []*structs.Allocation{alloc})) + must.NoError(t, fsmState.UpsertJobSummary(999, summary)) + must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, time.Now().UnixNano(), []*structs.Allocation{alloc})) req := &structs.FuzzySearchRequest{ Text: "web", @@ -1593,12 +1594,12 @@ func TestSearch_FuzzySearch_Allocation(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Allocs], 1) - require.Equal(t, alloc.Name, resp.Matches[structs.Allocs][0].ID) - require.False(t, resp.Truncations[structs.Allocs]) - require.Equal(t, uint64(90), resp.Index) + must.Len(t, 1, resp.Matches[structs.Allocs]) + must.Eq(t, alloc.Name, resp.Matches[structs.Allocs][0].ID) + must.False(t, resp.Truncations[structs.Allocs]) + must.Eq(t, uint64(90), resp.Index) } func TestSearch_FuzzySearch_Node(t *testing.T) { @@ -1614,7 +1615,7 @@ func TestSearch_FuzzySearch_Node(t *testing.T) { fsmState := s.fsm.State() node := mock.Node() - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) req := &structs.FuzzySearchRequest{ Text: "oo", @@ -1626,11 +1627,11 @@ func TestSearch_FuzzySearch_Node(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Nodes], 1) - require.Equal(t, node.Name, resp.Matches[structs.Nodes][0].ID) - require.False(t, resp.Truncations[structs.Nodes]) - require.Equal(t, uint64(100), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Nodes]) + must.Eq(t, node.Name, resp.Matches[structs.Nodes][0].ID) + must.False(t, resp.Truncations[structs.Nodes]) + must.Eq(t, uint64(100), resp.Index) } func TestSearch_FuzzySearch_NodePool(t *testing.T) { @@ -1850,7 +1851,7 @@ func TestSearch_FuzzySearch_Deployment(t *testing.T) { testutil.WaitForLeader(t, s.RPC) deployment := mock.Deployment() - require.NoError(t, s.fsm.State().UpsertDeployment(2000, deployment)) + must.NoError(t, s.fsm.State().UpsertDeployment(2000, time.Now().UnixNano(), deployment)) req := &structs.FuzzySearchRequest{ Text: deployment.ID[0:3], // deployments are prefix searched @@ -1862,11 +1863,11 @@ func TestSearch_FuzzySearch_Deployment(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Deployments], 1) - require.Equal(t, deployment.ID, resp.Matches[structs.Deployments][0].ID) - require.False(t, resp.Truncations[structs.Deployments]) - 
require.Equal(t, uint64(2000), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Deployments]) + must.Eq(t, deployment.ID, resp.Matches[structs.Deployments][0].ID) + must.False(t, resp.Truncations[structs.Deployments]) + must.Eq(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { @@ -1890,11 +1891,11 @@ func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Plugins], 1) - require.Equal(t, "my-plugin", resp.Matches[structs.Plugins][0].ID) - require.False(t, resp.Truncations[structs.Plugins]) + must.Len(t, 1, resp.Matches[structs.Plugins]) + must.Eq(t, "my-plugin", resp.Matches[structs.Plugins][0].ID) + must.False(t, resp.Truncations[structs.Plugins]) } func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { @@ -1908,12 +1909,12 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { testutil.WaitForLeader(t, s.RPC) id := uuid.Generate() - err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{ + err := s.fsm.State().UpsertCSIVolume(1000, time.Now().UnixNano(), []*structs.CSIVolume{{ ID: id, Namespace: structs.DefaultNamespace, PluginID: "glade", }}) - require.NoError(t, err) + must.NoError(t, err) req := &structs.FuzzySearchRequest{ Text: id[0:3], // volumes are prefix searched @@ -1925,11 +1926,11 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Volumes], 1) - require.Equal(t, id, resp.Matches[structs.Volumes][0].ID) - require.False(t, resp.Truncations[structs.Volumes]) + must.Len(t, 1, resp.Matches[structs.Volumes]) + must.Eq(t, id, resp.Matches[structs.Volumes][0].ID) + must.False(t, resp.Truncations[structs.Volumes]) } func TestSearch_FuzzySearch_Namespace(t *testing.T) { @@ -1943,7 +1944,7 @@ func TestSearch_FuzzySearch_Namespace(t *testing.T) { testutil.WaitForLeader(t, s.RPC) ns := mock.Namespace() - require.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) + must.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) req := &structs.FuzzySearchRequest{ Text: "am", // mock is team- @@ -1954,12 +1955,12 @@ func TestSearch_FuzzySearch_Namespace(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Namespaces], 1) - require.Equal(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) - require.False(t, resp.Truncations[structs.Namespaces]) - require.Equal(t, uint64(2000), resp.Index) + must.Len(t, 1, resp.Matches[structs.Namespaces]) + must.Eq(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) + must.False(t, resp.Truncations[structs.Namespaces]) + must.Eq(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { @@ -1974,7 +1975,7 @@ func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { ns := mock.Namespace() ns.Name = "TheFooNamespace" - 
require.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) + must.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) req := &structs.FuzzySearchRequest{ Text: "foon", @@ -1985,12 +1986,12 @@ func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Namespaces], 1) - require.Equal(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) - require.False(t, resp.Truncations[structs.Namespaces]) - require.Equal(t, uint64(2000), resp.Index) + must.Len(t, 1, resp.Matches[structs.Namespaces]) + must.Eq(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) + must.False(t, resp.Truncations[structs.Namespaces]) + must.Eq(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { @@ -2006,7 +2007,7 @@ func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { job, policy := mock.JobWithScalingPolicy() fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) req := &structs.FuzzySearchRequest{ Text: policy.ID[0:3], // scaling policies are prefix searched @@ -2018,16 +2019,16 @@ func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.ScalingPolicies], 1) - require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) - require.Equal(t, uint64(jobIndex), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) + must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) + must.Eq(t, uint64(jobIndex), resp.Index) req.Context = structs.All - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.ScalingPolicies], 1) - require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) - require.Equal(t, uint64(jobIndex), resp.Index) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) + must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) + must.Eq(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { @@ -2044,18 +2045,18 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { ns := mock.Namespace() ns.Name = "team-job-app" - require.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{ns})) + must.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{ns})) job1 := mock.Job() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, nil, job1)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, nil, job1)) job2 := mock.Job() job2.Namespace = ns.Name - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, nil, job2)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, nil, job2)) node := mock.Node() node.Name = "run-jobs" - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, node)) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, 
node)) req := &structs.FuzzySearchRequest{ Text: "set-text-in-test", @@ -2070,7 +2071,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { { var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) } // Try with an invalid token and expect failure @@ -2080,7 +2081,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = invalidToken.SecretID var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) } // Try with a node:read token and expect failure due to Namespaces being the context @@ -2090,7 +2091,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = validToken.SecretID var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) } // Try with a node:read token and expect success due to All context @@ -2100,12 +2101,12 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.Context = structs.All req.AuthToken = validToken.SecretID var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Equal(t, uint64(1001), resp.Index) - require.Len(t, resp.Matches[structs.Nodes], 1) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Eq(t, uint64(1001), resp.Index) + must.Len(t, 1, resp.Matches[structs.Nodes]) // Jobs filtered out since token only has access to node:read - require.Len(t, resp.Matches[structs.Jobs], 0) + must.Len(t, 0, resp.Matches[structs.Jobs]) } // Try with a valid token for non-default namespace:read-job @@ -2117,15 +2118,15 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = validToken.SecretID req.Namespace = job2.Namespace var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job2.Name, resp.Matches[structs.Jobs][0].ID) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job2.Name, resp.Matches[structs.Jobs][0].ID) // Index of job - not node - because node context is filtered out - require.Equal(t, uint64(504), resp.Index) + must.Eq(t, uint64(504), resp.Index) // Nodes filtered out since token only has access to namespace:read-job - require.Len(t, resp.Matches[structs.Nodes], 0) + must.Len(t, 0, resp.Matches[structs.Nodes]) } // Try with a management token @@ -2135,12 +2136,12 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = root.SecretID req.Namespace = job1.Namespace var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Equal(t, uint64(1001), resp.Index) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job1.Name, resp.Matches[structs.Jobs][0].ID) - require.Len(t, resp.Matches[structs.Nodes], 1) - require.Len(t, resp.Matches[structs.Namespaces], 1) // matches "team-job-app" + must.NoError(t, 
msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Eq(t, uint64(1001), resp.Index) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job1.Name, resp.Matches[structs.Jobs][0].ID) + must.Len(t, 1, resp.Matches[structs.Nodes]) + must.Len(t, 1, resp.Matches[structs.Namespaces]) // matches "team-job-app" } } @@ -2156,7 +2157,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { testutil.WaitForLeader(t, s.RPC) fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{{ + must.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{{ Name: "teamA", Description: "first namespace", CreateIndex: 100, @@ -2185,29 +2186,29 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { job1.Name = "teamA-job1" job1.ID = "job1" job1.Namespace = "teamA" - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job1)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job1)) job2 := mock.Job() job2.Name = "teamB-job2" job2.ID = "job2" job2.Namespace = "teamB" - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job2)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job2)) job3 := mock.Job() job3.Name = "teamC-job3" job3.ID = "job3" job3.Namespace = "teamC" - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job3)) + must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job3)) // Upsert a node node := mock.Node() node.Name = "node-for-teams" - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node)) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node)) // Upsert a node that will not be matched node2 := mock.Node() node2.Name = "node-for-ops" - require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node2)) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node2)) // Create parameterized requests request := func(text, namespace, token string, context structs.Context) *structs.FuzzySearchRequest { @@ -2226,7 +2227,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { var resp structs.FuzzySearchResponse req := request("anything", job1.Namespace, "", structs.Jobs) err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) }) t.Run("with an invalid token expect failure", func(t *testing.T) { @@ -2236,7 +2237,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) }) t.Run("with node:read token search namespaces expect failure", func(t *testing.T) { @@ -2245,7 +2246,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - require.EqualError(t, err, structs.ErrPermissionDenied.Error()) + must.EqError(t, err, structs.ErrPermissionDenied.Error()) }) t.Run("with node:read token search all expect success", func(t *testing.T) { @@ -2253,13 +2254,13 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", job1.Namespace, validToken.SecretID, structs.All) 
var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) // One matching node - require.Len(t, resp.Matches[structs.Nodes], 1) + must.Len(t, 1, resp.Matches[structs.Nodes]) // Jobs filtered out since token only has access to node:read - require.Len(t, resp.Matches[structs.Jobs], 0) + must.Len(t, 0, resp.Matches[structs.Jobs]) }) t.Run("with a teamB/job:read token search all expect 1 job", func(t *testing.T) { @@ -2268,12 +2269,12 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", job2.Namespace, token.SecretID, structs.All) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 1) - require.Equal(t, job2.Name, resp.Matches[structs.Jobs][0].ID) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Jobs]) + must.Eq(t, job2.Name, resp.Matches[structs.Jobs][0].ID) // Nodes filtered out since token only has access to namespace:read-job - require.Len(t, resp.Matches[structs.Nodes], 0) + must.Len(t, 0, resp.Matches[structs.Nodes]) }) // Using a token that can read jobs in 2 namespaces, we should get job results from @@ -2290,10 +2291,10 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Jobs) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 2) - require.Equal(t, job2.Name, resp.Matches[structs.Jobs][0].ID) - require.Equal(t, job3.Name, resp.Matches[structs.Jobs][1].ID) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 2, resp.Matches[structs.Jobs]) + must.Eq(t, job2.Name, resp.Matches[structs.Jobs][0].ID) + must.Eq(t, job3.Name, resp.Matches[structs.Jobs][1].ID) }) // Using a management token, we should get job results from all three namespaces @@ -2302,11 +2303,11 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", structs.AllNamespacesSentinel, root.SecretID, structs.Jobs) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Jobs], 3) - require.Equal(t, job1.Name, resp.Matches[structs.Jobs][0].ID) - require.Equal(t, job2.Name, resp.Matches[structs.Jobs][1].ID) - require.Equal(t, job3.Name, resp.Matches[structs.Jobs][2].ID) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 3, resp.Matches[structs.Jobs]) + must.Eq(t, job1.Name, resp.Matches[structs.Jobs][0].ID) + must.Eq(t, job2.Name, resp.Matches[structs.Jobs][1].ID) + must.Eq(t, job3.Name, resp.Matches[structs.Jobs][2].ID) }) // Using a token that can read nodes, we should get our 1 matching node when @@ -2319,9 +2320,9 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Nodes) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Nodes], 1) - require.Equal(t, "node-for-teams", resp.Matches[structs.Nodes][0].ID) + must.NoError(t, 
msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Nodes]) + must.Eq(t, "node-for-teams", resp.Matches[structs.Nodes][0].ID) }) // Using a token that cannot read nodes, we should get no matching nodes when @@ -2333,7 +2334,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { token := mock.CreateToken(t, fsmState, inc(), []string{"agent-read-policy"}) req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Nodes) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) require.Empty(t, resp.Matches[structs.Nodes]) }) @@ -2349,31 +2350,31 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc1.Name = job1.Name + ".task[0]" alloc1.Namespace = job1.Namespace summary1 := mock.JobSummary(alloc1.JobID) - require.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) + must.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) alloc2 := mockAlloc() alloc2.JobID = job2.ID alloc2.Name = job2.Name + ".task[0]" alloc2.Namespace = job2.Namespace summary2 := mock.JobSummary(alloc2.JobID) - require.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) + must.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) alloc3 := mockAlloc() alloc3.JobID = job3.ID alloc3.Name = job3.Name + ".task[0]" alloc3.Namespace = job3.Namespace summary3 := mock.JobSummary(alloc3.JobID) - require.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) + must.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) // Upsert the allocs - require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), []*structs.Allocation{alloc1, alloc2, alloc3})) + must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) token := mock.CreateToken(t, fsmState, inc(), []string{"policyD"}) req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Allocs) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Allocs], 1) - require.Equal(t, "teamB-job2.task[0]", resp.Matches[structs.Allocs][0].ID) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 1, resp.Matches[structs.Allocs]) + must.Eq(t, "teamB-job2.task[0]", resp.Matches[structs.Allocs][0].ID) }) // Using a management token should return allocs from all the jobs. 
@@ -2385,7 +2386,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc1.Name = "test-alloc.one[0]" alloc1.Namespace = job1.Namespace summary1 := mock.JobSummary(alloc1.JobID) - require.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) + must.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) alloc2 := mockAlloc() alloc2.ID = uuid.Generate() @@ -2393,7 +2394,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc2.Name = "test-alloc.two[0]" alloc2.Namespace = job2.Namespace summary2 := mock.JobSummary(alloc2.JobID) - require.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) + must.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) alloc3 := mockAlloc() alloc3.ID = uuid.Generate() @@ -2401,21 +2402,21 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc3.Name = "test-alloc.three[0]" alloc3.Namespace = job3.Namespace summary3 := mock.JobSummary(alloc3.JobID) - require.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) + must.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) // Upsert the allocs - require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), []*structs.Allocation{alloc1, alloc2, alloc3})) + must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) req := request("alloc", structs.AllNamespacesSentinel, root.SecretID, structs.Allocs) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Allocs], 3) - require.Equal(t, alloc1.Name, resp.Matches[structs.Allocs][0].ID) - require.Equal(t, []string{"teamA", alloc1.ID}, resp.Matches[structs.Allocs][0].Scope) - require.Equal(t, alloc2.Name, resp.Matches[structs.Allocs][1].ID) - require.Equal(t, []string{"teamB", alloc2.ID}, resp.Matches[structs.Allocs][1].Scope) - require.Equal(t, alloc3.Name, resp.Matches[structs.Allocs][2].ID) - require.Equal(t, []string{"teamC", alloc3.ID}, resp.Matches[structs.Allocs][2].Scope) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.Len(t, 3, resp.Matches[structs.Allocs]) + must.Eq(t, alloc1.Name, resp.Matches[structs.Allocs][0].ID) + must.Eq(t, []string{"teamA", alloc1.ID}, resp.Matches[structs.Allocs][0].Scope) + must.Eq(t, alloc2.Name, resp.Matches[structs.Allocs][1].ID) + must.Eq(t, []string{"teamB", alloc2.ID}, resp.Matches[structs.Allocs][1].Scope) + must.Eq(t, alloc3.Name, resp.Matches[structs.Allocs][2].ID) + must.Eq(t, []string{"teamC", alloc3.ID}, resp.Matches[structs.Allocs][2].Scope) }) // Allow plugin read and wildcard namespace @@ -2431,9 +2432,9 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("teams", structs.AllNamespacesSentinel, token.SecretID, structs.Plugins) var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - require.Len(t, resp.Matches[structs.Plugins], 1) + must.Len(t, 1, resp.Matches[structs.Plugins]) require.Empty(t, resp.Matches[structs.Plugins][0].Scope) // no scope }) } @@ -2498,9 +2499,9 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { ns := mock.Namespace() ns.Name = job.Namespace - require.NoError(t, fsmState.UpsertNamespaces(2000, []*structs.Namespace{ns})) + must.NoError(t, fsmState.UpsertNamespaces(2000, []*structs.Namespace{ns})) registerJob(s, t, job) - 
require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1003, mock.Node())) + must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1003, mock.Node())) t.Run("sleep", func(t *testing.T) { req := &structs.FuzzySearchRequest{ @@ -2512,16 +2513,16 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }, } var resp structs.FuzzySearchResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) m := resp.Matches - require.Equal(t, uint64(1000), resp.Index) // job is explicit search context, has id=1000 + must.Eq(t, uint64(1000), resp.Index) // job is explicit search context, has id=1000 // just the one job - require.Len(t, m[structs.Jobs], 1) + must.Len(t, 1, m[structs.Jobs]) // 3 services (1 group, 2 task) - require.Len(t, m[structs.Services], 3) - require.Equal(t, []structs.FuzzyMatch{{ + must.Len(t, 3, m[structs.Services]) + must.Eq(t, []structs.FuzzyMatch{{ ID: "some-sleepy-task-svc-one", Scope: []string{"team-sleepy", job.ID, "qa-sleeper-group-one", "qa-sleep-task-one"}, }, { @@ -2533,8 +2534,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Services]) // 3 groups - require.Len(t, m[structs.Groups], 3) - require.Equal(t, []structs.FuzzyMatch{{ + must.Len(t, 3, m[structs.Groups]) + must.Eq(t, []structs.FuzzyMatch{{ ID: "sleep-in-java", Scope: []string{"team-sleepy", job.ID}, }, { @@ -2546,8 +2547,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Groups]) // 3 tasks (1 does not match) - require.Len(t, m[structs.Tasks], 3) - require.Equal(t, []structs.FuzzyMatch{{ + must.Len(t, 3, m[structs.Tasks]) + must.Eq(t, []structs.FuzzyMatch{{ ID: "qa-sleep-task-one", Scope: []string{"team-sleepy", job.ID, "qa-sleeper-group-one"}, }, { @@ -2559,8 +2560,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Tasks]) // 2 tasks with command - require.Len(t, m[structs.Commands], 2) - require.Equal(t, []structs.FuzzyMatch{{ + must.Len(t, 2, m[structs.Commands]) + must.Eq(t, []structs.FuzzyMatch{{ ID: "/bin/sleep", Scope: []string{"team-sleepy", job.ID, "prod-sleeper-group-one", "prod-sleep-task-one"}, }, { @@ -2569,15 +2570,15 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Commands]) // 1 task with image - require.Len(t, m[structs.Images], 1) - require.Equal(t, []structs.FuzzyMatch{{ + must.Len(t, 1, m[structs.Images]) + must.Eq(t, []structs.FuzzyMatch{{ ID: "sleeper:latest", Scope: []string{"team-sleepy", job.ID, "qa-sleeper-group-one", "qa-sleep-task-one"}, }}, m[structs.Images]) // 1 task with class - require.Len(t, m[structs.Classes], 1) - require.Equal(t, []structs.FuzzyMatch{{ + must.Len(t, 1, m[structs.Classes]) + must.Eq(t, []structs.FuzzyMatch{{ ID: "sleep.class", Scope: []string{"team-sleepy", job.ID, "sleep-in-java", "prod-java-sleep"}, }}, m[structs.Classes]) @@ -2596,6 +2597,6 @@ func TestSearch_FuzzySearch_fuzzyIndex(t *testing.T) { {name: "foo-bar-baz", text: "zap", exp: -1}, } { result := fuzzyIndex(tc.name, tc.text) - require.Equal(t, tc.exp, result, "name: %s, text: %s, exp: %d, got: %d", tc.name, tc.text, tc.exp, result) + must.Eq(t, tc.exp, result, must.Sprintf("name: %s, text: %s, exp: %d, got: %d", tc.name, tc.text, tc.exp, result)) } } diff --git a/nomad/service_registration_endpoint_test.go b/nomad/service_registration_endpoint_test.go index 07d8ef17dfb..b192a9fc4ec 100644 --- a/nomad/service_registration_endpoint_test.go +++ b/nomad/service_registration_endpoint_test.go @@ -878,7 +878,7 
@@ func TestServiceRegistration_List(t *testing.T) { allocs[0].Namespace = "platform" require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) signAllocIdentities(s.encrypter, job, allocs, time.Now()) - require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, allocs)) + require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, time.Now().UnixNano(), allocs)) signedToken := allocs[0].SignedIdentities["web"] @@ -1155,7 +1155,7 @@ func TestServiceRegistration_GetService(t *testing.T) { job := allocs[0].Job require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) signAllocIdentities(s.encrypter, job, allocs, time.Now()) - require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, allocs)) + require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, time.Now().UnixNano(), allocs)) signedToken := allocs[0].SignedIdentities["web"] diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index cfff64bafd1..4e36280fd17 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -366,7 +366,7 @@ RUN_QUERY: } // UpsertPlanResults is used to upsert the results of a plan. -func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64, results *structs.ApplyPlanResultsRequest) error { +func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64, now int64, results *structs.ApplyPlanResultsRequest) error { snapshot, err := s.Snapshot() if err != nil { return err @@ -408,14 +408,14 @@ func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64 // Upsert the newly created or updated deployment if results.Deployment != nil { - if err := s.upsertDeploymentImpl(index, results.Deployment, txn); err != nil { + if err := s.upsertDeploymentImpl(index, now, results.Deployment, txn); err != nil { return err } } // Update the status of deployments effected by the plan. if len(results.DeploymentUpdates) != 0 { - s.upsertDeploymentUpdates(index, results.DeploymentUpdates, txn) + s.upsertDeploymentUpdates(index, now, results.DeploymentUpdates, txn) } if results.EvalID != "" { @@ -457,7 +457,7 @@ func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64 alloc.Canonicalize() } - if err := s.upsertAllocsImpl(index, allocsToUpsert, txn); err != nil { + if err := s.upsertAllocsImpl(index, now, allocsToUpsert, txn); err != nil { return err } @@ -515,9 +515,9 @@ func addComputedAllocAttrs(allocs []*structs.Allocation, job *structs.Job) { // upsertDeploymentUpdates updates the deployments given the passed status // updates. -func (s *StateStore) upsertDeploymentUpdates(index uint64, updates []*structs.DeploymentStatusUpdate, txn *txn) error { +func (s *StateStore) upsertDeploymentUpdates(index uint64, now int64, updates []*structs.DeploymentStatusUpdate, txn *txn) error { for _, u := range updates { - if err := s.updateDeploymentStatusImpl(index, u, txn); err != nil { + if err := s.updateDeploymentStatusImpl(index, now, u, txn); err != nil { return err } } @@ -575,24 +575,22 @@ func (s *StateStore) DeleteJobSummary(index uint64, namespace, id string) error } // UpsertDeployment is used to insert or update a new deployment. 
-func (s *StateStore) UpsertDeployment(index uint64, deployment *structs.Deployment) error { +func (s *StateStore) UpsertDeployment(index uint64, now int64, deployment *structs.Deployment) error { txn := s.db.WriteTxn(index) defer txn.Abort() - if err := s.upsertDeploymentImpl(index, deployment, txn); err != nil { + if err := s.upsertDeploymentImpl(index, now, deployment, txn); err != nil { return err } return txn.Commit() } -func (s *StateStore) upsertDeploymentImpl(index uint64, deployment *structs.Deployment, txn *txn) error { +func (s *StateStore) upsertDeploymentImpl(index uint64, now int64, deployment *structs.Deployment, txn *txn) error { // Check if the deployment already exists existing, err := txn.First("deployment", "id", deployment.ID) if err != nil { return fmt.Errorf("deployment lookup failed: %v", err) } - now := time.Now().UnixNano() - // Setup the indexes and timestamps correctly if existing != nil { deployment.CreateIndex = existing.(*structs.Deployment).CreateIndex @@ -2563,7 +2561,7 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) } // UpsertCSIVolume inserts a volume in the state store. -func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) error { +func (s *StateStore) UpsertCSIVolume(index uint64, now int64, volumes []*structs.CSIVolume) error { txn := s.db.WriteTxn(index) defer txn.Abort() @@ -2589,10 +2587,10 @@ func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) } } else { v.CreateIndex = index - v.CreateTime = time.Now().UnixNano() + v.CreateTime = now } v.ModifyIndex = index - v.ModifyTime = time.Now().UnixNano() + v.ModifyTime = now // Allocations are copy on write, so we want to keep the Allocation ID // but we need to clear the pointer so that we don't store it when we @@ -2787,7 +2785,7 @@ func (s *StateStore) csiVolumesByNamespaceImpl(txn *txn, ws memdb.WatchSet, name } // CSIVolumeClaim updates the volume's claim count and allocation list -func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, claim *structs.CSIVolumeClaim) error { +func (s *StateStore) CSIVolumeClaim(index uint64, now int64, namespace, id string, claim *structs.CSIVolumeClaim) error { txn := s.db.WriteTxn(index) defer txn.Abort() @@ -2836,7 +2834,7 @@ func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, claim *s } volume.ModifyIndex = index - volume.ModifyTime = time.Now().UnixNano() + volume.ModifyTime = now // Allocations are copy on write, so we want to keep the Allocation ID // but we need to clear the pointer so that we don't store it when we @@ -3168,7 +3166,7 @@ func (s *StateStore) CSIPluginDenormalizeTxn(txn Txn, ws memdb.WatchSet, plug *s // UpsertCSIPlugin writes the plugin to the state store. Note: there // is currently no raft message for this, as it's intended to support // testing use cases. 
-func (s *StateStore) UpsertCSIPlugin(index uint64, plug *structs.CSIPlugin) error { +func (s *StateStore) UpsertCSIPlugin(index uint64, now int64, plug *structs.CSIPlugin) error { txn := s.db.WriteTxn(index) defer txn.Abort() @@ -3178,7 +3176,7 @@ func (s *StateStore) UpsertCSIPlugin(index uint64, plug *structs.CSIPlugin) erro } plug.ModifyIndex = index - plug.ModifyTime = time.Now().UnixNano() + plug.ModifyTime = now if existing != nil { plug.CreateIndex = existing.(*structs.CSIPlugin).CreateIndex plug.CreateTime = existing.(*structs.CSIPlugin).CreateTime @@ -3947,7 +3945,7 @@ func (s *StateStore) EvalsByNamespaceOrdered(ws memdb.WatchSet, namespace string // most things, some updates are authoritative from the client. Specifically, // the desired state comes from the schedulers, while the actual state comes // from clients. -func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index uint64, allocs []*structs.Allocation) error { +func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index uint64, now int64, allocs []*structs.Allocation) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() @@ -3958,7 +3956,7 @@ func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index u // Handle each of the updated allocations for _, alloc := range allocs { nodeIDs.Insert(alloc.NodeID) - if err := s.nestedUpdateAllocFromClient(txn, index, alloc); err != nil { + if err := s.nestedUpdateAllocFromClient(txn, index, now, alloc); err != nil { return err } } @@ -3979,7 +3977,7 @@ func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index u } // nestedUpdateAllocFromClient is used to nest an update of an allocation with client status -func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, alloc *structs.Allocation) error { +func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, now int64, alloc *structs.Allocation) error { // Look for existing alloc existing, err := txn.First("allocs", "id", alloc.ID) if err != nil { @@ -4027,7 +4025,7 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, alloc * // Update the modify time copyAlloc.ModifyTime = alloc.ModifyTime - if err := s.updateDeploymentWithAlloc(index, copyAlloc, exist, txn); err != nil { + if err := s.updateDeploymentWithAlloc(index, now, copyAlloc, exist, txn); err != nil { return fmt.Errorf("error updating deployment: %v", err) } @@ -4097,10 +4095,10 @@ func (s *StateStore) updateClientAllocUpdateIndex(txn *txn, index uint64, nodeID // UpsertAllocs is used to evict a set of allocations and allocate new ones at // the same time. -func (s *StateStore) UpsertAllocs(msgType structs.MessageType, index uint64, allocs []*structs.Allocation) error { +func (s *StateStore) UpsertAllocs(msgType structs.MessageType, index uint64, now int64, allocs []*structs.Allocation) error { txn := s.db.WriteTxn(index) defer txn.Abort() - if err := s.upsertAllocsImpl(index, allocs, txn); err != nil { + if err := s.upsertAllocsImpl(index, now, allocs, txn); err != nil { return err } return txn.Commit() @@ -4108,7 +4106,7 @@ func (s *StateStore) UpsertAllocs(msgType structs.MessageType, index uint64, all // upsertAllocs is the actual implementation of UpsertAllocs so that it may be // used with an existing transaction. 
-func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation, txn *txn) error { +func (s *StateStore) upsertAllocsImpl(index uint64, now int64, allocs []*structs.Allocation, txn *txn) error { // Handle the allocations jobs := make(map[structs.NamespacedID]string, 1) for _, alloc := range allocs { @@ -4166,7 +4164,7 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation // These should be given a map of new to old allocation and the updates // should be one on all changes. The current implementation causes O(n) // lookups/copies/insertions rather than O(1) - if err := s.updateDeploymentWithAlloc(index, alloc, exist, txn); err != nil { + if err := s.updateDeploymentWithAlloc(index, now, alloc, exist, txn); err != nil { return fmt.Errorf("error updating deployment: %v", err) } @@ -4831,11 +4829,11 @@ func (s *StateStore) SITokenAccessorsByNode(ws memdb.WatchSet, nodeID string) ([ // UpdateDeploymentStatus is used to make deployment status updates and // potentially make a evaluation -func (s *StateStore) UpdateDeploymentStatus(msgType structs.MessageType, index uint64, req *structs.DeploymentStatusUpdateRequest) error { +func (s *StateStore) UpdateDeploymentStatus(msgType structs.MessageType, index uint64, now int64, req *structs.DeploymentStatusUpdateRequest) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() - if err := s.updateDeploymentStatusImpl(index, req.DeploymentUpdate, txn); err != nil { + if err := s.updateDeploymentStatusImpl(index, now, req.DeploymentUpdate, txn); err != nil { return err } @@ -4857,7 +4855,7 @@ func (s *StateStore) UpdateDeploymentStatus(msgType structs.MessageType, index u } // updateDeploymentStatusImpl is used to make deployment status updates -func (s *StateStore) updateDeploymentStatusImpl(index uint64, u *structs.DeploymentStatusUpdate, txn *txn) error { +func (s *StateStore) updateDeploymentStatusImpl(index uint64, now int64, u *structs.DeploymentStatusUpdate, txn *txn) error { // Retrieve deployment ws := memdb.NewWatchSet() deployment, err := s.deploymentByIDImpl(ws, u.DeploymentID, txn) @@ -4874,7 +4872,7 @@ func (s *StateStore) updateDeploymentStatusImpl(index uint64, u *structs.Deploym copy.Status = u.Status copy.StatusDescription = u.StatusDescription copy.ModifyIndex = index - copy.ModifyTime = time.Now().UnixNano() + copy.ModifyTime = now // Insert the deployment if err := txn.Insert("deployment", copy); err != nil { @@ -5023,7 +5021,7 @@ func (s *StateStore) unsetJobVersionTagImpl(index uint64, namespace, jobID strin // UpdateDeploymentPromotion is used to promote canaries in a deployment and // potentially make a evaluation -func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, index uint64, req *structs.ApplyDeploymentPromoteRequest) error { +func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, index uint64, now int64, req *structs.ApplyDeploymentPromoteRequest) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() @@ -5116,7 +5114,7 @@ func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, inde // Update deployment copy := deployment.Copy() copy.ModifyIndex = index - copy.ModifyTime = time.Now().UnixNano() + copy.ModifyTime = now for tg, status := range copy.TaskGroups { _, ok := groupIndex[tg] if !req.All && !ok { @@ -5136,7 +5134,7 @@ func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, inde } // Insert the deployment - if err := s.upsertDeploymentImpl(index, copy, txn); err != 
nil { + if err := s.upsertDeploymentImpl(index, now, copy, txn); err != nil { return err } @@ -5170,7 +5168,7 @@ func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, inde // UpdateDeploymentAllocHealth is used to update the health of allocations as // part of the deployment and potentially make a evaluation -func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, index uint64, req *structs.ApplyDeploymentAllocHealthRequest) error { +func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, index uint64, now int64, req *structs.ApplyDeploymentAllocHealthRequest) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() @@ -5211,7 +5209,7 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in copy.DeploymentStatus.ModifyIndex = index copy.ModifyIndex = index - if err := s.updateDeploymentWithAlloc(index, copy, old, txn); err != nil { + if err := s.updateDeploymentWithAlloc(index, now, copy, old, txn); err != nil { return fmt.Errorf("error updating deployment: %v", err) } @@ -5241,7 +5239,7 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in // Update the deployment status as needed. if req.DeploymentUpdate != nil { - if err := s.updateDeploymentStatusImpl(index, req.DeploymentUpdate, txn); err != nil { + if err := s.updateDeploymentStatusImpl(index, now, req.DeploymentUpdate, txn); err != nil { return err } } @@ -5919,7 +5917,7 @@ func (s *StateStore) updateJobCSIPlugins(index uint64, job, prev *structs.Job, t // updateDeploymentWithAlloc is used to update the deployment state associated // with the given allocation. The passed alloc may be updated if the deployment // status has changed to capture the modify index at which it has changed. 
-func (s *StateStore) updateDeploymentWithAlloc(index uint64, alloc, existing *structs.Allocation, txn *txn) error { +func (s *StateStore) updateDeploymentWithAlloc(index uint64, now int64, alloc, existing *structs.Allocation, txn *txn) error { // Nothing to do if the allocation is not associated with a deployment if alloc.DeploymentID == "" { return nil @@ -5981,7 +5979,7 @@ func (s *StateStore) updateDeploymentWithAlloc(index uint64, alloc, existing *st // Create a copy of the deployment object deploymentCopy := deployment.Copy() deploymentCopy.ModifyIndex = index - deploymentCopy.ModifyTime = time.Now().UnixNano() + deploymentCopy.ModifyTime = now dstate := deploymentCopy.TaskGroups[alloc.TaskGroup] dstate.PlacedAllocs += placed @@ -6017,7 +6015,7 @@ func (s *StateStore) updateDeploymentWithAlloc(index uint64, alloc, existing *st } // Upsert the deployment - if err := s.upsertDeploymentImpl(index, deploymentCopy, txn); err != nil { + if err := s.upsertDeploymentImpl(index, now, deploymentCopy, txn); err != nil { return err } diff --git a/nomad/state/testing.go b/nomad/state/testing.go index cb955ffa46c..76221bc7dd7 100644 --- a/nomad/state/testing.go +++ b/nomad/state/testing.go @@ -231,7 +231,7 @@ func TestBadCSIState(t testing.TB, store *StateStore) error { alloc1.DesiredStatus = structs.AllocDesiredStatusRun // Insert allocs into the state store - err := store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1}) + err := store.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { return err } @@ -311,7 +311,7 @@ func TestBadCSIState(t testing.TB, store *StateStore) error { } vol = vol.Copy() // canonicalize - err = store.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) + err = store.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol}) if err != nil { return err } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index f552b70c9f3..5453e25c91c 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -368,7 +368,7 @@ func TestCSIVolumeChecker(t *testing.T) { {Segments: map[string]string{"rack": "R1"}}, {Segments: map[string]string{"rack": "R2"}}, } - err := state.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) + err := state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol}) must.NoError(t, err) index++ @@ -379,14 +379,14 @@ func TestCSIVolumeChecker(t *testing.T) { vol2.Namespace = structs.DefaultNamespace vol2.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter vol2.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem - err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol2}) + err = state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol2}) must.NoError(t, err) index++ vid3 := "volume-id[0]" vol3 := vol.Copy() vol3.ID = vid3 - err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol3}) + err = state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol3}) must.NoError(t, err) index++ @@ -405,7 +405,7 @@ func TestCSIVolumeChecker(t *testing.T) { summary := mock.JobSummary(alloc.JobID) must.NoError(t, state.UpsertJobSummary(index, summary)) index++ - err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc}) must.NoError(t, err) index++ @@ -1857,7 +1857,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t 
*testing.T) { NodeID: nodes[4].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2066,7 +2066,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { NodeID: nodes[1].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2153,7 +2153,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin NodeID: nodes[0].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2232,7 +2232,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { NodeID: nodes[1].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2329,7 +2329,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin NodeID: nodes[1].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2446,7 +2446,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { NodeID: nodes[2].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } diff --git a/scheduler/preemption_test.go b/scheduler/preemption_test.go index eb718f5dcd3..3a8c7c9ec97 100644 --- a/scheduler/preemption_test.go +++ b/scheduler/preemption_test.go @@ -8,6 +8,7 @@ import ( "maps" "strconv" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/numalib" @@ -1396,7 +1397,7 @@ func TestPreemption_Normal(t *testing.T) { alloc.NodeID = node.ID } require := require.New(t) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, tc.currentAllocations) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), tc.currentAllocations) require.Nil(err) if tc.currentPreemptions != nil { @@ -1531,7 +1532,7 @@ func TestPreemptionMultiple(t *testing.T) { allocs = append(allocs, alloc) allocIDs[alloc.ID] = struct{}{} } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().Unix(), allocs)) // new high priority job with 2 allocs, each using 2 GPUs highPrioJob := mock.Job() diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index 7c14fa10f47..df1c0b98548 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -7,6 +7,7 @@ import ( "fmt" "sort" "testing" + "time" 
"github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" @@ -115,7 +116,7 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusRunning allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Add a new node. node := mock.Node() @@ -193,7 +194,7 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Add a new node. node := mock.Node() @@ -270,7 +271,7 @@ func TestSysBatch_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusPending allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Add a few terminal status allocations, these should be reinstated var terminal []*structs.Allocation @@ -283,7 +284,7 @@ func TestSysBatch_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) // Update the job job2 := mock.SystemBatchJob() @@ -358,7 +359,7 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { alloc.Name = "my-sysbatch.pinger[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := mock.SystemBatchJob() @@ -435,7 +436,7 @@ func TestSysBatch_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -498,7 +499,7 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { for _, alloc := range allocs { require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -556,7 +557,7 @@ func TestSysBatch_NodeDown(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), 
time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -616,7 +617,7 @@ func TestSysBatch_NodeDrain_Down(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -670,7 +671,7 @@ func TestSysBatch_NodeDrain(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -726,7 +727,7 @@ func TestSysBatch_NodeUpdate(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-system.pinger[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1334,7 +1335,7 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { alloc2.NodeID = node2.ID alloc2.Name = "my-sysbatch.pinger2[0]" alloc2.TaskGroup = "pinger2" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1583,7 +1584,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) // Create a high priority job and allocs for it // These allocs should not be preempted @@ -1627,7 +1628,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, } require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed job := mock.SystemBatchJob() diff --git a/scheduler/testing.go b/scheduler/testing.go index 347a279069c..8f35047c8f9 100644 --- a/scheduler/testing.go +++ b/scheduler/testing.go @@ -180,7 +180,7 @@ func (h *Harness) SubmitPlan(plan *structs.Plan) (*structs.PlanResult, State, er } // Apply the full plan - err := h.State.UpsertPlanResults(structs.MsgTypeTestSetup, index, &req) + err := h.State.UpsertPlanResults(structs.MsgTypeTestSetup, 
index, time.Now().UnixNano(), &req) return result, nil, err } diff --git a/scheduler/util_test.go b/scheduler/util_test.go index b0d17b37aa1..980a5b267ec 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -829,7 +829,7 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a new task group that prevents in-place updates. tg := &structs.TaskGroup{} @@ -885,7 +885,7 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Update TG to add a new service (inplace) tg := job.TaskGroups[0] @@ -945,7 +945,7 @@ func TestInplaceUpdate_NoMatch(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a new task group that requires too much resources. tg := &structs.TaskGroup{} @@ -998,7 +998,7 @@ func TestInplaceUpdate_Success(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a new task group that updates the resources. 
tg := &structs.TaskGroup{} @@ -1070,7 +1070,7 @@ func TestInplaceUpdate_WildcardDatacenters(t *testing.T) { alloc.Job = job alloc.JobID = job.ID must.NoError(t, store.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) updates := []allocTuple{{Alloc: alloc, TaskGroup: job.TaskGroups[0]}} stack := NewGenericStack(false, ctx) @@ -1111,7 +1111,7 @@ func TestInplaceUpdate_NodePools(t *testing.T) { t.Logf("alloc1=%s alloc2=%s", alloc1.ID, alloc2.ID) - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) updates := []allocTuple{ From a3b1538ec1792e2fe37ba423b139e2fb8de6f0d6 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:55:29 +0100 Subject: [PATCH 14/40] i am miserable now --- command/agent/deployment_endpoint_test.go | 28 +- command/agent/job_endpoint_test.go | 6 +- command/agent/node_endpoint_test.go | 6 +- command/alloc_checks_test.go | 3 +- command/alloc_fs_test.go | 3 +- command/alloc_restart_test.go | 3 +- command/alloc_status_test.go | 14 +- command/deployment_pause_test.go | 3 +- command/deployment_resume_test.go | 3 +- command/deployment_status_test.go | 3 +- command/deployment_unblock_test.go | 3 +- command/job_deployments_test.go | 7 +- command/job_eval_test.go | 3 +- command/job_status_test.go | 2 +- command/volume_status_test.go | 3 +- nomad/acl_endpoint_test.go | 2 +- nomad/auth/auth_test.go | 8 +- nomad/client_alloc_endpoint_test.go | 38 +- nomad/client_fs_endpoint_test.go | 48 +-- nomad/core_sched_test.go | 65 ++-- nomad/csi_endpoint_test.go | 44 +-- nomad/deployment_endpoint_test.go | 89 ++--- .../deployments_watcher_test.go | 2 +- nomad/drainer/draining_node_test.go | 60 ++-- nomad/drainer/watch_jobs_test.go | 30 +- nomad/eval_endpoint_test.go | 14 +- nomad/fsm.go | 23 +- nomad/heartbeat_test.go | 23 +- nomad/namespace_endpoint_test.go | 2 +- nomad/plan_apply.go | 2 +- nomad/plan_apply_test.go | 22 +- nomad/state/deployment_events_test.go | 4 +- nomad/state/events_test.go | 22 +- nomad/state/state_store_test.go | 337 ++++++++++-------- nomad/variables_endpoint_test.go | 6 +- nomad/volumewatcher/volumes_watcher_test.go | 34 +- nomad/worker_test.go | 2 +- scheduler/context_test.go | 5 +- scheduler/generic_sched_test.go | 146 ++++---- scheduler/rank_test.go | 11 +- scheduler/scheduler_system_test.go | 38 +- scheduler/spread_test.go | 6 +- scheduler/stack_test.go | 3 +- 43 files changed, 640 insertions(+), 536 deletions(-) diff --git a/command/agent/deployment_endpoint_test.go b/command/agent/deployment_endpoint_test.go index dc9d3e3105d..e7a0c770262 100644 --- a/command/agent/deployment_endpoint_test.go +++ b/command/agent/deployment_endpoint_test.go @@ -7,6 +7,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -17,13 +18,14 @@ import ( func TestHTTP_DeploymentList(t *testing.T) { ci.Parallel(t) assert := assert.New(t) + now := time.Now().UnixNano() httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() d1 := mock.Deployment() d2 := mock.Deployment() - assert.Nil(state.UpsertDeployment(999, 
d1), "UpsertDeployment") - assert.Nil(state.UpsertDeployment(1000, d2), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(999, now, d1), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, now, d2), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployments", nil) @@ -48,6 +50,7 @@ func TestHTTP_DeploymentList(t *testing.T) { func TestHTTP_DeploymentPrefixList(t *testing.T) { ci.Parallel(t) assert := assert.New(t) + now := time.Now().UnixNano() httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -55,8 +58,8 @@ func TestHTTP_DeploymentPrefixList(t *testing.T) { d1.ID = "aaabbbbb-e8f7-fd38-c855-ab94ceb89706" d2 := mock.Deployment() d2.ID = "aaabbbbb-e8f7-fd38-c855-ab94ceb89706" - assert.Nil(state.UpsertDeployment(999, d1), "UpsertDeployment") - assert.Nil(state.UpsertDeployment(1000, d2), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(999, now, d1), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, now, d2), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployments?prefix=aaab", nil) @@ -82,6 +85,7 @@ func TestHTTP_DeploymentPrefixList(t *testing.T) { func TestHTTP_DeploymentAllocations(t *testing.T) { ci.Parallel(t) assert := assert.New(t) + now := time.Now().UnixNano() httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -112,8 +116,8 @@ func TestHTTP_DeploymentAllocations(t *testing.T) { a2.TaskStates["test"] = taskState2 assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a1, a2}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(999, now, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{a1, a2}), "UpsertAllocs") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployment/allocations/"+d.ID, nil) @@ -147,7 +151,7 @@ func TestHTTP_DeploymentQuery(t *testing.T) { // Directly manipulate the state state := s.Agent.server.State() d := mock.Deployment() - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployment/"+d.ID, nil) @@ -179,7 +183,7 @@ func TestHTTP_DeploymentPause(t *testing.T) { d := mock.Deployment() d.JobID = j.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Create the pause request args := structs.DeploymentPauseRequest{ @@ -220,7 +224,7 @@ func TestHTTP_DeploymentPromote(t *testing.T) { d := mock.Deployment() d.JobID = j.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Create the pause request args := structs.DeploymentPromoteRequest{ @@ -264,8 +268,8 @@ func TestHTTP_DeploymentAllocHealth(t *testing.T) { a.JobID = j.ID a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") - 
assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(999, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Create the pause request args := structs.DeploymentAllocHealthRequest{ @@ -306,7 +310,7 @@ func TestHTTP_DeploymentFail(t *testing.T) { d := mock.Deployment() d.JobID = j.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(999, time.Now().UnixNano(), d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodPut, "/v1/deployment/fail/"+d.ID, nil) diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index be15d0eca0f..443be039193 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -1543,7 +1543,7 @@ func TestHTTP_JobAllocations(t *testing.T) { alloc1.TaskStates = make(map[string]*structs.TaskState) alloc1.TaskStates["test"] = taskState state := s.Agent.server.State() - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -1604,7 +1604,7 @@ func TestHTTP_JobDeployments(t *testing.T) { d.JobID = j.ID d.JobCreateIndex = resp.JobModifyIndex - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/job/"+j.ID+"/deployments", nil) @@ -1647,7 +1647,7 @@ func TestHTTP_JobDeployment(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.JobCreateIndex = resp.JobModifyIndex - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/job/"+j.ID+"/deployment", nil) diff --git a/command/agent/node_endpoint_test.go b/command/agent/node_endpoint_test.go index 4ff558c7a69..b2c04345b34 100644 --- a/command/agent/node_endpoint_test.go +++ b/command/agent/node_endpoint_test.go @@ -207,7 +207,7 @@ func TestHTTP_NodeForceEval(t *testing.T) { if err := state.UpsertJobSummary(999, mock.JobSummary(alloc1.JobID)); err != nil { t.Fatal(err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -267,7 +267,7 @@ func TestHTTP_NodeAllocations(t *testing.T) { alloc1.TaskStates = make(map[string]*structs.TaskState) alloc1.TaskStates["test"] = taskState - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -478,7 +478,7 @@ func TestHTTP_NodePurge(t *testing.T) { if err := state.UpsertJobSummary(999, mock.JobSummary(alloc1.JobID)); err != nil { t.Fatal(err) } - err := 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/command/alloc_checks_test.go b/command/alloc_checks_test.go index 652bce0a368..7df3fa48aaa 100644 --- a/command/alloc_checks_test.go +++ b/command/alloc_checks_test.go @@ -6,6 +6,7 @@ package command import ( "encoding/json" "testing" + "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -74,7 +75,7 @@ func TestAllocChecksCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_fs_test.go b/command/alloc_fs_test.go index 9e6ef96b7f1..f1c7680c8d3 100644 --- a/command/alloc_fs_test.go +++ b/command/alloc_fs_test.go @@ -5,6 +5,7 @@ package command import ( "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -101,7 +102,7 @@ func TestFSCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_restart_test.go b/command/alloc_restart_test.go index 0596f01e69a..6e562deab4d 100644 --- a/command/alloc_restart_test.go +++ b/command/alloc_restart_test.go @@ -5,6 +5,7 @@ package command import ( "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -152,7 +153,7 @@ func TestAllocRestartCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go index 6260b7963f1..bbd646a7061 100644 --- a/command/alloc_status_test.go +++ b/command/alloc_status_test.go @@ -123,7 +123,7 @@ func TestAllocStatusCommand_LifecycleInfo(t *testing.T) { "prestart_sidecar": {State: "running"}, } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) code := cmd.Run([]string{"-address=" + url, a.ID}) must.Zero(t, code) @@ -226,7 +226,7 @@ func TestAllocStatusCommand_RescheduleInfo(t *testing.T) { }, }, } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) if code := cmd.Run([]string{"-address=" + url, a.ID}); code != 0 { t.Fatalf("expected exit 0, got: %d", code) @@ -269,7 +269,7 @@ func 
TestAllocStatusCommand_ScoreMetrics(t *testing.T) { }, }, } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) code := cmd.Run([]string{"-address=" + url, "-verbose", a.ID}) must.Zero(t, code) @@ -296,7 +296,7 @@ func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} @@ -359,7 +359,7 @@ func TestAllocStatusCommand_HostVolumes(t *testing.T) { } summary := mock.JobSummary(alloc.JobID) must.NoError(t, state.UpsertJobSummary(1004, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, time.Now().UnixNano(), []*structs.Allocation{alloc})) ui := cli.NewMockUi() cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} @@ -402,7 +402,7 @@ func TestAllocStatusCommand_CSIVolumes(t *testing.T) { Segments: map[string]string{"foo": "bar"}, }}, }} - err = state.UpsertCSIVolume(1002, vols) + err = state.UpsertCSIVolume(1002, time.Now().UnixNano(), vols) must.NoError(t, err) // Upsert the job and alloc @@ -435,7 +435,7 @@ func TestAllocStatusCommand_CSIVolumes(t *testing.T) { } summary := mock.JobSummary(alloc.JobID) must.NoError(t, state.UpsertJobSummary(1004, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, time.Now().UnixNano(), []*structs.Allocation{alloc})) ui := cli.NewMockUi() cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} diff --git a/command/deployment_pause_test.go b/command/deployment_pause_test.go index f8356af7cac..58a25871cd7 100644 --- a/command/deployment_pause_test.go +++ b/command/deployment_pause_test.go @@ -6,6 +6,7 @@ package command import ( "strings" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -54,7 +55,7 @@ func TestDeploymentPauseCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, d)) + must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_resume_test.go b/command/deployment_resume_test.go index ee4f97c1922..f71f445d4b4 100644 --- a/command/deployment_resume_test.go +++ b/command/deployment_resume_test.go @@ -6,6 +6,7 @@ package command import ( "strings" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -54,7 +55,7 @@ func TestDeploymentResumeCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, d)) + must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_status_test.go b/command/deployment_status_test.go index 62e86428203..85da942e462 100644 --- 
a/command/deployment_status_test.go +++ b/command/deployment_status_test.go @@ -5,6 +5,7 @@ package command import ( "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -65,7 +66,7 @@ func TestDeploymentStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, d)) + must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_unblock_test.go b/command/deployment_unblock_test.go index 96430fc71f5..1a40882bde3 100644 --- a/command/deployment_unblock_test.go +++ b/command/deployment_unblock_test.go @@ -6,6 +6,7 @@ package command import ( "strings" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -54,7 +55,7 @@ func TestDeploymentUnblockCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, d)) + must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/job_deployments_test.go b/command/job_deployments_test.go index ab1f4336675..e48c8a79e49 100644 --- a/command/job_deployments_test.go +++ b/command/job_deployments_test.go @@ -6,6 +6,7 @@ package command import ( "strings" "testing" + "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -77,7 +78,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - must.NoError(t, state.UpsertDeployment(200, d)) + must.NoError(t, state.UpsertDeployment(200, time.Now().UnixNano(), d)) // Should now display the deployment if code := cmd.Run([]string{"-address=" + url, "-verbose", job.ID}); code != 0 { @@ -121,7 +122,7 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - must.NoError(t, state.UpsertDeployment(200, d)) + must.NoError(t, state.UpsertDeployment(200, time.Now().UnixNano(), d)) // Should now display the deployment if code := cmd.Run([]string{"-address=" + url, "-verbose", "-latest", job.ID}); code != 0 { @@ -174,7 +175,7 @@ func TestJobDeploymentsCommand_ACL(t *testing.T) { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - err = state.UpsertDeployment(101, d) + err = state.UpsertDeployment(101, time.Now().UnixNano(), d) must.NoError(t, err) testCases := []struct { diff --git a/command/job_eval_test.go b/command/job_eval_test.go index fd081b9ee97..9004e315796 100644 --- a/command/job_eval_test.go +++ b/command/job_eval_test.go @@ -7,6 +7,7 @@ import ( "fmt" "strings" "testing" + "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -90,7 +91,7 @@ func TestJobEvalCommand_Run(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.Namespace = job.Namespace alloc.ClientStatus = structs.AllocClientStatusFailed - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 12, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 12, time.Now().UnixNano(), []*structs.Allocation{alloc}) must.NoError(t, err) if code := cmd.Run([]string{"-address=" + url, "-force-reschedule", "-detach", job.ID}); code != 0 { diff --git 
a/command/job_status_test.go b/command/job_status_test.go index 90800a52786..e5a3235b098 100644 --- a/command/job_status_test.go +++ b/command/job_status_test.go @@ -380,7 +380,7 @@ func TestJobStatusCommand_RescheduleEvals(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) // Query jobs with prefix match if code := cmd.Run([]string{"-address=" + url, j.ID}); code != 0 { diff --git a/command/volume_status_test.go b/command/volume_status_test.go index 0fde4610f6b..dcfafe1fcac 100644 --- a/command/volume_status_test.go +++ b/command/volume_status_test.go @@ -5,6 +5,7 @@ package command import ( "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" @@ -50,7 +51,7 @@ func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { PluginID: "glade", } - must.NoError(t, state.UpsertCSIVolume(1000, []*structs.CSIVolume{vol})) + must.NoError(t, state.UpsertCSIVolume(1000, time.Now().UnixNano(), []*structs.CSIVolume{vol})) prefix := vol.ID[:len(vol.ID)-5] args := complete.Args{Last: prefix} diff --git a/nomad/acl_endpoint_test.go b/nomad/acl_endpoint_test.go index ce26d6a4d2a..af54538bf8d 100644 --- a/nomad/acl_endpoint_test.go +++ b/nomad/acl_endpoint_test.go @@ -1943,7 +1943,7 @@ func TestACLEndpoint_WhoAmI(t *testing.T) { // Lookup identity claim alloc := mock.Alloc() - s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1500, []*structs.Allocation{alloc}) + s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1500, time.Now().UnixNano(), []*structs.Allocation{alloc}) task := alloc.LookupTask("web") claims := structs.NewIdentityClaimsBuilder(alloc.Job, alloc, wiHandle, // see encrypter_test.go diff --git a/nomad/auth/auth_test.go b/nomad/auth/auth_test.go index 9be3bb78aa8..e7364c9e0d2 100644 --- a/nomad/auth/auth_test.go +++ b/nomad/auth/auth_test.go @@ -271,7 +271,7 @@ func TestAuthenticateDefault(t *testing.T) { must.EqError(t, err, "allocation does not exist") // insert alloc so it's live - store.UpsertAllocs(structs.MsgTypeTestSetup, 200, + store.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc}) args = &structs.GenericRequest{} @@ -289,7 +289,7 @@ func TestAuthenticateDefault(t *testing.T) { // alloc becomes terminal alloc.ClientStatus = structs.AllocClientStatusComplete - store.UpsertAllocs(structs.MsgTypeTestSetup, 200, + store.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc}) args = &structs.GenericRequest{} @@ -924,7 +924,7 @@ func TestIdentityToACLClaim(t *testing.T) { Encrypter: newTestEncrypter(), }) - store.UpsertAllocs(structs.MsgTypeTestSetup, 100, + store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) token, err := auth.encrypter.(*testEncrypter).signClaim(claims) @@ -1115,7 +1115,7 @@ func TestResolveClaims(t *testing.T) { // upsert the allocation index++ - err = auth.getState().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc, dispatchAlloc}) + err = auth.getState().UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc, dispatchAlloc}) must.NoError(t, err) // Resolve claims and check we that the ACL object 
without policies provides no access diff --git a/nomad/client_alloc_endpoint_test.go b/nomad/client_alloc_endpoint_test.go index 9ccc195dc4e..3ed2e419bf1 100644 --- a/nomad/client_alloc_endpoint_test.go +++ b/nomad/client_alloc_endpoint_test.go @@ -263,7 +263,7 @@ func TestClientAllocations_GarbageCollect_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*nstructs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) req := &nstructs.AllocSpecificRequest{ AllocID: alloc.ID, @@ -329,7 +329,7 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -387,7 +387,7 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) cases := []struct { Name string @@ -496,10 +496,11 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() + now := time.Now().UnixNano() require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -554,7 +555,7 @@ func TestClientAllocations_Stats_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*nstructs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) req := &nstructs.AllocSpecificRequest{ AllocID: alloc.ID, @@ -618,7 +619,7 @@ func TestClientAllocations_Stats_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -677,7 +678,7 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - 
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) cases := []struct { Name string @@ -773,10 +774,11 @@ func TestClientAllocations_Stats_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() + now := time.Now().UnixNano() require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -858,7 +860,7 @@ func TestClientAllocations_Restart_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -971,10 +973,11 @@ func TestClientAllocations_Restart_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() + now := time.Now().UnixNano() require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1033,7 +1036,7 @@ func TestClientAllocations_Restart_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) cases := []struct { Name string @@ -1139,12 +1142,13 @@ func TestAlloc_ExecStreaming(t *testing.T) { } // Upsert the allocation + now := time.Now().UnixNano() localState := localServer.State() require.Nil(t, localState.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(t, localState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(t, localState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) remoteState := remoteServer.State() require.Nil(t, remoteState.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(t, remoteState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) + require.Nil(t, 
remoteState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1261,7 +1265,7 @@ func TestAlloc_ExecStreaming_TerminalAlloc(t *testing.T) { state := s.State() err := state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, alloc.Job) must.NoError(t, err) - err = state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{alloc}) + err = state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{alloc}) must.NoError(t, err) // Make the exec request. diff --git a/nomad/client_fs_endpoint_test.go b/nomad/client_fs_endpoint_test.go index 470cad3dd21..3fc1437a772 100644 --- a/nomad/client_fs_endpoint_test.go +++ b/nomad/client_fs_endpoint_test.go @@ -69,7 +69,7 @@ func TestClientFS_List_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -129,7 +129,7 @@ func TestClientFS_List_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) cases := []struct { Name string @@ -228,10 +228,11 @@ func TestClientFS_List_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() + now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -288,7 +289,7 @@ func TestClientFS_Stat_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*structs.Allocation{alloc})) req := &cstructs.FsStatRequest{ AllocID: alloc.ID, @@ -345,7 +346,7 @@ func TestClientFS_Stat_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -405,7 +406,7 @@ func TestClientFS_Stat_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, 
state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) cases := []struct { Name string @@ -504,10 +505,11 @@ func TestClientFS_Stat_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() + now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -637,7 +639,7 @@ func TestClientFS_Streaming_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) cases := []struct { Name string @@ -778,7 +780,7 @@ func TestClientFS_Streaming_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -914,7 +916,7 @@ func TestClientFS_Streaming_Local_Follow(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1056,10 +1058,11 @@ func TestClientFS_Streaming_Remote_Server(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() + now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1203,7 +1206,7 @@ func TestClientFS_Streaming_Remote_Region(t *testing.T) { // Upsert the allocation state2 := s2.State() require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - 
require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1383,7 +1386,7 @@ func TestClientFS_Logs_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*structs.Allocation{alloc})) req := &cstructs.FsLogsRequest{ AllocID: alloc.ID, @@ -1466,7 +1469,7 @@ func TestClientFS_Logs_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) cases := []struct { Name string @@ -1607,7 +1610,7 @@ func TestClientFS_Logs_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1744,7 +1747,7 @@ func TestClientFS_Logs_Local_Follow(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1887,10 +1890,11 @@ func TestClientFS_Logs_Remote_Server(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() + now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -2035,7 +2039,7 @@ func TestClientFS_Logs_Remote_Region(t *testing.T) { // Upsert the allocation state2 := s2.State() require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 31f5d71a12c..31bc33f6c03 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ 
-61,7 +61,8 @@ func TestCoreScheduler_EvalGC(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusLost alloc2.JobID = eval.JobID alloc2.TaskGroup = job.TaskGroups[0].Name - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2})) + must.NoError(t, store.UpsertAllocs( + structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) // Insert service for "dead" alloc service := &structs.ServiceRegistration{ @@ -171,7 +172,7 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { }, }, } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) require.Nil(t, err) // Create a core scheduler @@ -243,7 +244,7 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { }, }, } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) require.Nil(t, err) // Create a core scheduler @@ -320,7 +321,11 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun stoppedJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+3, []*structs.Allocation{stoppedJobStoppedAlloc, stoppedJobLostAlloc}) + now := time.Now().UnixNano() + + err = store.UpsertAllocs( + structs.MsgTypeTestSetup, jobModifyIdx+3, now, + []*structs.Allocation{stoppedJobStoppedAlloc, stoppedJobLostAlloc}) must.NoError(t, err) // A "dead" job containing one "complete" eval with: @@ -354,7 +359,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { lostAlloc.DesiredStatus = structs.AllocDesiredStatusRun lostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Allocation{stoppedAlloc, lostAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, now, []*structs.Allocation{stoppedAlloc, lostAlloc}) must.NoError(t, err) // An "alive" job #2 containing two complete evals. The first with: @@ -392,7 +397,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun activeJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, now, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc}) must.NoError(t, err) activeJobCompleteEval := mock.Eval() @@ -409,7 +414,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompletedEvalCompletedAlloc.DesiredStatus = structs.AllocDesiredStatusStop activeJobCompletedEvalCompletedAlloc.ClientStatus = structs.AllocClientStatusComplete - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, now, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc}) must.NoError(t, err) // A job that ran once and was then purged. 
@@ -433,7 +438,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteAlloc.DesiredStatus = structs.AllocDesiredStatusRun purgedJobCompleteAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{purgedJobCompleteAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, now, []*structs.Allocation{purgedJobCompleteAlloc}) must.NoError(t, err) purgedJobCompleteEval := mock.Eval() @@ -701,7 +706,9 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { alloc2.DesiredStatus = structs.AllocDesiredStatusRun alloc2.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2}) + now := time.Now().UnixNano() + + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -711,7 +718,7 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { alloc3.EvalID = eval.ID alloc3.JobID = job.ID store.UpsertJobSummary(1003, mock.JobSummary(alloc3.JobID)) - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc3}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, now, []*structs.Allocation{alloc3}) if err != nil { t.Fatalf("err: %v", err) } @@ -814,7 +821,7 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.TaskGroup = job.TaskGroups[0].Name store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -924,7 +931,7 @@ func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { alloc := mock.Alloc() alloc.DesiredStatus = structs.AllocDesiredStatusStop store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) - if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}); err != nil { + if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -975,7 +982,7 @@ func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusRun alloc.ClientStatus = structs.AllocClientStatusRunning store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) - if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}); err != nil { + if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -1192,6 +1199,8 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { t.Fatalf("err: %v", err) } + now := time.Now().UnixNano() + // Insert two allocs, one terminal and one not alloc := mock.Alloc() alloc.JobID = job.ID @@ -1207,7 +1216,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusRunning alloc2.TaskGroup = job.TaskGroups[0].Name - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1254,7 +1263,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t 
*testing.T) { // Update the second alloc to be terminal alloc2.ClientStatus = structs.AllocClientStatusComplete - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1342,7 +1351,7 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) { alloc2.EvalID = eval2.ID alloc2.DesiredStatus = structs.AllocDesiredStatusRun - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1447,7 +1456,7 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { alloc.EvalID = eval.ID alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.TaskGroup = job.TaskGroups[0].Name - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -1728,6 +1737,8 @@ func TestCoreScheduler_jobGC(t *testing.T) { testFn := func(inputJob *structs.Job) { + now := time.Now().UnixNano() + // Create and upsert a job which has a completed eval and 2 running // allocations associated. inputJob.Status = structs.JobStatusRunning @@ -1752,7 +1763,7 @@ func TestCoreScheduler_jobGC(t *testing.T) { must.NoError(t, testServer.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 10, []*structs.Evaluation{mockEval1})) must.NoError(t, - testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{ + testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, now, []*structs.Allocation{ mockJob1Alloc1, mockJob1Alloc2})) // Trigger a run of the job GC using the forced GC max index value to @@ -1814,7 +1825,7 @@ func TestCoreScheduler_jobGC(t *testing.T) { mockJob1Alloc2.ClientStatus = structs.AllocClientStatusComplete must.NoError(t, - testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 30, []*structs.Allocation{ + testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 30, now, []*structs.Allocation{ mockJob1Alloc1, mockJob1Alloc2})) // Force another GC. 
This time all objects are in a terminal state, so @@ -1852,19 +1863,21 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { testutil.WaitForLeader(t, s1.RPC) assert := assert.New(t) + now := time.Now().UnixNano() + // Insert an active, terminal, and terminal with allocations deployment store := s1.fsm.State() d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed d3.Status = structs.DeploymentStatusSuccessful - assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1002, d3), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1000, now, d1), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1001, now, d2), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1002, now, d3), "UpsertDeployment") a := mock.Alloc() a.JobID = d3.JobID a.DeploymentID = d3.ID - assert.Nil(store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a}), "UpsertAllocs") // Create a core scheduler snap, err := store.Snapshot() @@ -1903,12 +1916,14 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { testutil.WaitForLeader(t, server.RPC) assert := assert.New(t) + now := time.Now().UnixNano() + // Insert terminal and active deployment store := server.fsm.State() d1, d2 := mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed - assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1000, now, d1), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1001, now, d2), "UpsertDeployment") // Create a core scheduler snap, err := store.Snapshot() diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index ecc46bc13b6..fc3bae3a3a3 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -59,7 +59,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err := state.UpsertCSIVolume(999, vols) + err := state.UpsertCSIVolume(999, time.Now().UnixNano(), vols) require.NoError(t, err) // Create the register request @@ -107,7 +107,7 @@ func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err := state.UpsertCSIVolume(999, vols) + err := state.UpsertCSIVolume(999, time.Now().UnixNano(), vols) require.NoError(t, err) // Create the register request @@ -186,7 +186,7 @@ func TestCSIVolume_pluginValidateVolume(t *testing.T) { if tc.updatePlugin != nil { tc.updatePlugin(plug) } - must.NoError(t, store.UpsertCSIPlugin(1000, plug)) + must.NoError(t, store.UpsertCSIPlugin(1000, time.Now().UnixNano(), plug)) got, err := csiVolume.pluginValidateVolume(vol) @@ -330,7 +330,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { index++ require.NoError(t, state.UpsertJobSummary(index, summary)) index++ - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc})) index++ must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) @@ -382,7 +382,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { }}, }} index++ - err = state.UpsertCSIVolume(index, vols) + 
err = state.UpsertCSIVolume(index, time.Now().UnixNano(), vols) require.NoError(t, err) // Verify that the volume exists, and is healthy @@ -419,7 +419,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { index++ require.NoError(t, state.UpsertJobSummary(index, summary)) index++ - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc2})) claimReq.AllocationID = alloc2.ID err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) require.EqualError(t, err, structs.ErrCSIVolumeMaxClaims.Error(), @@ -453,7 +453,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { index++ require.NoError(t, state.UpsertJobSummary(index, summary)) index++ - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc3})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc3})) claimReq.AllocationID = alloc3.ID err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) require.NoError(t, err) @@ -515,14 +515,14 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err = state.UpsertCSIVolume(1003, vols) + err = state.UpsertCSIVolume(1003, time.Now().UnixNano(), vols) require.NoError(t, err) alloc := mock.BatchAlloc() alloc.NodeID = node.ID summary := mock.JobSummary(alloc.JobID) require.NoError(t, state.UpsertJobSummary(1004, summary)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Make the volume claim claimReq := &structs.CSIVolumeClaimRequest{ @@ -649,7 +649,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { } index++ - err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) + err = state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol}) must.NoError(t, err) // setup: create an alloc that will claim our volume @@ -663,7 +663,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { index++ must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, - []*structs.Allocation{alloc, otherAlloc})) + time.Now().UnixNano(), []*structs.Allocation{alloc, otherAlloc})) // setup: claim the volume for our to-be-failed alloc claim := &structs.CSIVolumeClaim{ @@ -675,7 +675,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { index++ claim.State = structs.CSIVolumeClaimStateTaken - err = state.CSIVolumeClaim(index, ns, volID, claim) + err = state.CSIVolumeClaim(index, time.Now().UnixNano(), ns, volID, claim) must.NoError(t, err) // setup: claim the volume for our other alloc @@ -688,7 +688,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { index++ otherClaim.State = structs.CSIVolumeClaimStateTaken - err = state.CSIVolumeClaim(index, ns, volID, otherClaim) + err = state.CSIVolumeClaim(index, time.Now().UnixNano(), ns, volID, otherClaim) must.NoError(t, err) // test: unpublish and check the results @@ -707,7 +707,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusFailed index++ must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, - []*structs.Allocation{alloc})) + time.Now().UnixNano(), []*structs.Allocation{alloc})) err = 
msgpackrpc.CallWithCodec(codec, "CSIVolume.Unpublish", req, &structs.CSIVolumeUnpublishResponse{}) @@ -787,7 +787,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err = state.UpsertCSIVolume(1002, vols) + err = state.UpsertCSIVolume(1002, time.Now().UnixNano(), vols) require.NoError(t, err) // Query everything in the namespace @@ -866,7 +866,7 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { }}, }, } - err = state.UpsertCSIVolume(1001, vols) + err = state.UpsertCSIVolume(1001, time.Now().UnixNano(), vols) require.NoError(t, err) // Lookup volumes in all namespaces @@ -945,7 +945,7 @@ func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) { volume.Namespace = m.namespace } index := 1000 + uint64(i) - require.NoError(t, state.UpsertCSIVolume(index, []*structs.CSIVolume{volume})) + require.NoError(t, state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{volume})) } cases := []struct { @@ -1328,7 +1328,7 @@ func TestCSIVolumeEndpoint_Delete(t *testing.T) { }, } index++ - err = state.UpsertCSIVolume(index, vols) + err = state.UpsertCSIVolume(index, time.Now().UnixNano(), vols) must.NoError(t, err) // Delete volumes @@ -1571,7 +1571,7 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { ExternalID: "vol-12345", }} index++ - require.NoError(t, state.UpsertCSIVolume(index, vols)) + require.NoError(t, state.UpsertCSIVolume(index, time.Now().UnixNano(), vols)) // Create the snapshot request req1 := &structs.CSISnapshotCreateRequest{ @@ -2194,7 +2194,7 @@ func TestCSIPluginEndpoint_DeleteViaGC(t *testing.T) { index, _ := state.LatestIndex() index++ - must.NoError(t, state.UpsertCSIPlugin(index, plugin)) + must.NoError(t, state.UpsertCSIPlugin(index, time.Now().UnixNano(), plugin)) // Retry now that it's empty must.NoError(t, msgpackrpc.CallWithCodec(codec, "CSIPlugin.Delete", reqDel, respDel)) @@ -2251,7 +2251,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { ControllerRequired: false, }, } - err = state.UpsertCSIVolume(1002, vols) + err = state.UpsertCSIVolume(1002, time.Now().UnixNano(), vols) require.NoError(t, err) // has controller @@ -2453,7 +2453,7 @@ func TestCSIPluginEndpoint_ACLNamespaceFilterAlloc(t *testing.T) { must.Eq(t, 3, len(allocs)) allocs[0].Namespace = ns1.Name - err := s.UpsertAllocs(structs.MsgTypeTestSetup, 1003, allocs) + err := s.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), allocs) must.NoError(t, err) req := &structs.CSIPluginGetRequest{ diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index e63180ead90..c989cbd99a3 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -37,7 +37,7 @@ func TestDeploymentEndpoint_GetDeployment(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Lookup the deployments get := &structs.DeploymentSpecificRequest{ @@ -69,7 +69,7 @@ func TestDeploymentEndpoint_GetDeployment_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Create the namespace policy and tokens validToken := 
mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -130,12 +130,12 @@ func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) { // Upsert a deployment we are not interested in first. time.AfterFunc(100*time.Millisecond, func() { - assert.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(100, time.Now().UnixNano(), d1), "UpsertDeployment") }) // Upsert another deployment later which should trigger the watch. time.AfterFunc(200*time.Millisecond, func() { - assert.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(200, time.Now().UnixNano(), d2), "UpsertDeployment") }) // Lookup the deployments @@ -175,7 +175,7 @@ func TestDeploymentEndpoint_Fail(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Mark the deployment as failed req := &structs.DeploymentFailRequest{ @@ -225,7 +225,7 @@ func TestDeploymentEndpoint_Fail_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -319,8 +319,8 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Mark the deployment as failed req := &structs.DeploymentFailRequest{ @@ -379,7 +379,7 @@ func TestDeploymentEndpoint_Pause(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Mark the deployment as failed req := &structs.DeploymentPauseRequest{ @@ -422,7 +422,7 @@ func TestDeploymentEndpoint_Pause_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -501,8 +501,8 @@ func TestDeploymentEndpoint_Promote(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, 
time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Promote the deployment req := &structs.DeploymentPromoteRequest{ @@ -566,8 +566,8 @@ func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -652,8 +652,8 @@ func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Set the alloc as healthy req := &structs.DeploymentAllocHealthRequest{ @@ -720,8 +720,8 @@ func TestDeploymentEndpoint_SetAllocHealth_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -826,8 +826,8 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Set the alloc as unhealthy req := &structs.DeploymentAllocHealthRequest{ @@ -915,8 +915,8 @@ func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") // Set the alloc as unhealthy req := &structs.DeploymentAllocHealthRequest{ @@ -983,7 +983,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { state 
:= s1.fsm.State() must.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), must.Sprint("UpsertJob")) - must.Nil(t, state.UpsertDeployment(1000, d), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) // Lookup the deployments get := &structs.DeploymentListRequest{ @@ -1021,7 +1021,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { d2.JobID = j2.ID must.Nil(t, state.UpsertNamespaces(1001, []*structs.Namespace{{Name: "prod"}})) must.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, nil, j2), must.Sprint("UpsertJob")) - must.Nil(t, state.UpsertDeployment(1003, d2), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(1003, time.Now().UnixNano(), d2), must.Sprint("UpsertDeployment")) // Lookup the deployments with wildcard namespace get = &structs.DeploymentListRequest{ @@ -1087,17 +1087,19 @@ func TestDeploymentEndpoint_List_order(t *testing.T) { dep3 := mock.Deployment() dep3.ID = uuid3 - err := s1.fsm.State().UpsertDeployment(1000, dep1) + now := time.Now().UnixNano() + + err := s1.fsm.State().UpsertDeployment(1000, now, dep1) must.NoError(t, err) - err = s1.fsm.State().UpsertDeployment(1001, dep2) + err = s1.fsm.State().UpsertDeployment(1001, now, dep2) must.NoError(t, err) - err = s1.fsm.State().UpsertDeployment(1002, dep3) + err = s1.fsm.State().UpsertDeployment(1002, now, dep3) must.NoError(t, err) // update dep2 again so we can later assert create index order did not change - err = s1.fsm.State().UpsertDeployment(1003, dep2) + err = s1.fsm.State().UpsertDeployment(1003, now, dep2) must.NoError(t, err) t.Run("default", func(t *testing.T) { @@ -1175,8 +1177,9 @@ func TestDeploymentEndpoint_List_ACL(t *testing.T) { d2.Namespace = devNS.Name state := s1.fsm.State() - must.NoError(t, state.UpsertDeployment(1000, d1), must.Sprint("Upsert Deployment failed")) - must.NoError(t, state.UpsertDeployment(1001, d2), must.Sprint("Upsert Deployment failed")) + now := time.Now().UnixNano() + must.NoError(t, state.UpsertDeployment(1000, now, d1), must.Sprint("Upsert Deployment failed")) + must.NoError(t, state.UpsertDeployment(1001, now, d2), must.Sprint("Upsert Deployment failed")) // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1002, "test-valid", @@ -1280,7 +1283,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) { // Upsert alloc triggers watches time.AfterFunc(100*time.Millisecond, func() { - must.Nil(t, state.UpsertDeployment(3, d), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(3, time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) }) req := &structs.DeploymentListRequest{ @@ -1303,7 +1306,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) { d2 := d.Copy() d2.Status = structs.DeploymentStatusPaused time.AfterFunc(100*time.Millisecond, func() { - must.Nil(t, state.UpsertDeployment(5, d2), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(5, time.Now().UnixNano(), d2), must.Sprint("UpsertDeployment")) }) req.MinQueryIndex = 3 @@ -1365,7 +1368,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { if m.namespace != "" { // defaults to "default" deployment.Namespace = m.namespace } - must.NoError(t, state.UpsertDeployment(index, deployment)) + must.NoError(t, state.UpsertDeployment(index, time.Now().UnixNano(), deployment)) } aclToken := mock.CreatePolicyAndToken(t, state, 1100, "test-valid-read", @@ -1568,10 +1571,11 @@ func 
TestDeploymentEndpoint_Allocations(t *testing.T) { summary := mock.JobSummary(a.JobID) state := s1.fsm.State() + now := time.Now().UnixNano() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, now, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{a}), "UpsertAllocs") // Lookup the allocations get := &structs.DeploymentSpecificRequest{ @@ -1606,10 +1610,11 @@ func TestDeploymentEndpoint_Allocations_ACL(t *testing.T) { summary := mock.JobSummary(a.JobID) state := s1.fsm.State() + now := time.Now().UnixNano() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, now, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{a}), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -1681,13 +1686,14 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { a.DeploymentID = d.ID summary := mock.JobSummary(a.JobID) + now := time.Now().UnixNano() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(2, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(2, now, d), "UpsertDeployment") assert.Nil(state.UpsertJobSummary(3, summary), "UpsertJobSummary") // Upsert alloc triggers watches time.AfterFunc(100*time.Millisecond, func() { - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{a}), "UpsertAllocs") }) req := &structs.DeploymentSpecificRequest{ @@ -1715,7 +1721,8 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { a2.ClientStatus = structs.AllocClientStatusRunning time.AfterFunc(100*time.Millisecond, func() { assert.Nil(state.UpsertJobSummary(5, mock.JobSummary(a2.JobID)), "UpsertJobSummary") - assert.Nil(state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 6, []*structs.Allocation{a2}), "updateAllocsFromClient") + assert.Nil(state.UpdateAllocsFromClient( + structs.MsgTypeTestSetup, 6, time.Now().UnixNano(), []*structs.Allocation{a2}), "updateAllocsFromClient") }) req.MinQueryIndex = 4 @@ -1742,7 +1749,7 @@ func TestDeploymentEndpoint_Reap(t *testing.T) { // Create the register request d1 := mock.Deployment() - assert.Nil(s1.fsm.State().UpsertDeployment(1000, d1), "UpsertDeployment") + assert.Nil(s1.fsm.State().UpsertDeployment(1000,time.Now().UnixNano(), d1), "UpsertDeployment") // Reap the eval get := &structs.DeploymentDeleteRequest{ diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index f2cb2e6ac2f..19f9af7a8b1 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -1968,7 +1968,7 @@ func 
TestDeploymentWatcher_RollbackFailed(t *testing.T) { // verify that the job version hasn't changed after upsert m.state.JobByID(nil, structs.DefaultNamespace, j.ID) - must.Eq(t, uint64(0), j.Version, must.Sprintf("Expected job version 0 but got ", j.Version)) + must.Eq(t, uint64(0), j.Version, must.Sprintf("Expected job version 0 but got %v", j.Version)) } // Test allocation updates and evaluation creation is batched between watchers diff --git a/nomad/drainer/draining_node_test.go b/nomad/drainer/draining_node_test.go index 02c0b3dbafb..53f7fe12b7e 100644 --- a/nomad/drainer/draining_node_test.go +++ b/nomad/drainer/draining_node_test.go @@ -11,8 +11,7 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) // testDrainingNode creates a *drainingNode with a 1h deadline but no allocs @@ -27,7 +26,7 @@ func testDrainingNode(t *testing.T) *drainingNode { ForceDeadline: time.Now().Add(time.Hour), } - require.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + must.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 100, node)) return NewDrainingNode(node, state) } @@ -35,21 +34,22 @@ func assertDrainingNode(t *testing.T, dn *drainingNode, isDone bool, remaining, t.Helper() done, err := dn.IsDone() - require.Nil(t, err) - assert.Equal(t, isDone, done, "IsDone mismatch") + must.Nil(t, err) + must.Eq(t, isDone, done, must.Sprint("IsDone mismatch")) allocs, err := dn.RemainingAllocs() - require.Nil(t, err) - assert.Len(t, allocs, remaining, "RemainingAllocs mismatch") + must.Nil(t, err) + must.Len(t, remaining, allocs, must.Sprint("RemainingAllocs mismatch")) jobs, err := dn.DrainingJobs() - require.Nil(t, err) - assert.Len(t, jobs, running, "DrainingJobs mismatch") + must.Nil(t, err) + must.Len(t, running, jobs, must.Sprint("DrainingJobs mismatch")) } func TestDrainingNode_Table(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() cases := []struct { name string isDone bool @@ -72,8 +72,8 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.BatchAlloc() alloc.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, []*structs.Allocation{alloc})) }, }, { @@ -84,8 +84,8 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.Alloc() alloc.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, []*structs.Allocation{alloc})) }, }, { @@ -96,8 +96,8 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.SystemAlloc() alloc.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, 
[]*structs.Allocation{alloc})) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, []*structs.Allocation{alloc})) }, }, { @@ -109,9 +109,9 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) // StateStore doesn't like inserting new allocs // with a terminal status, so set the status in @@ -119,7 +119,7 @@ func TestDrainingNode_Table(t *testing.T) { for _, a := range allocs { a.ClientStatus = structs.AllocClientStatusComplete } - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) }, }, { @@ -131,13 +131,13 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) // Set only the service job as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) }, }, { @@ -149,14 +149,14 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) // Set only the service and batch jobs as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete allocs[2].ClientStatus = structs.AllocClientStatusComplete - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) }, }, { @@ -168,14 +168,14 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) // Set only the service and batch jobs as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete allocs[1].ClientStatus = structs.AllocClientStatusComplete - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, 
allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) }, }, { @@ -194,15 +194,15 @@ func TestDrainingNode_Table(t *testing.T) { } for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) // Set only the service and batch jobs as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete allocs[1].ClientStatus = structs.AllocClientStatusComplete allocs[2].ClientStatus = structs.AllocClientStatusComplete - require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) + must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) }, }, } diff --git a/nomad/drainer/watch_jobs_test.go b/nomad/drainer/watch_jobs_test.go index 05a2a509e04..5e896c3b69b 100644 --- a/nomad/drainer/watch_jobs_test.go +++ b/nomad/drainer/watch_jobs_test.go @@ -117,6 +117,8 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { var index uint64 = 101 count := 8 + now := time.Now().UnixNano() + newAlloc := func(node *structs.Node, job *structs.Job) *structs.Allocation { a := mock.Alloc() a.JobID = job.ID @@ -147,7 +149,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { allocs = append(allocs, a) } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, allocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, allocs)) index++ } @@ -169,7 +171,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, drainedAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, drainedAllocs)) drains.Resp.Respond(index, nil) index++ @@ -196,7 +198,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { updates = append(updates, a, replacement) replacements[i] = replacement.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, updates)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, updates)) index++ // The drained allocs stopping cause migrations but no new drains @@ -210,7 +212,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { a.ClientStatus = structs.AllocClientStatusComplete completeAllocs[i] = a } - must.NoError(t, store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, index, completeAllocs)) + must.NoError(t, store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, index, now, completeAllocs)) index++ // The drained allocs stopping cause migrations but no new drains @@ -224,7 +226,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { Healthy: pointer.Of(true), } } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, replacements)) index++ must.MapNotEmpty(t, jobWatcher.drainingJobs()) @@ -240,7 +242,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, drainedAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, drainedAllocs)) drains.Resp.Respond(index, nil) index++ @@ -257,7 +259,7 
@@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { updates = append(updates, a, replacement) replacements[i] = replacement.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, updates)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, updates)) index++ assertJobWatcherOps(t, jobWatcher, 0, 6) @@ -268,7 +270,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { Healthy: pointer.Of(true), } } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, replacements)) index++ must.MapNotEmpty(t, jobWatcher.drainingJobs()) @@ -284,7 +286,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, drainedAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, drainedAllocs)) drains.Resp.Respond(index, nil) index++ @@ -301,7 +303,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { updates = append(updates, a, replacement) replacements[i] = replacement.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, updates)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, updates)) index++ assertJobWatcherOps(t, jobWatcher, 0, 4) @@ -312,7 +314,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { Healthy: pointer.Of(true), } } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, replacements)) // No jobs should be left! must.MapEmpty(t, jobWatcher.drainingJobs()) @@ -622,7 +624,7 @@ func TestDrainingJobWatcher_HandleTaskGroup(t *testing.T) { allocs = append(allocs, a) } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 103, time.Now().UnixNano(), allocs)) snap, err := store.Snapshot() must.NoError(t, err) @@ -672,7 +674,7 @@ func TestHandleTaskGroup_Migrations(t *testing.T) { } allocs = append(allocs, a) } - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), allocs)) snap, err := state.Snapshot() require.Nil(err) @@ -745,7 +747,7 @@ func TestHandleTaskGroup_GarbageCollectedNode(t *testing.T) { // Make the first one be on a GC'd node allocs[0].NodeID = uuid.Generate() - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), allocs)) snap, err := state.Snapshot() require.Nil(err) diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 62e84036016..2421c76488a 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -404,7 +404,7 @@ func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { EvalID: eval.ID, } assert := assert.New(t) - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), &res) assert.Nil(err) // Dequeue the eval @@ -930,7 +930,7 @@ func TestEvalEndpoint_Delete(t *testing.T) { allocs = append(allocs, alloc) } index++ - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, allocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 
index, time.Now().UnixNano(), allocs)) // Delete all the unwanted evals @@ -1775,7 +1775,7 @@ func TestEvalEndpoint_Allocations(t *testing.T) { state := s1.fsm.State() state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1814,7 +1814,7 @@ func TestEvalEndpoint_Allocations_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) assert.Nil(state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) // Create ACL tokens validToken := mock.CreatePolicyAndToken(t, state, 1003, "test-valid", @@ -1876,10 +1876,12 @@ func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { alloc1 := mock.Alloc() alloc2 := mock.Alloc() + now := time.Now().UnixNano() + // Upsert an unrelated alloc first time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, now, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -1888,7 +1890,7 @@ func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { // Upsert an alloc which will trigger the watch later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, now, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/nomad/fsm.go b/nomad/fsm.go index de16e67ed9f..1f0a414ab3d 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -680,6 +680,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index } } + now := time.Now().UnixNano() if req.Deployment != nil { // Cancel any preivous deployment. 
lastDeployment, err := n.state.LatestDeploymentByJobID(ws, req.Job.Namespace, req.Job.ID) @@ -690,7 +691,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index activeDeployment := lastDeployment.Copy() activeDeployment.Status = structs.DeploymentStatusCancelled activeDeployment.StatusDescription = structs.DeploymentStatusDescriptionNewerJob - if err := n.state.UpsertDeployment(index, activeDeployment); err != nil { + if err := n.state.UpsertDeployment(index, now, activeDeployment); err != nil { return err } } @@ -701,7 +702,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index req.Deployment.JobSpecModifyIndex = req.Job.JobModifyIndex req.Deployment.JobVersion = req.Job.Version - if err := n.state.UpsertDeployment(index, req.Deployment); err != nil { + if err := n.state.UpsertDeployment(index, now, req.Deployment); err != nil { return err } } @@ -938,7 +939,7 @@ func (n *nomadFSM) applyAllocClientUpdate(msgType structs.MessageType, buf []byt } // Update all the client allocations - if err := n.state.UpdateAllocsFromClient(msgType, index, req.Alloc); err != nil { + if err := n.state.UpdateAllocsFromClient(msgType, index, time.Now().UnixNano(), req.Alloc); err != nil { n.logger.Error("UpdateAllocFromClient failed", "error", err) return err } @@ -1092,7 +1093,7 @@ func (n *nomadFSM) applyPlanResults(msgType structs.MessageType, buf []byte, ind panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpsertPlanResults(msgType, index, &req); err != nil { + if err := n.state.UpsertPlanResults(msgType, index, time.Now().UnixNano(), &req); err != nil { n.logger.Error("ApplyPlan failed", "error", err) return err } @@ -1111,7 +1112,7 @@ func (n *nomadFSM) applyDeploymentStatusUpdate(msgType structs.MessageType, buf panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpdateDeploymentStatus(msgType, index, &req); err != nil { + if err := n.state.UpdateDeploymentStatus(msgType, index, time.Now().UnixNano(), &req); err != nil { n.logger.Error("UpsertDeploymentStatusUpdate failed", "error", err) return err } @@ -1128,7 +1129,7 @@ func (n *nomadFSM) applyDeploymentPromotion(msgType structs.MessageType, buf []b panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpdateDeploymentPromotion(msgType, index, &req); err != nil { + if err := n.state.UpdateDeploymentPromotion(msgType, index, time.Now().UnixNano(), &req); err != nil { n.logger.Error("UpsertDeploymentPromotion failed", "error", err) return err } @@ -1146,7 +1147,7 @@ func (n *nomadFSM) applyDeploymentAllocHealth(msgType structs.MessageType, buf [ panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpdateDeploymentAllocHealth(msgType, index, &req); err != nil { + if err := n.state.UpdateDeploymentAllocHealth(msgType, index, time.Now().UnixNano(), &req); err != nil { n.logger.Error("UpsertDeploymentAllocHealth failed", "error", err) return err } @@ -1366,7 +1367,7 @@ func (n *nomadFSM) applyCSIVolumeRegister(buf []byte, index uint64) interface{} } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_register"}, time.Now()) - if err := n.state.UpsertCSIVolume(index, req.Volumes); err != nil { + if err := n.state.UpsertCSIVolume(index, time.Now().UnixNano(), req.Volumes); err != nil { n.logger.Error("CSIVolumeRegister failed", "error", err) return err } @@ -1397,7 +1398,7 @@ func (n *nomadFSM) applyCSIVolumeBatchClaim(buf []byte, index uint64) interface{ defer 
metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_batch_claim"}, time.Now()) for _, req := range batch.Claims { - err := n.state.CSIVolumeClaim(index, req.RequestNamespace(), + err := n.state.CSIVolumeClaim(index, time.Now().UnixNano(), req.RequestNamespace(), req.VolumeID, req.ToClaim()) if err != nil { n.logger.Error("CSIVolumeClaim for batch failed", "error", err) @@ -1414,7 +1415,7 @@ func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} { } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_claim"}, time.Now()) - if err := n.state.CSIVolumeClaim(index, req.RequestNamespace(), req.VolumeID, req.ToClaim()); err != nil { + if err := n.state.CSIVolumeClaim(index, time.Now().UnixNano(), req.RequestNamespace(), req.VolumeID, req.ToClaim()); err != nil { n.logger.Error("CSIVolumeClaim failed", "error", err) return err } @@ -2005,7 +2006,7 @@ func (n *nomadFSM) failLeakedDeployments(store *state.StateStore) error { failed := d.Copy() failed.Status = structs.DeploymentStatusCancelled failed.StatusDescription = structs.DeploymentStatusDescriptionStoppedJob - if err := store.UpsertDeployment(dindex, failed); err != nil { + if err := store.UpsertDeployment(dindex, time.Now().UnixNano(), failed); err != nil { return fmt.Errorf("failed to mark leaked deployment %q as failed: %v", failed.ID, err) } } diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index 52d234e6df1..b4027e5b106 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) func TestHeartbeat_InitializeHeartbeatTimers(t *testing.T) { @@ -71,19 +70,18 @@ func TestHeartbeat_ResetHeartbeatTimer(t *testing.T) { func TestHeartbeat_ResetHeartbeatTimer_Nonleader(t *testing.T) { ci.Parallel(t) - require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 // Won't become leader }) defer cleanupS1() - require.False(s1.IsLeader()) + must.False(t, s1.IsLeader()) // Create a new timer _, err := s1.resetHeartbeatTimer("test") - require.NotNil(err) - require.EqualError(err, heartbeatNotLeader) + must.NotNil(t, err) + must.EqError(t, err, heartbeatNotLeader) } func TestHeartbeat_ResetHeartbeatTimerLocked(t *testing.T) { @@ -150,7 +148,6 @@ func TestHeartbeat_ResetHeartbeatTimerLocked_Renew(t *testing.T) { func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { ci.Parallel(t) - require := require.New(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -159,7 +156,7 @@ func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { // Create a node node := mock.Node() state := s1.fsm.State() - require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1, node)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node)) // This should cause a status update s1.invalidateHeartbeat(node.ID) @@ -167,10 +164,10 @@ func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { // Check it is updated ws := memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - require.NoError(err) - require.True(out.TerminalStatus()) - require.Len(out.Events, 2) - require.Equal(NodeHeartbeatEventMissed, out.Events[1].Message) + must.NoError(t, err) + must.True(t, out.TerminalStatus()) + must.SliceLen(t, 2, out.Events) + must.Eq(t, NodeHeartbeatEventMissed, out.Events[1].Message) } func TestHeartbeat_ClearHeartbeatTimer(t *testing.T) { @@ -343,7 
+340,7 @@ func TestHeartbeat_InvalidateHeartbeat_DisconnectedClient(t *testing.T) { Time: tc.now, }} - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Trigger status update s1.invalidateHeartbeat(node.ID) @@ -413,7 +410,7 @@ func TestHeartbeat_InvalidateHeartbeatDisconnectedClient(t *testing.T) { Value: structs.AllocClientStatusUnknown, Time: tc.now, }} - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{alloc})) // Trigger status update s1.invalidateHeartbeat(node.ID) diff --git a/nomad/namespace_endpoint_test.go b/nomad/namespace_endpoint_test.go index 6a0583cc620..c0257cd174f 100644 --- a/nomad/namespace_endpoint_test.go +++ b/nomad/namespace_endpoint_test.go @@ -530,7 +530,7 @@ func TestNamespaceEndpoint_DeleteNamespaces_NoAssociatedVolumes_Local(t *testing // Create a volume in one vol := mock.CSIVolume(mock.CSIPlugin()) vol.Namespace = ns1.Name - must.Nil(t, s1.fsm.State().UpsertCSIVolume(1001, []*structs.CSIVolume{vol})) + must.Nil(t, s1.fsm.State().UpsertCSIVolume(1001, time.Now().UnixNano(), []*structs.CSIVolume{vol})) // Lookup the namespaces req := &structs.NamespaceDeleteRequest{ diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go index e2e76894a76..13b56115fcb 100644 --- a/nomad/plan_apply.go +++ b/nomad/plan_apply.go @@ -356,7 +356,7 @@ func (p *planner) applyPlan(plan *structs.Plan, result *structs.PlanResult, snap // Optimistically apply to our state view if snap != nil { nextIdx := p.srv.raft.AppliedIndex() + 1 - if err := snap.UpsertPlanResults(structs.ApplyPlanResultsRequestType, nextIdx, &req); err != nil { + if err := snap.UpsertPlanResults(structs.ApplyPlanResultsRequestType, nextIdx, time.Now().UnixNano(), &req); err != nil { return future, err } } diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go index 687d5511832..2e7167b4247 100644 --- a/nomad/plan_apply_test.go +++ b/nomad/plan_apply_test.go @@ -83,7 +83,7 @@ func TestPlanApply_applyPlan(t *testing.T) { // Register a fake deployment oldDeployment := mock.Deployment() - if err := s1.State().UpsertDeployment(900, oldDeployment); err != nil { + if err := s1.State().UpsertDeployment(900, time.Now().UnixNano(), oldDeployment); err != nil { t.Fatalf("UpsertDeployment failed: %v", err) } @@ -259,7 +259,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { // Register a fake deployment oldDeployment := mock.Deployment() - if err := s1.State().UpsertDeployment(900, oldDeployment); err != nil { + if err := s1.State().UpsertDeployment(900, time.Now().UnixNano(), oldDeployment); err != nil { t.Fatalf("UpsertDeployment failed: %v", err) } @@ -290,7 +290,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { PreemptedByAllocation: alloc.ID, } s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)) - s1.State().UpsertAllocs(structs.MsgTypeTestSetup, 1100, []*structs.Allocation{stoppedAlloc, preemptedAlloc}) + s1.State().UpsertAllocs(structs.MsgTypeTestSetup, 1100, time.Now().UnixNano(), []*structs.Allocation{stoppedAlloc, preemptedAlloc}) // Create an eval eval := mock.Eval() eval.JobID = alloc.JobID @@ -615,7 +615,7 @@ func TestPlanApply_EvalPlan_Preemption(t *testing.T) { } // Insert a preempted alloc such that the alloc will fit only after 
preemption - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{preemptedAlloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{preemptedAlloc}) alloc := mock.Alloc() alloc.AllocatedResources = &structs.AllocatedResources{ @@ -904,7 +904,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) { alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources) state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) alloc2 := mock.Alloc() alloc2.NodeID = node.ID @@ -954,7 +954,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Device(t *testing.T) { state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) // Alloc2 tries to use the same device alloc2 := mock.Alloc() @@ -994,7 +994,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) { alloc.NodeID = node.ID alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources) state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) snap, _ := state.Snapshot() plan := &structs.Plan{ @@ -1027,7 +1027,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting_Ineligible(t *testing.T) { alloc.NodeID = node.ID alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources) state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) snap, _ := state.Snapshot() plan := &structs.Plan{ @@ -1058,7 +1058,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) { alloc.NodeID = node.ID alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources) state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) snap, _ := state.Snapshot() allocEvict := new(structs.Allocation) @@ -1097,7 +1097,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusEvict alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources) state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) snap, _ := state.Snapshot() alloc2 := mock.Alloc() @@ -1130,7 +1130,7 @@ func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) { node.ReservedResources = nil node.Status = structs.NodeStatusDown state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, 
[]*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) snap, _ := state.Snapshot() allocEvict := new(structs.Allocation) diff --git a/nomad/state/deployment_events_test.go b/nomad/state/deployment_events_test.go index 3bf9e9f6c0a..be8667def36 100644 --- a/nomad/state/deployment_events_test.go +++ b/nomad/state/deployment_events_test.go @@ -31,7 +31,7 @@ func TestDeploymentEventFromChanges(t *testing.T) { d.JobID = j.ID require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx)) setupTx.Txn.Commit() @@ -47,7 +47,7 @@ func TestDeploymentEventFromChanges(t *testing.T) { // Exlude Job and assert its added } - require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req)) + require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, time.Now().UnixNano(), req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) require.Len(t, events, 2) diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index 8e15e27fb9d..945039fc4e4 100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -115,7 +115,7 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { d.JobID = j.ID require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx)) setupTx.Txn.Commit() @@ -131,7 +131,7 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { // Exlude Job and assert its added } - require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req)) + require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, time.Now().UnixNano(), req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) require.Len(t, events, 2) @@ -173,7 +173,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { DesiredCanaries: 1, }, } - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx)) // create set of allocs c1 := mock.Alloc() @@ -192,7 +192,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { Healthy: pointer.Of(true), } - require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) + require.NoError(t, s.upsertAllocsImpl(10, time.Now().UnixNano(), []*structs.Allocation{c1, c2}, setupTx)) // commit setup transaction setupTx.Txn.Commit() @@ -208,7 +208,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { Eval: e, } - require.NoError(t, s.UpdateDeploymentPromotion(msgType, 100, req)) + require.NoError(t, s.UpdateDeploymentPromotion(msgType, 100, time.Now().UnixNano(), req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) require.Len(t, events, 4) @@ -250,7 +250,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { DesiredCanaries: 1, }, } - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx)) // create set of allocs c1 := mock.Alloc() @@ -269,7 +269,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { Healthy: pointer.Of(true), } - require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) + require.NoError(t, s.upsertAllocsImpl(10, time.Now().UnixNano(), []*structs.Allocation{c1, c2}, setupTx)) // Commit setup 
setupTx.Commit() @@ -287,7 +287,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { }, } - require.NoError(t, s.UpdateDeploymentAllocHealth(msgType, 100, req)) + require.NoError(t, s.UpdateDeploymentAllocHealth(msgType, 100, time.Now().UnixNano(), req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) require.Len(t, events, 3) @@ -514,7 +514,7 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { EvalID: eval.ID, } - require.NoError(t, s.UpsertPlanResults(msgType, 100, req)) + require.NoError(t, s.UpsertPlanResults(msgType, 100, time.Now().UnixNano(), req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) require.Len(t, events, 5) @@ -644,7 +644,7 @@ func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) alloc := mock.Alloc() require.Nil(t, s.UpsertJob(structs.MsgTypeTestSetup, 10, nil, alloc.Job)) - require.Nil(t, s.UpsertAllocs(structs.MsgTypeTestSetup, 11, []*structs.Allocation{alloc})) + require.Nil(t, s.UpsertAllocs(structs.MsgTypeTestSetup, 11, time.Now().UnixNano(), []*structs.Allocation{alloc})) msgType := structs.AllocUpdateDesiredTransitionRequestType @@ -977,7 +977,7 @@ func TestNodeDrainEventFromChanges(t *testing.T) { alloc2.NodeID = node.ID require.NoError(t, upsertNodeTxn(setupTx, 10, node)) - require.NoError(t, s.upsertAllocsImpl(100, []*structs.Allocation{alloc1, alloc2}, setupTx)) + require.NoError(t, s.upsertAllocsImpl(100, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}, setupTx)) setupTx.Txn.Commit() // changes diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index abb59857669..8cede33131a 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -143,7 +143,7 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing EvalID: eval.ID, } assert := assert.New(t) - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), &res) assert.Nil(err) ws := memdb.NewWatchSet() @@ -192,7 +192,8 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { } require := require.New(t) - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 900, []*structs.Allocation{stoppedAlloc, preemptedAlloc})) + require.NoError(state.UpsertAllocs( + structs.MsgTypeTestSetup, 900, time.Now().UnixNano(), []*structs.Allocation{stoppedAlloc, preemptedAlloc})) require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) // modify job and ensure that stopped and preempted alloc point to original Job @@ -219,7 +220,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { } assert := assert.New(t) planModifyIndex := uint64(1000) - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, planModifyIndex, &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, planModifyIndex, time.Now().UnixNano(), &res) require.NoError(err) ws := memdb.NewWatchSet() @@ -300,7 +301,8 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { EvalID: eval.ID, } - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) + now := time.Now().UnixNano() + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, now, &res) if err != nil { t.Fatalf("err: %v", err) } @@ -348,7 +350,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { EvalID: eval.ID, } - err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1001, &res) + err = 
state.UpsertPlanResults(structs.MsgTypeTestSetup, 1001, now, &res) if err != nil { t.Fatalf("err: %v", err) } @@ -390,9 +392,11 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { err = state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}) require.NoError(err) + now := time.Now().UnixNano() + // Insert alloc that will be preempted in the plan preemptedAlloc := mock.Alloc() - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{preemptedAlloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 2, now, []*structs.Allocation{preemptedAlloc}) require.NoError(err) minimalPreemptedAlloc := &structs.Allocation{ @@ -416,7 +420,7 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { PreemptionEvals: []*structs.Evaluation{eval2}, } - err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) + err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, now, &res) require.NoError(err) ws := memdb.NewWatchSet() @@ -456,6 +460,8 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() + // Create a job that applies to all job := mock.Job() if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, job); err != nil { @@ -466,7 +472,7 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { doutstanding := mock.Deployment() doutstanding.JobID = job.ID - if err := state.UpsertDeployment(1000, doutstanding); err != nil { + if err := state.UpsertDeployment(1000, now, doutstanding); err != nil { t.Fatalf("err: %v", err) } @@ -502,7 +508,7 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { EvalID: eval.ID, } - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, now, &res) if err != nil { t.Fatalf("err: %v", err) } @@ -570,7 +576,7 @@ func TestStateStore_UpsertPlanResults_AllocationResources(t *testing.T) { EvalID: eval.ID, } - must.NoError(t, state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res)) + must.NoError(t, state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), &res)) out, err := state.AllocByID(nil, alloc.ID) must.NoError(t, err) @@ -592,7 +598,7 @@ func TestStateStore_UpsertDeployment(t *testing.T) { t.Fatalf("bad: %v", err) } - err = state.UpsertDeployment(1000, deployment) + err = state.UpsertDeployment(1000, time.Now().UnixNano(), deployment) if err != nil { t.Fatalf("err: %v", err) } @@ -642,11 +648,13 @@ func TestStateStore_OldDeployment(t *testing.T) { require := require.New(t) + now := time.Now().UnixNano() + // Insert both deployments - err := state.UpsertDeployment(1001, deploy1) + err := state.UpsertDeployment(1001, now, deploy1) require.Nil(err) - err = state.UpsertDeployment(1002, deploy2) + err = state.UpsertDeployment(1002, now, deploy2) require.Nil(err) ws := memdb.NewWatchSet() @@ -669,11 +677,13 @@ func TestStateStore_DeleteDeployment(t *testing.T) { d1 := mock.Deployment() d2 := mock.Deployment() - err := state.UpsertDeployment(1000, d1) + now := time.Now().UnixNano() + + err := state.UpsertDeployment(1000, now, d1) if err != nil { t.Fatalf("err: %v", err) } - if err := state.UpsertDeployment(1001, d2); err != nil { + if err := state.UpsertDeployment(1001, now, d2); err != nil { t.Fatalf("err: %v", err) } @@ -725,7 +735,7 @@ func TestStateStore_Deployments(t *testing.T) { deployment := mock.Deployment() deployments = append(deployments, 
deployment) - err := state.UpsertDeployment(1000+uint64(i), deployment) + err := state.UpsertDeployment(1000+uint64(i), time.Now().UnixNano(), deployment) require.NoError(t, err) } @@ -774,10 +784,11 @@ func TestStateStore_Deployments_Namespace(t *testing.T) { _, err = state.DeploymentsByNamespace(watches[1], ns2.Name) require.NoError(t, err) - require.NoError(t, state.UpsertDeployment(1001, deploy1)) - require.NoError(t, state.UpsertDeployment(1002, deploy2)) - require.NoError(t, state.UpsertDeployment(1003, deploy3)) - require.NoError(t, state.UpsertDeployment(1004, deploy4)) + now := time.Now().UnixNano() + require.NoError(t, state.UpsertDeployment(1001, now, deploy1)) + require.NoError(t, state.UpsertDeployment(1002, now, deploy2)) + require.NoError(t, state.UpsertDeployment(1003, now, deploy3)) + require.NoError(t, state.UpsertDeployment(1004, now, deploy4)) require.True(t, watchFired(watches[0])) require.True(t, watchFired(watches[1])) @@ -827,8 +838,10 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { state := testStateStore(t) deploy := mock.Deployment() + now := time.Now().UnixNano() + deploy.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" - err := state.UpsertDeployment(1000, deploy) + err := state.UpsertDeployment(1000, now, deploy) require.NoError(t, err) gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { @@ -867,7 +880,7 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { deploy = mock.Deployment() deploy.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" - err = state.UpsertDeployment(1001, deploy) + err = state.UpsertDeployment(1001, now, deploy) require.NoError(t, err) t.Run("more than one", func(t *testing.T) { @@ -924,9 +937,10 @@ func TestStateStore_DeploymentsByIDPrefix_Namespaces(t *testing.T) { deploy1.Namespace = ns1.Name deploy2.Namespace = ns2.Name + now := time.Now().UnixNano() require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertDeployment(1000, deploy1)) - require.NoError(t, state.UpsertDeployment(1001, deploy2)) + require.NoError(t, state.UpsertDeployment(1000, now, deploy1)) + require.NoError(t, state.UpsertDeployment(1001, now, deploy2)) gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { var deploys []*structs.Deployment @@ -1081,7 +1095,7 @@ func TestStateStore_DeleteNamespaces_CSIVolumes(t *testing.T) { vol := mock.CSIVolume(plugin) vol.Namespace = ns.Name - require.NoError(t, state.UpsertCSIVolume(1001, []*structs.CSIVolume{vol})) + require.NoError(t, state.UpsertCSIVolume(1001, time.Now().UnixNano(), []*structs.CSIVolume{vol})) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() @@ -3986,8 +4000,10 @@ func TestStateStore_CSIVolume(t *testing.T) { require.NoError(t, err) defer state.DeleteNode(structs.MsgTypeTestSetup, 9999, []string{pluginID}) + now := time.Now().UnixNano() + index++ - err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{alloc}) require.NoError(t, err) ns := structs.DefaultNamespace @@ -4018,18 +4034,18 @@ func TestStateStore_CSIVolume(t *testing.T) { }} index++ - err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) + err = state.UpsertCSIVolume(index, now, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) // volume registration is idempotent, unless identies are changed index++ - err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) + err = 
state.UpsertCSIVolume(index, now, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) index++ v2 := v0.Copy() v2.PluginID = "new-id" - err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v2}) + err = state.UpsertCSIVolume(index, now, []*structs.CSIVolume{v2}) require.Error(t, err, fmt.Sprintf("volume exists: %s", v0.ID)) ws := memdb.NewWatchSet() @@ -4067,7 +4083,7 @@ func TestStateStore_CSIVolume(t *testing.T) { a0 := mock.Alloc() a1 := mock.Alloc() index++ - err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{a0, a1}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{a0, a1}) require.NoError(t, err) // Claims @@ -4086,10 +4102,10 @@ func TestStateStore_CSIVolume(t *testing.T) { } index++ - err = state.CSIVolumeClaim(index, ns, vol0, claim0) + err = state.CSIVolumeClaim(index, now, ns, vol0, claim0) require.NoError(t, err) index++ - err = state.CSIVolumeClaim(index, ns, vol0, claim1) + err = state.CSIVolumeClaim(index, now, ns, vol0, claim1) require.NoError(t, err) ws = memdb.NewWatchSet() @@ -4101,7 +4117,7 @@ func TestStateStore_CSIVolume(t *testing.T) { claim2 := new(structs.CSIVolumeClaim) *claim2 = *claim0 claim2.Mode = u - err = state.CSIVolumeClaim(2, ns, vol0, claim2) + err = state.CSIVolumeClaim(2, now, ns, vol0, claim2) require.NoError(t, err) ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie") @@ -4129,12 +4145,12 @@ func TestStateStore_CSIVolume(t *testing.T) { claim3 := new(structs.CSIVolumeClaim) *claim3 = *claim2 claim3.State = structs.CSIVolumeClaimStateReadyToFree - err = state.CSIVolumeClaim(index, ns, vol0, claim3) + err = state.CSIVolumeClaim(index, now, ns, vol0, claim3) require.NoError(t, err) index++ claim1.Mode = u claim1.State = structs.CSIVolumeClaimStateReadyToFree - err = state.CSIVolumeClaim(index, ns, vol0, claim1) + err = state.CSIVolumeClaim(index, now, ns, vol0, claim1) require.NoError(t, err) index++ @@ -4195,6 +4211,8 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { CLIENT ) + now := time.Now().UnixNano() + // helper function calling client-side update with with // UpsertAllocs and/or UpdateAllocsFromClient, depending on which // status(es) are set @@ -4211,13 +4229,13 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { } switch kind { case SERVER: - err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), allocs) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), now, allocs) case CLIENT: // this is somewhat artificial b/c we get alloc updates // from multiple nodes concurrently but not in a single // RPC call. 
But this guarantees we'll trigger any nested // transaction setup bugs - err = store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIndex(store), allocs) + err = store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIndex(store), now, allocs) } must.NoError(t, err) return allocs @@ -4301,7 +4319,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { allocIDs = append(allocIDs, nodeAlloc.ID) allocs = append(allocs, nodeAlloc) } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), allocs) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), now, allocs) must.NoError(t, err) // node plugin now has expected counts too @@ -4450,7 +4468,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { Namespace: structs.DefaultNamespace, PluginID: plugID, } - err = store.UpsertCSIVolume(nextIndex(store), []*structs.CSIVolume{vol}) + err = store.UpsertCSIVolume(nextIndex(store), now, []*structs.CSIVolume{vol}) must.NoError(t, err) err = store.DeleteJob(nextIndex(store), structs.DefaultNamespace, controllerJobID) @@ -4994,7 +5012,7 @@ func TestStateStore_DeleteEval_Eval(t *testing.T) { t.Fatalf("err: %v", err) } - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -5109,7 +5127,7 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { t.Fatalf("err: %v", err) } - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -5781,6 +5799,7 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() node := mock.Node() must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 997, node)) @@ -5797,7 +5816,7 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { alloc.NodeID = node.ID alloc.JobID = child.ID alloc.Job = child - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc})) ws := memdb.NewWatchSet() summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) @@ -5824,7 +5843,7 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { JobID: alloc.JobID, TaskGroup: alloc.TaskGroup, } - err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update}) + err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{update}) must.NoError(t, err) must.True(t, watchFired(ws)) @@ -5846,6 +5865,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() node := mock.Node() @@ -5858,7 +5878,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc2.Job)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, 
[]*structs.Allocation{alloc1, alloc2})) // Create watchsets so we can test that update fires the watch watches := make([]memdb.WatchSet, 8) @@ -5904,7 +5924,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { TaskGroup: alloc2.TaskGroup, } - err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update, update2}) + err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{update, update2}) must.NoError(t, err) for _, ws := range watches { @@ -5954,6 +5974,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() node := mock.Node() @@ -5962,7 +5983,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc})) // Create the delta updates ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}} @@ -5983,7 +6004,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { TaskGroup: alloc.TaskGroup, } - err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update, update2}) + err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{update, update2}) must.NoError(t, err) ws := memdb.NewWatchSet() @@ -6032,8 +6053,8 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - must.NoError(t, state.UpsertDeployment(1000, deployment)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertDeployment(1000, now.UnixNano(), deployment)) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc})) healthy := now.Add(time.Second) update := &structs.Allocation{ @@ -6047,7 +6068,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { Timestamp: healthy, }, } - must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update})) + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{update})) // Check that the deployment state was updated because the healthy // deployment @@ -6084,8 +6105,8 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - must.NoError(t, state.UpsertDeployment(1000, deployment)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertDeployment(1000, now.UnixNano(), deployment)) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc})) update := &structs.Allocation{ ID: alloc.ID, @@ -6098,7 +6119,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { Canary: false, }, } - must.NoError(t, 
state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update})) + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{update})) // Check that the merging of the deployment status was correct out, err := state.AllocByID(nil, alloc.ID) @@ -6115,6 +6136,7 @@ func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() node1 := mock.Node() alloc1 := mock.Alloc() @@ -6133,7 +6155,7 @@ func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, alloc1.Job)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, alloc2.Job)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc1, alloc2, alloc3})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, now, []*structs.Allocation{alloc1, alloc2, alloc3})) // Create watches to make sure they fire when nodes are updated. ws1 := memdb.NewWatchSet() @@ -6172,7 +6194,7 @@ func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) { TaskGroup: "group", } - err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{ + err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1005, now, []*structs.Allocation{ updateAlloc1, updateAlloc2, updateAllocNonExisting, }) must.NoError(t, err) @@ -6231,7 +6253,7 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { t.Fatalf("bad: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6293,13 +6315,13 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { alloc.DeploymentID = deployment.ID require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertDeployment(1000, deployment)) + require.Nil(state.UpsertDeployment(1000, now.UnixNano(), deployment)) // Create a watch set so we can test that update fires the watch ws := memdb.NewWatchSet() require.Nil(state.AllocsByDeployment(ws, alloc.DeploymentID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc}) require.Nil(err) if !watchFired(ws) { @@ -6364,7 +6386,7 @@ func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) { _, err = state.AllocsByNamespace(watches[1], ns2.Name) require.NoError(t, err) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) require.True(t, watchFired(watches[0])) require.True(t, watchFired(watches[1])) @@ -6417,7 +6439,7 @@ func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { alloc := mock.Alloc() alloc.Job = nil - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 999, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 999, time.Now().UnixNano(), []*structs.Allocation{alloc}) if err == nil || !strings.Contains(err.Error(), "without a job") { 
t.Fatalf("expect err: %v", err) } @@ -6446,7 +6468,7 @@ func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) require.NoError(t, err) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) require.NoError(t, err) require.True(t, watchFired(ws)) @@ -6471,12 +6493,13 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() + now := time.Now().UnixNano() if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { t.Fatalf("err: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6514,7 +6537,7 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { t.Fatalf("bad: %v", err) } - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -6569,6 +6592,7 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { // when set rather than preferring the existing status. func TestStateStore_UpdateAlloc_Lost(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() state := testStateStore(t) alloc := mock.Alloc() @@ -6578,7 +6602,7 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { t.Fatalf("err: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6586,7 +6610,7 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { alloc2 := new(structs.Allocation) *alloc2 = *alloc alloc2.ClientStatus = structs.AllocClientStatusLost - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc2}); err != nil { t.Fatalf("err: %v", err) } @@ -6609,6 +6633,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() + now := time.Now().UnixNano() // Upsert a job state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) @@ -6616,7 +6641,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { t.Fatalf("err: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6628,14 +6653,14 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { // Update the desired state of the allocation to stop allocCopy := alloc.Copy() allocCopy.DesiredStatus = structs.AllocDesiredStatusStop - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{allocCopy}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{allocCopy}); err != nil { t.Fatalf("err: %v", err) } // Update the client state of the allocation to complete allocCopy1 := allocCopy.Copy() allocCopy1.ClientStatus = structs.AllocClientStatusComplete - if err := 
state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{allocCopy1}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{allocCopy1}); err != nil { t.Fatalf("err: %v", err) } @@ -6656,7 +6681,7 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { alloc := mock.Alloc() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) t1 := &structs.DesiredTransition{ Migrate: pointer.Of(true), @@ -6720,6 +6745,7 @@ func TestStateStore_JobSummary(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Add a job job := mock.Job() @@ -6740,30 +6766,30 @@ func TestStateStore_JobSummary(t *testing.T) { alloc := mock.Alloc() alloc.JobID = job.ID alloc.Job = job - state.UpsertAllocs(structs.MsgTypeTestSetup, 910, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 910, now, []*structs.Allocation{alloc}) // Update the alloc from client alloc1 := alloc.Copy() alloc1.ClientStatus = structs.AllocClientStatusPending alloc1.DesiredStatus = "" - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 920, []*structs.Allocation{alloc}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 920, now, []*structs.Allocation{alloc}) alloc3 := alloc.Copy() alloc3.ClientStatus = structs.AllocClientStatusRunning alloc3.DesiredStatus = "" - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 930, []*structs.Allocation{alloc3}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 930, now, []*structs.Allocation{alloc3}) // Upsert the alloc alloc4 := alloc.Copy() alloc4.ClientStatus = structs.AllocClientStatusPending alloc4.DesiredStatus = structs.AllocDesiredStatusRun - state.UpsertAllocs(structs.MsgTypeTestSetup, 950, []*structs.Allocation{alloc4}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 950, now, []*structs.Allocation{alloc4}) // Again upsert the alloc alloc5 := alloc.Copy() alloc5.ClientStatus = structs.AllocClientStatusPending alloc5.DesiredStatus = structs.AllocDesiredStatusRun - state.UpsertAllocs(structs.MsgTypeTestSetup, 970, []*structs.Allocation{alloc5}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 970, now, []*structs.Allocation{alloc5}) if !watchFired(ws) { t.Fatalf("bad") @@ -6794,7 +6820,7 @@ func TestStateStore_JobSummary(t *testing.T) { alloc6 := alloc.Copy() alloc6.ClientStatus = structs.AllocClientStatusRunning alloc6.DesiredStatus = "" - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 990, []*structs.Allocation{alloc6}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 990, now, []*structs.Allocation{alloc6}) // We shouldn't have any summary at this point summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) @@ -6821,7 +6847,7 @@ func TestStateStore_JobSummary(t *testing.T) { alloc7.Job = outJob alloc7.ClientStatus = structs.AllocClientStatusComplete alloc7.DesiredStatus = structs.AllocDesiredStatusRun - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1020, []*structs.Allocation{alloc7}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1020, now, []*structs.Allocation{alloc7}) expectedSummary = structs.JobSummary{ JobID: job.ID, @@ -6844,6 +6870,7 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := 
time.Now().UnixNano() // Create an alloc alloc := mock.Alloc() @@ -6861,12 +6888,12 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { alloc2.Job = alloc.Job // Upserts the alloc - state.UpsertAllocs(structs.MsgTypeTestSetup, 110, []*structs.Allocation{alloc, alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 110, now, []*structs.Allocation{alloc, alloc2}) // Change the state of the first alloc to running alloc3 := alloc.Copy() alloc3.ClientStatus = structs.AllocClientStatusRunning - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 120, []*structs.Allocation{alloc3}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 120, now, []*structs.Allocation{alloc3}) //Add some more allocs to the second tg alloc4 := mock.Alloc() @@ -6903,9 +6930,9 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { alloc12.TaskGroup = "db" alloc12.ClientStatus = structs.AllocClientStatusUnknown - state.UpsertAllocs(structs.MsgTypeTestSetup, 130, []*structs.Allocation{alloc4, alloc6, alloc8, alloc10, alloc12}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 130, now, []*structs.Allocation{alloc4, alloc6, alloc8, alloc10, alloc12}) - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 150, []*structs.Allocation{alloc5, alloc7, alloc9, alloc11}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 150, now, []*structs.Allocation{alloc5, alloc7, alloc9, alloc11}) // DeleteJobSummary is a helper method and doesn't modify the indexes table state.DeleteJobSummary(130, alloc.Namespace, alloc.Job.ID) @@ -6978,7 +7005,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusFailed require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 110, nil, childJob)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 111, []*structs.Allocation{alloc, alloc2})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 111, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) // Make the summary incorrect in the state store summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID) @@ -7032,10 +7059,11 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() alloc := mock.Alloc() state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, alloc.Job) - state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 200, now, []*structs.Allocation{alloc}) // Delete the job state.DeleteJob(300, alloc.Namespace, alloc.Job.ID) @@ -7045,7 +7073,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { alloc1.ClientStatus = structs.AllocClientStatusRunning // Updating allocation should not throw any error - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, now, []*structs.Allocation{alloc1}); err != nil { t.Fatalf("expect err: %v", err) } @@ -7055,7 +7083,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { // Update the alloc again alloc2 := alloc.Copy() alloc2.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, now, []*structs.Allocation{alloc1}); err != nil { t.Fatalf("expect err: %v", err) } @@ -7084,9 +7112,10 @@ func TestStateStore_EvictAlloc_Alloc(t 
*testing.T) { state := testStateStore(t) alloc := mock.Alloc() + now := time.Now().UnixNano() state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -7094,7 +7123,7 @@ func TestStateStore_EvictAlloc_Alloc(t *testing.T) { alloc2 := new(structs.Allocation) *alloc2 = *alloc alloc2.DesiredStatus = structs.AllocDesiredStatusEvict - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -7134,7 +7163,7 @@ func TestStateStore_AllocsByNode(t *testing.T) { state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7179,7 +7208,7 @@ func TestStateStore_AllocsByNodeTerminal(t *testing.T) { state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7232,7 +7261,7 @@ func TestStateStore_AllocsByJob(t *testing.T) { state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7257,6 +7286,7 @@ func TestStateStore_AllocsByJob(t *testing.T) { func TestStateStore_AllocsForRegisteredJob(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() state := testStateStore(t) var allocs []*structs.Allocation @@ -7271,7 +7301,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { alloc.JobID = job.ID allocs = append(allocs, alloc) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, allocs); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, now, allocs); err != nil { t.Fatalf("err: %v", err) } @@ -7290,7 +7320,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { allocs1 = append(allocs1, alloc) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs1); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, allocs1); err != nil { t.Fatalf("err: %v", err) } @@ -7347,7 +7377,7 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) require.NoError(t, err) gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { @@ -7438,7 +7468,8 @@ func TestStateStore_AllocsByIDPrefix_Namespaces(t *testing.T) { alloc2.Namespace = ns2.Name require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs( + structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, 
alloc2})) gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { var allocs []*structs.Allocation @@ -7486,7 +7517,7 @@ func TestStateStore_Allocs(t *testing.T) { state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7536,7 +7567,9 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { allocs[1].PreviousAllocation = allocs[0].ID allocs[2].PreviousAllocation = allocs[1].ID - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) + now := time.Now().UnixNano() + + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, allocs) require.Nil(err) ws := memdb.NewWatchSet() @@ -7565,7 +7598,7 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { // Insert another alloc, verify index of previous alloc also got updated alloc := mock.Alloc() alloc.PreviousAllocation = allocs[0].ID - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc}) require.Nil(err) alloc0, err := state.AllocByID(nil, allocs[0].ID) require.Nil(err) @@ -7733,7 +7766,7 @@ func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) { alloc.JobID = job.ID alloc.DesiredStatus = structs.AllocDesiredStatusStop state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -7767,7 +7800,7 @@ func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) { alloc.JobID = job.ID alloc.DesiredStatus = structs.AllocDesiredStatusRun state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -7905,6 +7938,7 @@ func TestStateStore_SetJobStatus_SystemJob(t *testing.T) { func TestStateJobSummary_UpdateJobCount(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() state := testStateStore(t) alloc := mock.Alloc() @@ -7921,7 +7955,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { t.Fatalf("err: %v", err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -7961,7 +7995,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { alloc3.Job = job alloc3.JobID = job.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2, alloc3}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc2, alloc3}); err != nil { t.Fatalf("err: %v", err) } @@ -8006,7 +8040,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { alloc5.JobID = alloc3.JobID alloc5.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc4, alloc5}); err != nil { + if err := 
state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1004, now, []*structs.Allocation{alloc4, alloc5}); err != nil { t.Fatalf("err: %v", err) } @@ -8036,6 +8070,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { func TestJobSummary_UpdateClientStatus(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() state := testStateStore(t) alloc := mock.Alloc() @@ -8055,7 +8090,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { t.Fatalf("err: %v", err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil { t.Fatalf("err: %v", err) } @@ -8083,7 +8118,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc6.JobID = alloc.JobID alloc6.ClientStatus = structs.AllocClientStatusRunning - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil { t.Fatalf("err: %v", err) } @@ -8100,7 +8135,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc7.Job = alloc.Job alloc7.JobID = alloc.JobID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc7}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{alloc7}); err != nil { t.Fatalf("err: %v", err) } summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) @@ -8122,7 +8157,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) { Status: structs.DeploymentStatusRunning, }, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req) + err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), req) if err == nil || !strings.Contains(err.Error(), "does not exist") { t.Fatalf("expected error updating the status because the deployment doesn't exist") } @@ -8133,12 +8168,13 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert a terminal deployment d := mock.Deployment() d.Status = structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8149,7 +8185,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { Status: structs.DeploymentStatusRunning, }, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req) + err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, now, req) if err == nil || !strings.Contains(err.Error(), "has terminal status") { t.Fatalf("expected error updating the status because the deployment is terminal") } @@ -8161,10 +8197,11 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8183,7 +8220,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { Job: j, Eval: e, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req) + err := 
state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, now, req) if err != nil { t.Fatalf("bad: %v", err) } @@ -8223,6 +8260,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert a job job := mock.Job() @@ -8232,7 +8270,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { // Insert a deployment d := structs.NewDeployment(job, 50) - if err := state.UpsertDeployment(2, d); err != nil { + if err := state.UpsertDeployment(2, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8244,7 +8282,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { StatusDescription: structs.DeploymentStatusDescriptionSuccessful, }, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 3, req) + err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 3, now, req) if err != nil { t.Fatalf("bad: %v", err) } @@ -8324,7 +8362,7 @@ func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), req) if err == nil || !strings.Contains(err.Error(), "does not exist") { t.Fatalf("expected error promoting because the deployment doesn't exist") } @@ -8335,12 +8373,13 @@ func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert a terminal deployment d := mock.Deployment() d.Status = structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8351,7 +8390,7 @@ func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, now, req) if err == nil || !strings.Contains(err.Error(), "has terminal status") { t.Fatalf("expected error updating the status because the deployment is terminal: %v", err) } @@ -8360,6 +8399,7 @@ func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { // Test promoting unhealthy canaries in a deployment. 
func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() state := testStateStore(t) require := require.New(t) @@ -8372,7 +8412,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.TaskGroups["web"].DesiredCanaries = 2 - require.Nil(state.UpsertDeployment(2, d)) + require.Nil(state.UpsertDeployment(2, now, d)) // Create a set of allocations c1 := mock.Alloc() @@ -8392,7 +8432,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { c3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{c1, c2, c3})) // Promote the canaries req := &structs.ApplyDeploymentPromoteRequest{ @@ -8401,7 +8441,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req) require.NotNil(err) require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) } @@ -8410,6 +8450,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() state := testStateStore(t) require := require.New(t) @@ -8421,7 +8462,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { d := mock.Deployment() d.TaskGroups["web"].DesiredCanaries = 2 d.JobID = j.ID - require.Nil(state.UpsertDeployment(2, d)) + require.Nil(state.UpsertDeployment(2, now, d)) // Promote the canaries req := &structs.ApplyDeploymentPromoteRequest{ @@ -8430,7 +8471,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req) require.NotNil(err) require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) } @@ -8440,6 +8481,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Create a job with two task groups j := mock.Job() @@ -8465,7 +8507,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { DesiredCanaries: 1, }, } - if err := state.UpsertDeployment(2, d); err != nil { + if err := state.UpsertDeployment(2, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8486,7 +8528,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { Healthy: pointer.Of(true), } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{c1, c2}); err != nil { t.Fatalf("err: %v", err) } @@ -8501,7 +8543,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { }, Eval: e, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req) if err != nil { t.Fatalf("bad: %v", err) } @@ -8540,6 +8582,7 @@ func 
TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { require := require.New(t) state := testStateStore(t) + now := time.Now().UnixNano() // Create a job with two task groups j := mock.Job() @@ -8562,7 +8605,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { DesiredCanaries: 1, }, } - require.Nil(state.UpsertDeployment(2, d)) + require.Nil(state.UpsertDeployment(2, now, d)) // Create a set of allocations for both groups, including an unhealthy one c1 := mock.Alloc() @@ -8594,7 +8637,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { Canary: true, } - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{c1, c2, c3})) // Create an eval e := mock.Eval() @@ -8607,7 +8650,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { }, Eval: e, } - require.Nil(state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req)) + require.Nil(state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req)) // Check that the status per task group was updated properly ws := memdb.NewWatchSet() @@ -8650,7 +8693,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) { HealthyAllocationIDs: []string{uuid.Generate()}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), req) if err == nil || !strings.Contains(err.Error(), "does not exist") { t.Fatalf("expected error because the deployment doesn't exist: %v", err) } @@ -8661,12 +8704,13 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert a terminal deployment d := mock.Deployment() d.Status = structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8677,7 +8721,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { HealthyAllocationIDs: []string{uuid.Generate()}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, now, req) if err == nil || !strings.Contains(err.Error(), "has terminal status") { t.Fatalf("expected error because the deployment is terminal: %v", err) } @@ -8688,10 +8732,11 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing. ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8702,7 +8747,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing. 
HealthyAllocationIDs: []string{uuid.Generate()}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, now, req) if err == nil || !strings.Contains(err.Error(), "unknown alloc") { t.Fatalf("expected error because the alloc doesn't exist: %v", err) } @@ -8713,10 +8758,11 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Create a deployment d1 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(2, d1)) + require.NoError(t, state.UpsertDeployment(2, now, d1)) // Create a Job job := mock.Job() @@ -8730,7 +8776,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: true, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{a})) // Pull the deployment from state ws := memdb.NewWatchSet() @@ -8748,7 +8794,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: false, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{b})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{b})) // Pull the deployment from state ws = memdb.NewWatchSet() @@ -8760,7 +8806,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { // Create a second deployment d2 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(5, d2)) + require.NoError(t, state.UpsertDeployment(5, now, d2)) c := mock.Alloc() c.JobID = job.ID @@ -8769,7 +8815,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: true, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, []*structs.Allocation{c})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, now, []*structs.Allocation{c})) ws = memdb.NewWatchSet() deploy2, err := state.DeploymentByID(ws, d2.ID) @@ -8783,10 +8829,11 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Create a deployment d1 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(2, d1)) + require.NoError(t, state.UpsertDeployment(2, now, d1)) // Create a Job job := mock.Job() @@ -8800,7 +8847,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { Healthy: pointer.Of(true), Canary: false, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{a})) // Pull the deployment from state ws := memdb.NewWatchSet() @@ -8817,21 +8864,22 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert two deployment d1 := mock.Deployment() d2 := mock.Deployment() - if err := state.UpsertDeployment(1, d1); err != nil { + if err := state.UpsertDeployment(1, now, d1); err != nil { t.Fatalf("bad: %v", err) } - if err := state.UpsertDeployment(2, d2); err != nil { + if err := state.UpsertDeployment(2, now, d2); err != nil { t.Fatalf("bad: %v", err) } // Insert an alloc for a random deployment a := mock.Alloc() a.DeploymentID = d1.ID - if err 
:= state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{a}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{a}); err != nil { t.Fatalf("bad: %v", err) } @@ -8842,7 +8890,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t HealthyAllocationIDs: []string{a.ID}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 4, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 4, now, req) if err == nil || !strings.Contains(err.Error(), "not part of deployment") { t.Fatalf("expected error because the alloc isn't part of the deployment: %v", err) } @@ -8853,11 +8901,12 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { ci.Parallel(t) state := testStateStore(t) + now := time.Now().UnixNano() // Insert a deployment d := mock.Deployment() d.TaskGroups["web"].ProgressDeadline = 5 * time.Minute - if err := state.UpsertDeployment(1, d); err != nil { + if err := state.UpsertDeployment(1, now, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8866,7 +8915,7 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { a1.DeploymentID = d.ID a2 := mock.Alloc() a2.DeploymentID = d.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{a1, a2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, now, []*structs.Allocation{a1, a2}); err != nil { t.Fatalf("bad: %v", err) } @@ -8899,7 +8948,7 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { DeploymentUpdate: u, Timestamp: ts, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 3, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 3, now, req) if err != nil { t.Fatalf("bad: %v", err) } diff --git a/nomad/variables_endpoint_test.go b/nomad/variables_endpoint_test.go index 43c1ab7dde0..3d2e4a646b0 100644 --- a/nomad/variables_endpoint_test.go +++ b/nomad/variables_endpoint_test.go @@ -488,7 +488,7 @@ func TestVariablesEndpoint_auth(t *testing.T) { store := srv.fsm.State() must.NoError(t, store.UpsertNamespaces(1000, []*structs.Namespace{{Name: ns}})) must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) + structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) wiHandle := &structs.WIHandle{ WorkloadIdentifier: "web", @@ -590,7 +590,7 @@ func TestVariablesEndpoint_auth(t *testing.T) { // make alloc non-terminal alloc1.ClientStatus = structs.AllocClientStatusRunning must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, 1200, []*structs.Allocation{alloc1})) + structs.MsgTypeTestSetup, 1200, time.Now().UnixNano(), []*structs.Allocation{alloc1})) t.Run("wrong namespace should be denied", func(t *testing.T) { err := testFn(&structs.QueryOptions{ @@ -877,7 +877,7 @@ func TestVariablesEndpoint_ListFiltering(t *testing.T) { must.NoError(t, store.UpsertNamespaces(idx, []*structs.Namespace{{Name: ns}})) idx++ must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, idx, []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, idx, time.Now().UnixNano(), []*structs.Allocation{alloc})) wiHandle := &structs.WIHandle{ WorkloadIdentifier: "web", diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go index 83dc2e1f449..e788f635221 100644 --- a/nomad/volumewatcher/volumes_watcher_test.go +++ 
b/nomad/volumewatcher/volumes_watcher_test.go @@ -36,9 +36,10 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete vol := testVolume(plugin, alloc, node.ID) + now := time.Now().UnixNano() index++ - err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) + err := srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol}) require.NoError(t, err) // need to have just enough of a volume and claim in place so that @@ -48,7 +49,7 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { State: structs.CSIVolumeClaimStateNodeDetached, } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) require.NoError(t, err) require.Eventually(t, func() bool { watcher.wlock.RLock() @@ -79,16 +80,17 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { alloc := mock.Alloc() alloc.ClientStatus = structs.AllocClientStatusRunning vol := testVolume(plugin, alloc, node.ID) + now := time.Now().UnixNano() index++ - err := srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, + err := srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{alloc}) require.NoError(t, err) watcher.SetEnabled(true, srv.State(), "") index++ - err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) + err = srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol}) require.NoError(t, err) // we should get or start up a watcher when we get an update for @@ -127,7 +129,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { State: structs.CSIVolumeClaimStateUnpublishing, } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) require.NoError(t, err) // create a new watcher and enable it to simulate the leadership @@ -151,6 +153,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { // it receives notifcations and has completed its work func TestVolumeWatch_StartStop(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() srv := &MockStatefulRPCServer{} srv.state = state.TestStateStore(t) @@ -172,13 +175,13 @@ func TestVolumeWatch_StartStop(t *testing.T) { err := srv.State().UpsertJob(structs.MsgTypeTestSetup, index, nil, alloc1.Job) require.NoError(t, err) index++ - err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1, alloc2}) + err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{alloc1, alloc2}) require.NoError(t, err) // register a volume and an unused volume vol := testVolume(plugin, alloc1, node.ID) index++ - err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) + err = srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol}) require.NoError(t, err) // assert we get a watcher; there are no claims so it should immediately stop @@ -197,11 +200,11 @@ func TestVolumeWatch_StartStop(t *testing.T) { } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) require.NoError(t, err) claim.AllocationID = alloc2.ID index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) require.NoError(t, err) // reap the volume and assert nothing has happened @@ -210,7 +213,7 @@ func TestVolumeWatch_StartStop(t 
*testing.T) { NodeID: node.ID, } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) require.NoError(t, err) ws := memdb.NewWatchSet() @@ -221,11 +224,11 @@ func TestVolumeWatch_StartStop(t *testing.T) { alloc1 = alloc1.Copy() alloc1.ClientStatus = structs.AllocClientStatusComplete index++ - err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1}) + err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{alloc1}) require.NoError(t, err) index++ claim.State = structs.CSIVolumeClaimStateReadyToFree - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) require.NoError(t, err) // watcher stops and 1 claim has been released @@ -244,6 +247,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { // notifications around a deleted volume func TestVolumeWatch_Delete(t *testing.T) { ci.Parallel(t) + now := time.Now().UnixNano() srv := &MockStatefulRPCServer{} srv.state = state.TestStateStore(t) @@ -258,7 +262,7 @@ func TestVolumeWatch_Delete(t *testing.T) { plugin := mock.CSIPlugin() vol := mock.CSIVolume(plugin) index++ - must.NoError(t, srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol})) + must.NoError(t, srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol})) // assert we get a watcher; there are no claims so it should immediately stop require.Eventually(t, func() bool { @@ -270,7 +274,7 @@ func TestVolumeWatch_Delete(t *testing.T) { // write a GC claim to the volume and then immediately delete, to // potentially hit the race condition between updates and deletes index++ - must.NoError(t, srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, + must.NoError(t, srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, &structs.CSIVolumeClaim{ Mode: structs.CSIVolumeClaimGC, State: structs.CSIVolumeClaimStateReadyToFree, @@ -312,7 +316,7 @@ func TestVolumeWatch_RegisterDeregister(t *testing.T) { // register a volume without claims vol := mock.CSIVolume(plugin) index++ - err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) + err := srv.State().UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol}) require.NoError(t, err) // watcher should stop diff --git a/nomad/worker_test.go b/nomad/worker_test.go index c63deec3da0..9cd0944db16 100644 --- a/nomad/worker_test.go +++ b/nomad/worker_test.go @@ -533,7 +533,7 @@ func TestWorker_SubmitPlanNormalizedAllocations(t *testing.T) { stoppedAlloc := mock.Alloc() preemptedAlloc := mock.Alloc() - s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 5, []*structs.Allocation{stoppedAlloc, preemptedAlloc}) + s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 5, time.Now().UnixNano(), []*structs.Allocation{stoppedAlloc, preemptedAlloc}) // Create an allocation plan plan := &structs.Plan{ diff --git a/scheduler/context_test.go b/scheduler/context_test.go index 5fe23e2a863..51cf902f046 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -5,6 +5,7 @@ package scheduler import ( "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/idset" @@ -162,7 +163,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - 
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) // Add a planned eviction to alloc1 plan := ctx.Plan() @@ -302,7 +303,7 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{allocEvict, allocPreempt, allocPropose})) // Plan to evict one alloc and preempt another plan := ctx.Plan() diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index adda5e2cb2a..90bd3ef92d1 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -592,7 +592,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - assert.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs), "UpsertAllocs") + assert.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs), "UpsertAllocs") // Update the count job2 := job.Copy() @@ -808,7 +808,7 @@ func TestServiceSched_JobRegister_Datacenter_Downgrade(t *testing.T) { } allocs = append(allocs, alloc) } - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update job to place it in dc2. job2 := job1.Copy() @@ -942,7 +942,7 @@ func TestServiceSched_JobRegister_NodePool_Downgrade(t *testing.T) { } allocs = append(allocs, alloc) } - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update job to place it in the spread node pool. 
job2 := job1.Copy() @@ -1891,7 +1891,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -1905,7 +1905,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusFailed // #10446 terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) // Update the job job2 := mock.Job() @@ -2003,7 +2003,7 @@ func TestServiceSched_JobModify_ExistingDuplicateAllocIndex(t *testing.T) { } allocs = append(allocs, alloc) } - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), allocs)) + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), time.Now().UnixNano(), allocs)) // Generate a job modification which will force a destructive update. mockJob2 := mock.Job() @@ -2081,7 +2081,7 @@ func TestServiceSched_JobModify_ProposedDuplicateAllocIndex(t *testing.T) { alloc.Name = structs.AllocName(mockJob.ID, mockJob.TaskGroups[0].Name, uint(i)) allocs = append(allocs, alloc) } - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), allocs)) + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), time.Now().UnixNano(), allocs)) // Generate a job modification which will force a destructive update as // well as a scaling. @@ -2105,7 +2105,7 @@ func TestServiceSched_JobModify_ProposedDuplicateAllocIndex(t *testing.T) { canaryAlloc.Name = structs.AllocName(mockJob2.ID, mockJob2.TaskGroups[0].Name, uint(0)) canaryAlloc.DeploymentID = deploymentID canaryAlloc.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, nextRaftIndex, []*structs.Allocation{ + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, nextRaftIndex, time.Now().UnixNano(), []*structs.Allocation{ canaryAlloc, })) @@ -2131,7 +2131,7 @@ func TestServiceSched_JobModify_ProposedDuplicateAllocIndex(t *testing.T) { EvalPriority: 50, JobCreateIndex: mockJob2.CreateIndex, } - must.NoError(t, testHarness.State.UpsertDeployment(nextRaftIndex, &canaryDeployment)) + must.NoError(t, testHarness.State.UpsertDeployment(nextRaftIndex, time.Now().UnixNano(), &canaryDeployment)) // Create a mock evaluation which represents work to reconcile the job // update. @@ -2218,7 +2218,7 @@ func TestServiceSched_JobModify_ExistingDuplicateAllocIndexNonDestructive(t *tes allocs = append(allocs, alloc) allocIDs = append(allocIDs, alloc.ID) } - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), allocs)) + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), time.Now().UnixNano(), allocs)) // Generate a job modification which will be an in-place update. 
mockJob2 := mockJob.Copy() @@ -2300,7 +2300,7 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job to 2 DCs job2 := job.Copy() @@ -2375,7 +2375,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { alloc.Name = "my-job.web[0]" alloc.AllocatedResources.Tasks["web"].Cpu.CpuShares = 256 allocs = append(allocs, alloc) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job to count 3 job2.TaskGroups[0].Count = 3 @@ -2471,7 +2471,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -2484,7 +2484,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) // Update the job to be count zero job2 := mock.Job() @@ -2573,7 +2573,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := mock.Job() @@ -2699,7 +2699,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Update the job to place more versions of the task group, drop the count // and force destructive updates @@ -2803,7 +2803,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := mock.Job() @@ -2921,7 +2921,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { d := mock.Deployment() d.JobID = job.ID require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), d)) taskName := 
job.TaskGroups[0].Tasks[0].Name @@ -2950,7 +2950,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { alloc.AllocatedResources.Shared = asr allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := mock.Job() @@ -3077,7 +3077,7 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.AllocatedResources = nil // 0.8 didn't have this - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Update the job inplace job2 := job.Copy() @@ -3175,7 +3175,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -3297,7 +3297,7 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { failedAllocID := failedAlloc.ID successAllocID := allocs[0].ID - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create and process a mock evaluation eval := &structs.Evaluation{ @@ -3397,7 +3397,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -3466,7 +3466,7 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { alloc.JobID = job.ID allocs = append(allocs, alloc) } - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a summary where the queued allocs are set as we want to assert // they get zeroed out. 
@@ -3603,7 +3603,7 @@ func TestServiceSched_NodeDown(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(tc.migrate) allocs := []*structs.Allocation{alloc} - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -3757,7 +3757,7 @@ func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { }} } must.NoError(t, h.State.UpsertAllocs( - structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with node going down evals := []*structs.Evaluation{{ @@ -3865,14 +3865,14 @@ func TestServiceSched_NodeUpdate(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Mark some allocs as running ws := memdb.NewWatchSet() for i := 0; i < 4; i++ { out, _ := h.State.AllocByID(ws, allocs[i].ID) out.ClientStatus = structs.AllocClientStatusRunning - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{out})) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{out})) } // Create a mock evaluation which won't trigger any new placements @@ -3928,7 +3928,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -3996,6 +3996,8 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { job := mock.Job() require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + now := time.Now().UnixNano() + var allocs []*structs.Allocation for i := 0; i < 10; i++ { alloc := mock.Alloc() @@ -4005,7 +4007,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now, allocs)) // Set the desired state of the allocs to stop var stop []*structs.Allocation @@ -4015,7 +4017,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.DesiredTransition.Migrate = pointer.Of(true) stop = append(stop, newAlloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), stop)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now, stop)) // Mark some of the allocations as running var running []*structs.Allocation @@ -4024,7 +4026,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusRunning running = append(running, newAlloc) } - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), 
running)) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), now, running)) // Mark some of the allocations as complete var complete []*structs.Allocation @@ -4043,7 +4045,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusComplete complete = append(complete, newAlloc) } - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), complete)) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), now, complete)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -4120,7 +4122,7 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) node.DrainStrategy = mock.DrainNode().DrainStrategy require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) @@ -4187,7 +4189,7 @@ func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { } allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) node.DrainStrategy = mock.DrainNode().DrainStrategy require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) @@ -4340,7 +4342,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { failedAllocID := allocs[1].ID successAllocID := allocs[0].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now.UnixNano(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4385,7 +4387,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { // Mark this alloc as failed again, should not get rescheduled newAlloc.ClientStatus = structs.AllocClientStatusFailed - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{newAlloc})) // Create another mock evaluation eval = &structs.Evaluation{ @@ -4454,7 +4456,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { FinishedAt: now}} failedAllocID := allocs[1].ID - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4543,7 +4545,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now.UnixNano(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4614,7 +4616,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { failedAllocId = newAlloc.ID failedNodeID = newAlloc.NodeID - require.NoError(t, 
h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{newAlloc})) // Create another mock evaluation eval = &structs.Evaluation{ @@ -4676,7 +4678,7 @@ func TestServiceSched_BlockedReschedule(t *testing.T) { failedAllocID := alloc.ID must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, - h.NextIndex(), []*structs.Allocation{alloc})) + h.NextIndex(), now.UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation for the allocation failure eval := &structs.Evaluation{ @@ -4749,7 +4751,7 @@ func TestServiceSched_BlockedReschedule(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now}} must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, - h.NextIndex(), []*structs.Allocation{alloc})) + h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation for the allocation failure eval.ID = uuid.Generate() @@ -4817,7 +4819,7 @@ func TestServiceSched_BlockedReschedule(t *testing.T) { alloc = alloc.Copy() alloc.FollowupEvalID = blockedEval.ID must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, - h.NextIndex(), []*structs.Allocation{alloc})) + h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) must.NoError(t, h.Process(NewServiceScheduler, blockedEval)) must.Len(t, 5, h.Plans) @@ -4917,7 +4919,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { failedAllocID := allocs[1].ID successAllocID := allocs[0].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -5004,7 +5006,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { deployment.Status = structs.DeploymentStatusFailed } - require.Nil(h.State.UpsertDeployment(h.NextIndex(), deployment)) + require.Nil(h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -5023,7 +5025,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { FinishedAt: time.Now().Add(-10 * time.Hour)}} allocs[1].DesiredTransition.Reschedule = pointer.Of(true) - require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -5081,7 +5083,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.ClientStatus = structs.AllocClientStatusComplete - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5146,7 +5148,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead", StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, 
h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5230,7 +5232,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5306,7 +5308,7 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead", StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now.UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5369,7 +5371,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { }, }, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to rerun the job eval := &structs.Evaluation{ @@ -5436,7 +5438,7 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to trigger the job eval := &structs.Evaluation{ @@ -5490,7 +5492,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := mock.Job() @@ -5521,7 +5523,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { } allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -5573,7 +5575,7 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.ClientStatus = structs.AllocClientStatusRunning - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create an update job job2 := job.Copy() @@ -5656,7 +5658,7 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { }, }, } - require.NoError(t, 
h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5725,7 +5727,7 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { alloc.Metrics = scoreMetric allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job's modify index to force an inplace upgrade updatedJob := job.Copy() @@ -6048,7 +6050,7 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true alloc.DesiredTransition.Migrate = pointer.Of(true) must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, alloc.Job)) - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -6103,7 +6105,7 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { d.JobID = job.ID d.JobCreateIndex = job.CreateIndex d.JobModifyIndex = job.JobModifyIndex - 1 - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), d)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -6174,7 +6176,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { // Create a deployment for an old version of the job d := mock.Deployment() d.JobID = job.ID - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), d)) // Upsert again to bump job version require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) @@ -6665,7 +6667,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) alloc := mock.Alloc() alloc.Job = job @@ -6676,7 +6678,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusRun alloc.ClientStatus = structs.AllocClientStatusRunning alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation eval := &structs.Evaluation{ @@ -6740,7 +6742,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) var allocs []*structs.Allocation for i := 0; i < 
3; i++ { @@ -6751,7 +6753,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // new update with new task group job2 := job.Copy() @@ -6938,7 +6940,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), initDeployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), initDeployment)) deploymentIDs := []string{initDeployment.ID} @@ -6972,7 +6974,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) deploymentIDs = append(deploymentIDs, deployment.ID) @@ -7024,7 +7026,7 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { // simulate a case where .NextAllocation is set but alloc is still running allocs[2].PreviousAllocation = allocs[0].ID allocs[0].NextAllocation = allocs[2].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // new update with new task group job2 := job.Copy() @@ -7110,7 +7112,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { shared.AccessMode = structs.CSIVolumeAccessModeMultiNodeReader require.NoError(h.State.UpsertCSIVolume( - h.NextIndex(), []*structs.CSIVolume{shared, vol0, vol1, vol2})) + h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{shared, vol0, vol1, vol2})) // Create a job that uses both job := mock.Job() @@ -7203,7 +7205,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { vol5 := vol0.Copy() vol5.ID = "volume-unique[4]" require.NoError(h.State.UpsertCSIVolume( - h.NextIndex(), []*structs.CSIVolume{vol4, vol5})) + h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{vol4, vol5})) // Process again with failure fixed. It should create a new plan eval.ID = uuid.Generate() @@ -7282,7 +7284,7 @@ func TestServiceSched_CSITopology(t *testing.T) { vol1.RequestedTopologies.Required[0].Segments["zone"] = "zone-1" require.NoError(t, h.State.UpsertCSIVolume( - h.NextIndex(), []*structs.CSIVolume{vol0, vol1})) + h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{vol0, vol1})) // Create a job that uses those volumes job := mock.Job() @@ -7533,7 +7535,7 @@ func TestServiceSched_Client_Disconnect_Creates_Updates_and_Evals(t *testing.T) // Simulate that NodeAllocation got processed. must.NoError(t, h.State.UpsertAllocs( - structs.MsgTypeTestSetup, h.NextIndex(), + structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), h.Plans[0].NodeAllocation[disconnectedNode.ID])) // Validate that the StateStore Upsert applied the ClientStatus we specified. 
@@ -7572,7 +7574,7 @@ func initNodeAndAllocs(t *testing.T, h *Harness, job *structs.Job, allocs[i] = alloc } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) return node, job, allocs } diff --git a/scheduler/rank_test.go b/scheduler/rank_test.go index 6dec2f77784..788b885f6a9 100644 --- a/scheduler/rank_test.go +++ b/scheduler/rank_test.go @@ -6,6 +6,7 @@ package scheduler import ( "sort" "testing" + "time" "github.com/hashicorp/nomad/client/lib/idset" "github.com/hashicorp/nomad/client/lib/numalib" @@ -786,7 +787,7 @@ func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1377,7 +1378,7 @@ func TestBinPackIterator_ReservedCores(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1489,7 +1490,7 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1603,7 +1604,7 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) // Add a planned eviction to alloc1 plan := ctx.Plan() @@ -1927,7 +1928,7 @@ func TestBinPackIterator_Devices(t *testing.T) { for _, alloc := range c.ExistingAllocs { alloc.NodeID = c.Node.ID } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, c.ExistingAllocs)) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), c.ExistingAllocs)) } static := NewStaticRankIterator(ctx, []*RankedNode{{Node: c.Node}}) diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index ef0fc491910..a667a570952 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -136,7 +136,7 @@ func 
TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { // Get an allocation and mark it as failed alloc := planned[4].Copy() alloc.ClientStatus = structs.AllocClientStatusFailed - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to handle the update eval = &structs.Evaluation{ @@ -441,7 +441,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Add a new node. node := mock.Node() @@ -555,7 +555,7 @@ func TestSystemSched_JobModify(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -568,7 +568,7 @@ func TestSystemSched_JobModify(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) // Update the job job2 := mock.SystemJob() @@ -644,7 +644,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := mock.SystemJob() @@ -743,7 +743,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := mock.SystemJob() @@ -835,7 +835,7 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Update the job job2 := job.Copy() @@ -913,7 +913,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -976,7 +976,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { for _, alloc := range allocs { require.NoError(t, 
h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -1034,7 +1034,7 @@ func TestSystemSched_NodeDown(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1094,7 +1094,7 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1148,7 +1148,7 @@ func TestSystemSched_NodeDrain(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1204,7 +1204,7 @@ func TestSystemSched_NodeUpdate(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1757,7 +1757,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { alloc2.NodeID = node2.ID alloc2.Name = "my-job.web2[0]" alloc2.TaskGroup = "web2" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2006,7 +2006,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) // Create a high priority job and allocs for it // These allocs should not be preempted @@ -2050,7 +2050,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, } require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) - require.NoError(t, 
h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed job := mock.SystemJob() @@ -2916,7 +2916,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { alloc.TaskStates = tc.taskState if tc.exists { - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) } if tc.modifyJob { @@ -2937,7 +2937,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { prev.ClientStatus = structs.AllocClientStatusComplete prev.DesiredStatus = structs.AllocDesiredStatusRun - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{prev})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{prev})) } // Create a mock evaluation to deal with disconnect eval := &structs.Evaluation{ @@ -3051,7 +3051,7 @@ func TestSystemSched_CSITopology(t *testing.T) { } must.NoError(t, h.State.UpsertCSIVolume( - h.NextIndex(), []*structs.CSIVolume{vol0})) + h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{vol0})) // Create a job that uses that volumes job := mock.SystemJob() diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index f23ad185136..dbd154dca01 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -67,7 +67,7 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) { }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -229,7 +229,7 @@ func TestSpreadIterator_MultipleAttributes(t *testing.T) { }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -996,7 +996,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { } allocs = append(allocs, alloc) } - err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs) + err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs) require.NoError(t, err) // job version 2 diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index ebdda77c101..a800c3ca355 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -8,6 +8,7 @@ import ( "reflect" "runtime" "testing" + "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -334,7 +335,7 @@ func TestServiceStack_Select_CSI(t *testing.T) { v.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem v.PluginID = "bar" - err := state.UpsertCSIVolume(999, []*structs.CSIVolume{v}) + err := state.UpsertCSIVolume(999, time.Now().UnixNano(), []*structs.CSIVolume{v}) must.NoError(t, err) // Create a node with healthy fingerprints for both controller and node plugins From e92767d45801d5605e0a8d95c22010a7c91c579c Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak 
<470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:18:08 +0100 Subject: [PATCH 15/40] removed time.Now from fsm and state store methods --- nomad/fsm.go | 23 ++++++++-------- nomad/state/state_store.go | 55 +++++++++++++++++++------------------- nomad/structs/csi.go | 7 +++-- nomad/structs/structs.go | 9 ++++++- scheduler/reconcile.go | 2 +- 5 files changed, 52 insertions(+), 44 deletions(-) diff --git a/nomad/fsm.go b/nomad/fsm.go index 1f0a414ab3d..977e050ada3 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -680,7 +680,6 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index } } - now := time.Now().UnixNano() if req.Deployment != nil { // Cancel any preivous deployment. lastDeployment, err := n.state.LatestDeploymentByJobID(ws, req.Job.Namespace, req.Job.ID) @@ -691,7 +690,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index activeDeployment := lastDeployment.Copy() activeDeployment.Status = structs.DeploymentStatusCancelled activeDeployment.StatusDescription = structs.DeploymentStatusDescriptionNewerJob - if err := n.state.UpsertDeployment(index, now, activeDeployment); err != nil { + if err := n.state.UpsertDeployment(index, activeDeployment); err != nil { return err } } @@ -702,7 +701,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index req.Deployment.JobSpecModifyIndex = req.Job.JobModifyIndex req.Deployment.JobVersion = req.Job.Version - if err := n.state.UpsertDeployment(index, now, req.Deployment); err != nil { + if err := n.state.UpsertDeployment(index, req.Deployment); err != nil { return err } } @@ -939,7 +938,7 @@ func (n *nomadFSM) applyAllocClientUpdate(msgType structs.MessageType, buf []byt } // Update all the client allocations - if err := n.state.UpdateAllocsFromClient(msgType, index, time.Now().UnixNano(), req.Alloc); err != nil { + if err := n.state.UpdateAllocsFromClient(msgType, index, req.Alloc); err != nil { n.logger.Error("UpdateAllocFromClient failed", "error", err) return err } @@ -1093,7 +1092,7 @@ func (n *nomadFSM) applyPlanResults(msgType structs.MessageType, buf []byte, ind panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpsertPlanResults(msgType, index, time.Now().UnixNano(), &req); err != nil { + if err := n.state.UpsertPlanResults(msgType, index, &req); err != nil { n.logger.Error("ApplyPlan failed", "error", err) return err } @@ -1112,7 +1111,7 @@ func (n *nomadFSM) applyDeploymentStatusUpdate(msgType structs.MessageType, buf panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpdateDeploymentStatus(msgType, index, time.Now().UnixNano(), &req); err != nil { + if err := n.state.UpdateDeploymentStatus(msgType, index, &req); err != nil { n.logger.Error("UpsertDeploymentStatusUpdate failed", "error", err) return err } @@ -1129,7 +1128,7 @@ func (n *nomadFSM) applyDeploymentPromotion(msgType structs.MessageType, buf []b panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpdateDeploymentPromotion(msgType, index, time.Now().UnixNano(), &req); err != nil { + if err := n.state.UpdateDeploymentPromotion(msgType, index, &req); err != nil { n.logger.Error("UpsertDeploymentPromotion failed", "error", err) return err } @@ -1147,7 +1146,7 @@ func (n *nomadFSM) applyDeploymentAllocHealth(msgType structs.MessageType, buf [ panic(fmt.Errorf("failed to decode request: %v", err)) } - if err := n.state.UpdateDeploymentAllocHealth(msgType, index, time.Now().UnixNano(), &req); err != 
nil { + if err := n.state.UpdateDeploymentAllocHealth(msgType, index, &req); err != nil { n.logger.Error("UpsertDeploymentAllocHealth failed", "error", err) return err } @@ -1367,7 +1366,7 @@ func (n *nomadFSM) applyCSIVolumeRegister(buf []byte, index uint64) interface{} } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_register"}, time.Now()) - if err := n.state.UpsertCSIVolume(index, time.Now().UnixNano(), req.Volumes); err != nil { + if err := n.state.UpsertCSIVolume(index, req.Timestamp, req.Volumes); err != nil { n.logger.Error("CSIVolumeRegister failed", "error", err) return err } @@ -1398,7 +1397,7 @@ func (n *nomadFSM) applyCSIVolumeBatchClaim(buf []byte, index uint64) interface{ defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_batch_claim"}, time.Now()) for _, req := range batch.Claims { - err := n.state.CSIVolumeClaim(index, time.Now().UnixNano(), req.RequestNamespace(), + err := n.state.CSIVolumeClaim(index, req.Timestamp, req.RequestNamespace(), req.VolumeID, req.ToClaim()) if err != nil { n.logger.Error("CSIVolumeClaim for batch failed", "error", err) @@ -1415,7 +1414,7 @@ func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} { } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_claim"}, time.Now()) - if err := n.state.CSIVolumeClaim(index, time.Now().UnixNano(), req.RequestNamespace(), req.VolumeID, req.ToClaim()); err != nil { + if err := n.state.CSIVolumeClaim(index, req.Timestamp, req.RequestNamespace(), req.VolumeID, req.ToClaim()); err != nil { n.logger.Error("CSIVolumeClaim failed", "error", err) return err } @@ -2006,7 +2005,7 @@ func (n *nomadFSM) failLeakedDeployments(store *state.StateStore) error { failed := d.Copy() failed.Status = structs.DeploymentStatusCancelled failed.StatusDescription = structs.DeploymentStatusDescriptionStoppedJob - if err := store.UpsertDeployment(dindex, time.Now().UnixNano(), failed); err != nil { + if err := store.UpsertDeployment(dindex, failed); err != nil { return fmt.Errorf("failed to mark leaked deployment %q as failed: %v", failed.ID, err) } } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 4e36280fd17..a7c40b9165b 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package state @@ -366,7 +366,7 @@ RUN_QUERY: } // UpsertPlanResults is used to upsert the results of a plan. -func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64, now int64, results *structs.ApplyPlanResultsRequest) error { +func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64, results *structs.ApplyPlanResultsRequest) error { snapshot, err := s.Snapshot() if err != nil { return err } @@ -408,7 +408,7 @@ func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64 // Upsert the newly created or updated deployment if results.Deployment != nil { - if err := s.upsertDeploymentImpl(index, now, results.Deployment, txn); err != nil { + if err := s.upsertDeploymentImpl(index, results.Deployment, txn); err != nil { return err } } @@ -575,16 +575,16 @@ func (s *StateStore) DeleteJobSummary(index uint64, namespace, id string) error } // UpsertDeployment is used to insert or update a new deployment. 
-func (s *StateStore) UpsertDeployment(index uint64, now int64, deployment *structs.Deployment) error { +func (s *StateStore) UpsertDeployment(index uint64, deployment *structs.Deployment) error { txn := s.db.WriteTxn(index) defer txn.Abort() - if err := s.upsertDeploymentImpl(index, now, deployment, txn); err != nil { + if err := s.upsertDeploymentImpl(index, deployment, txn); err != nil { return err } return txn.Commit() } -func (s *StateStore) upsertDeploymentImpl(index uint64, now int64, deployment *structs.Deployment, txn *txn) error { +func (s *StateStore) upsertDeploymentImpl(index uint64, deployment *structs.Deployment, txn *txn) error { // Check if the deployment already exists existing, err := txn.First("deployment", "id", deployment.ID) if err != nil { @@ -595,13 +595,9 @@ func (s *StateStore) upsertDeploymentImpl(index uint64, now int64, deployment *s if existing != nil { deployment.CreateIndex = existing.(*structs.Deployment).CreateIndex deployment.ModifyIndex = index - deployment.CreateTime = existing.(*structs.Deployment).CreateTime - deployment.ModifyTime = now } else { deployment.CreateIndex = index deployment.ModifyIndex = index - deployment.CreateTime = now - deployment.ModifyTime = now } // Insert the deployment @@ -3945,7 +3941,7 @@ func (s *StateStore) EvalsByNamespaceOrdered(ws memdb.WatchSet, namespace string // most things, some updates are authoritative from the client. Specifically, // the desired state comes from the schedulers, while the actual state comes // from clients. -func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index uint64, now int64, allocs []*structs.Allocation) error { +func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index uint64, allocs []*structs.Allocation) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() @@ -3956,7 +3952,7 @@ func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index u // Handle each of the updated allocations for _, alloc := range allocs { nodeIDs.Insert(alloc.NodeID) - if err := s.nestedUpdateAllocFromClient(txn, index, now, alloc); err != nil { + if err := s.nestedUpdateAllocFromClient(txn, index, alloc); err != nil { return err } } @@ -3977,7 +3973,7 @@ func (s *StateStore) UpdateAllocsFromClient(msgType structs.MessageType, index u } // nestedUpdateAllocFromClient is used to nest an update of an allocation with client status -func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, now int64, alloc *structs.Allocation) error { +func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, alloc *structs.Allocation) error { // Look for existing alloc existing, err := txn.First("allocs", "id", alloc.ID) if err != nil { @@ -4025,7 +4021,7 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, now int // Update the modify time copyAlloc.ModifyTime = alloc.ModifyTime - if err := s.updateDeploymentWithAlloc(index, now, copyAlloc, exist, txn); err != nil { + if err := s.updateDeploymentWithAlloc(index, copyAlloc, exist, txn); err != nil { return fmt.Errorf("error updating deployment: %v", err) } @@ -4164,7 +4160,7 @@ func (s *StateStore) upsertAllocsImpl(index uint64, now int64, allocs []*structs // These should be given a map of new to old allocation and the updates // should be one on all changes. 
The current implementation causes O(n) // lookups/copies/insertions rather than O(1) - if err := s.updateDeploymentWithAlloc(index, now, alloc, exist, txn); err != nil { + if err := s.updateDeploymentWithAlloc(index, alloc, exist, txn); err != nil { return fmt.Errorf("error updating deployment: %v", err) } @@ -4829,11 +4825,11 @@ func (s *StateStore) SITokenAccessorsByNode(ws memdb.WatchSet, nodeID string) ([ // UpdateDeploymentStatus is used to make deployment status updates and // potentially make a evaluation -func (s *StateStore) UpdateDeploymentStatus(msgType structs.MessageType, index uint64, now int64, req *structs.DeploymentStatusUpdateRequest) error { +func (s *StateStore) UpdateDeploymentStatus(msgType structs.MessageType, index uint64, req *structs.DeploymentStatusUpdateRequest) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() - if err := s.updateDeploymentStatusImpl(index, now, req.DeploymentUpdate, txn); err != nil { + if err := s.updateDeploymentStatusImpl(index, req.DeploymentUpdate, txn); err != nil { return err } @@ -4855,7 +4851,7 @@ func (s *StateStore) UpdateDeploymentStatus(msgType structs.MessageType, index u } // updateDeploymentStatusImpl is used to make deployment status updates -func (s *StateStore) updateDeploymentStatusImpl(index uint64, now int64, u *structs.DeploymentStatusUpdate, txn *txn) error { +func (s *StateStore) updateDeploymentStatusImpl(index uint64, u *structs.DeploymentStatusUpdate, txn *txn) error { // Retrieve deployment ws := memdb.NewWatchSet() deployment, err := s.deploymentByIDImpl(ws, u.DeploymentID, txn) @@ -4872,7 +4868,7 @@ func (s *StateStore) updateDeploymentStatusImpl(index uint64, now int64, u *stru copy.Status = u.Status copy.StatusDescription = u.StatusDescription copy.ModifyIndex = index - copy.ModifyTime = now + copy.ModifyTime = u.UpdatedAt // Insert the deployment if err := txn.Insert("deployment", copy); err != nil { @@ -5021,7 +5017,7 @@ func (s *StateStore) unsetJobVersionTagImpl(index uint64, namespace, jobID strin // UpdateDeploymentPromotion is used to promote canaries in a deployment and // potentially make a evaluation -func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, index uint64, now int64, req *structs.ApplyDeploymentPromoteRequest) error { +func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, index uint64, req *structs.ApplyDeploymentPromoteRequest) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() @@ -5114,7 +5110,6 @@ func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, inde // Update deployment copy := deployment.Copy() copy.ModifyIndex = index - copy.ModifyTime = now for tg, status := range copy.TaskGroups { _, ok := groupIndex[tg] if !req.All && !ok { @@ -5133,8 +5128,11 @@ func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, inde copy.StatusDescription = structs.DeploymentStatusDescriptionRunning } + // Update modify time to the time of deployment promotion + copy.ModifyTime = req.PromotedAt + // Insert the deployment - if err := s.upsertDeploymentImpl(index, now, copy, txn); err != nil { + if err := s.upsertDeploymentImpl(index, copy, txn); err != nil { return err } @@ -5168,7 +5166,7 @@ func (s *StateStore) UpdateDeploymentPromotion(msgType structs.MessageType, inde // UpdateDeploymentAllocHealth is used to update the health of allocations as // part of the deployment and potentially make a evaluation -func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, 
index uint64, now int64, req *structs.ApplyDeploymentAllocHealthRequest) error { +func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, index uint64, req *structs.ApplyDeploymentAllocHealthRequest) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() @@ -5207,9 +5205,10 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in copy.DeploymentStatus.Healthy = pointer.Of(healthy) copy.DeploymentStatus.Timestamp = ts copy.DeploymentStatus.ModifyIndex = index + copy.ModifyTime = req.Timestamp.UnixNano() copy.ModifyIndex = index - if err := s.updateDeploymentWithAlloc(index, now, copy, old, txn); err != nil { + if err := s.updateDeploymentWithAlloc(index, copy, old, txn); err != nil { return fmt.Errorf("error updating deployment: %v", err) } @@ -5239,7 +5238,7 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in // Update the deployment status as needed. if req.DeploymentUpdate != nil { - if err := s.updateDeploymentStatusImpl(index, now, req.DeploymentUpdate, txn); err != nil { + if err := s.updateDeploymentStatusImpl(index, req.DeploymentUpdate, txn); err != nil { return err } } @@ -5917,7 +5916,7 @@ func (s *StateStore) updateJobCSIPlugins(index uint64, job, prev *structs.Job, t // updateDeploymentWithAlloc is used to update the deployment state associated // with the given allocation. The passed alloc may be updated if the deployment // status has changed to capture the modify index at which it has changed. -func (s *StateStore) updateDeploymentWithAlloc(index uint64, now int64, alloc, existing *structs.Allocation, txn *txn) error { +func (s *StateStore) updateDeploymentWithAlloc(index uint64, alloc, existing *structs.Allocation, txn *txn) error { // Nothing to do if the allocation is not associated with a deployment if alloc.DeploymentID == "" { return nil @@ -5979,7 +5978,7 @@ func (s *StateStore) updateDeploymentWithAlloc(index uint64, now int64, alloc, e // Create a copy of the deployment object deploymentCopy := deployment.Copy() deploymentCopy.ModifyIndex = index - deploymentCopy.ModifyTime = now + deploymentCopy.ModifyTime = alloc.ModifyTime dstate := deploymentCopy.TaskGroups[alloc.TaskGroup] dstate.PlacedAllocs += placed @@ -6015,7 +6014,7 @@ func (s *StateStore) updateDeploymentWithAlloc(index uint64, now int64, alloc, e } // Upsert the deployment - if err := s.upsertDeploymentImpl(index, now, deploymentCopy, txn); err != nil { + if err := s.upsertDeploymentImpl(index, deploymentCopy, txn); err != nil { return err } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 32663d53e7b..858d3c385db 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -851,7 +851,8 @@ func (v *CSIVolume) Merge(other *CSIVolume) error { // Request and response wrappers type CSIVolumeRegisterRequest struct { - Volumes []*CSIVolume + Volumes []*CSIVolume + Timestamp int64 WriteRequest } @@ -870,7 +871,8 @@ type CSIVolumeDeregisterResponse struct { } type CSIVolumeCreateRequest struct { - Volumes []*CSIVolume + Volumes []*CSIVolume + Timestamp int64 WriteRequest } @@ -927,6 +929,7 @@ type CSIVolumeClaimRequest struct { AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode State CSIVolumeClaimState + Timestamp int64 WriteRequest } diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index b23477246f5..de3ccf17321 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -1381,6 +1381,9 @@ type DeploymentPromoteRequest struct { // Groups is used to set 
the promotion status per task group Groups []string + // PromotedAt is the timestamp stored as Unix nano + PromotedAt int64 + WriteRequest } @@ -10645,7 +10648,7 @@ type Deployment struct { } // NewDeployment creates a new deployment given the job. -func NewDeployment(job *Job, evalPriority int) *Deployment { +func NewDeployment(job *Job, evalPriority int, now int64) *Deployment { return &Deployment{ ID: uuid.Generate(), Namespace: job.Namespace, @@ -10659,6 +10662,7 @@ func NewDeployment(job *Job, evalPriority int) *Deployment { StatusDescription: DeploymentStatusDescriptionRunning, TaskGroups: make(map[string]*DeploymentState, len(job.TaskGroups)), EvalPriority: evalPriority, + CreateTime: now, } } @@ -10838,6 +10842,9 @@ type DeploymentStatusUpdate struct { // StatusDescription is the new status description of the deployment. StatusDescription string + + // Updated at is the time of the update + UpdatedAt int64 } // RescheduleTracker encapsulates previous reschedule events diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index 2d1451d8bc0..f07e83dd5a5 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -995,7 +995,7 @@ func (a *allocReconciler) createDeployment(groupName string, strategy *structs.U // A previous group may have made the deployment already. If not create one. if a.deployment == nil { - a.deployment = structs.NewDeployment(a.job, a.evalPriority) + a.deployment = structs.NewDeployment(a.job, a.evalPriority, a.now.UnixNano()) a.result.deployment = a.deployment } From d5c378c51f4d2879c5a684b0dcef6565a6dc402a Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:41:21 +0100 Subject: [PATCH 16/40] signatures change --- client/client_test.go | 16 +- client/gc_test.go | 11 +- command/agent/alloc_endpoint_test.go | 16 +- command/agent/deployment_endpoint_test.go | 28 +- command/agent/eval_endpoint_test.go | 3 +- command/agent/event_endpoint_test.go | 2 +- command/agent/fs_endpoint_test.go | 2 +- command/agent/job_endpoint_test.go | 6 +- command/agent/node_endpoint_test.go | 6 +- command/agent/search_endpoint_test.go | 9 +- command/alloc_checks_test.go | 3 +- command/alloc_exec_test.go | 3 +- command/alloc_fs_test.go | 3 +- command/alloc_logs_test.go | 3 +- command/alloc_restart_test.go | 3 +- command/alloc_signal_test.go | 3 +- command/alloc_status_test.go | 14 +- command/deployment_fail_test.go | 3 +- command/deployment_pause_test.go | 3 +- command/deployment_promote_test.go | 3 +- command/deployment_resume_test.go | 3 +- command/deployment_status_test.go | 3 +- command/deployment_unblock_test.go | 3 +- command/job_allocs_test.go | 9 +- command/job_deployments_test.go | 7 +- command/job_eval_test.go | 3 +- command/job_promote_test.go | 3 +- command/job_status_test.go | 2 +- command/status_test.go | 5 +- command/volume_status_test.go | 3 +- nomad/acl_endpoint_test.go | 2 +- nomad/alloc_endpoint_test.go | 56 +- nomad/auth/auth_test.go | 8 +- .../deployments_watcher_test.go | 502 +++++++++--------- nomad/deploymentwatcher/testutil_test.go | 7 +- nomad/drainer/draining_node_test.go | 60 +-- nomad/drainer/watch_jobs_test.go | 30 +- nomad/drainer/watch_nodes_test.go | 4 +- nomad/volumewatcher/volumes_watcher_test.go | 34 +- nomad/worker_test.go | 2 +- 40 files changed, 431 insertions(+), 455 deletions(-) diff --git a/client/client_test.go b/client/client_test.go index 36d144a7e43..92914004208 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -603,7 +603,7 @@ func 
TestClient_WatchAllocs(t *testing.T) { if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil { t.Fatal(err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -628,7 +628,7 @@ func TestClient_WatchAllocs(t *testing.T) { // alloc runner. alloc2_2 := alloc2.Copy() alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 104, time.Now().UnixNano(), []*structs.Allocation{alloc2_2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 104, []*structs.Allocation{alloc2_2}); err != nil { t.Fatalf("err upserting stopped alloc: %v", err) } @@ -992,7 +992,7 @@ func TestClient_AddAllocError(t *testing.T) { err = state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)) require.Nil(err) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc1}) require.Nil(err) // Push this alloc update to the client @@ -1092,7 +1092,7 @@ func TestClient_BlockedAllocations(t *testing.T) { } state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) // Wait until the client downloads and starts the allocation testutil.WaitForResult(func() (bool, error) { @@ -1115,7 +1115,7 @@ func TestClient_BlockedAllocations(t *testing.T) { alloc2.Job = alloc.Job alloc2.JobID = alloc.JobID alloc2.PreviousAllocation = alloc.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}); err != nil { t.Fatalf("err: %v", err) } @@ -1136,7 +1136,7 @@ func TestClient_BlockedAllocations(t *testing.T) { // Change the desired state of the parent alloc to stop alloc1 := alloc.Copy() alloc1.DesiredStatus = structs.AllocDesiredStatusStop - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 300, time.Now().UnixNano(), []*structs.Allocation{alloc1}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 300, []*structs.Allocation{alloc1}); err != nil { t.Fatalf("err: %v", err) } @@ -2047,7 +2047,7 @@ func TestClient_ReconnectAllocs(t *testing.T) { err = state.UpsertJobSummary(101, mock.JobSummary(runningAlloc.JobID)) require.NoError(t, err) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{runningAlloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{runningAlloc}) require.NoError(t, err) // Ensure allocation gets upserted with desired status. 
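For reference, the call-site churn in these test hunks follows from the earlier state_store.go and structs.go changes: the wall-clock `now` argument is dropped from the state-store signatures, and the timestamp instead rides on the request itself (DeploymentStatusUpdate.UpdatedAt, DeploymentPromoteRequest.PromotedAt, the new CSI Timestamp fields). Below is a minimal, self-contained Go sketch of that shape, using placeholder types rather than the real nomad/structs definitions and assuming only what the hunks above show.

package main

import (
	"fmt"
	"time"
)

// statusUpdate mirrors the post-patch DeploymentStatusUpdate shape: the caller
// stamps UpdatedAt once and the state store only copies it onto the object.
type statusUpdate struct {
	Status    string
	UpdatedAt int64 // Unix nanoseconds, set where the update originates
}

type deployment struct {
	Status     string
	ModifyTime int64
}

// applyStatusUpdate stands in for updateDeploymentStatusImpl after the patch:
// no `now` parameter, ModifyTime comes straight from the request.
func applyStatusUpdate(d *deployment, u *statusUpdate) {
	d.Status = u.Status
	d.ModifyTime = u.UpdatedAt
}

func main() {
	d := &deployment{Status: "running"}
	u := &statusUpdate{Status: "successful", UpdatedAt: time.Now().UnixNano()}
	applyStatusUpdate(d, u)
	fmt.Println(d.Status, d.ModifyTime)
}

One plausible reading of this design: the timestamp is captured before the request enters Raft, so every server applies the same ModifyTime, whereas a time.Now() call inside the FSM could differ across replicas.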
@@ -2065,7 +2065,7 @@ func TestClient_ReconnectAllocs(t *testing.T) { require.NoError(t, err) unknownAlloc.ClientStatus = structs.AllocClientStatusUnknown unknownAlloc.AppendState(structs.AllocStateFieldClientStatus, structs.AllocClientStatusUnknown) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, runningAlloc.AllocModifyIndex+1, time.Now().UnixNano(), []*structs.Allocation{unknownAlloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, runningAlloc.AllocModifyIndex+1, []*structs.Allocation{unknownAlloc}) require.NoError(t, err) updates := &allocUpdates{ diff --git a/client/gc_test.go b/client/gc_test.go index ca61bb366ae..fa7ebdffb2b 100644 --- a/client/gc_test.go +++ b/client/gc_test.go @@ -18,7 +18,7 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func gcConfig() *GCConfig { @@ -364,6 +364,7 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { ci.Parallel(t) const maxAllocs = 6 + require := require.New(t) server, serverAddr, cleanupS := testServer(t, nil) defer cleanupS() @@ -397,8 +398,8 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { upsertJobFn := func(server *nomad.Server, j *structs.Job) { state := server.State() - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), nil, j)) - must.NoError(t, state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID))) + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), nil, j)) + require.NoError(state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID))) } // Insert the Job @@ -406,7 +407,7 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { upsertAllocFn := func(server *nomad.Server, a *structs.Allocation) { state := server.State() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a})) + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(), []*structs.Allocation{a})) } upsertNewAllocFn := func(server *nomad.Server, j *structs.Job) *structs.Allocation { @@ -503,7 +504,7 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { } return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(err) }) } diff --git a/command/agent/alloc_endpoint_test.go b/command/agent/alloc_endpoint_test.go index 5043b2c9448..fa5014dcdea 100644 --- a/command/agent/alloc_endpoint_test.go +++ b/command/agent/alloc_endpoint_test.go @@ -53,7 +53,7 @@ func TestHTTP_AllocsList(t *testing.T) { state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -121,7 +121,7 @@ func TestHTTP_AllocsPrefixList(t *testing.T) { if err := state.UpsertJobSummary(999, summary2); err != nil { t.Fatal(err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}); err != nil { t.Fatalf("err: %v", err) } @@ -174,7 +174,7 @@ func TestHTTP_AllocQuery(t *testing.T) { 
state := s.Agent.server.State() alloc := mock.Alloc() require.NoError(state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/allocation/"+alloc.ID, nil) @@ -222,7 +222,7 @@ func TestHTTP_AllocQuery_Payload(t *testing.T) { compressed := snappy.Encode(nil, expected) alloc.Job.Payload = compressed - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -406,7 +406,7 @@ func TestHTTP_AllocStop(t *testing.T) { require := require.New(t) require.NoError(state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) // Test that the happy path works { @@ -459,7 +459,7 @@ func TestHTTP_allocServiceRegistrations(t *testing.T) { // Generate an alloc and upsert this. alloc := mock.Alloc() require.NoError(t, testState.UpsertAllocs( - structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc})) // Generate a service registration, assigned the allocID to the // mocked allocation ID, and upsert this. @@ -494,7 +494,7 @@ func TestHTTP_allocServiceRegistrations(t *testing.T) { // Generate an alloc and upsert this. alloc := mock.Alloc() require.NoError(t, testState.UpsertAllocs( - structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc})) // Build the HTTP request. 
path := fmt.Sprintf("/v1/allocation/%s/services", alloc.ID) @@ -744,7 +744,7 @@ func TestHTTP_AllocSnapshot_Atomic(t *testing.T) { } alloc.NodeID = s.client.NodeID() state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc.Copy()}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc.Copy()}); err != nil { t.Fatalf("error upserting alloc: %v", err) } diff --git a/command/agent/deployment_endpoint_test.go b/command/agent/deployment_endpoint_test.go index e7a0c770262..dc9d3e3105d 100644 --- a/command/agent/deployment_endpoint_test.go +++ b/command/agent/deployment_endpoint_test.go @@ -7,7 +7,6 @@ import ( "net/http" "net/http/httptest" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -18,14 +17,13 @@ import ( func TestHTTP_DeploymentList(t *testing.T) { ci.Parallel(t) assert := assert.New(t) - now := time.Now().UnixNano() httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() d1 := mock.Deployment() d2 := mock.Deployment() - assert.Nil(state.UpsertDeployment(999, now, d1), "UpsertDeployment") - assert.Nil(state.UpsertDeployment(1000, now, d2), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(999, d1), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d2), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployments", nil) @@ -50,7 +48,6 @@ func TestHTTP_DeploymentList(t *testing.T) { func TestHTTP_DeploymentPrefixList(t *testing.T) { ci.Parallel(t) assert := assert.New(t) - now := time.Now().UnixNano() httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -58,8 +55,8 @@ func TestHTTP_DeploymentPrefixList(t *testing.T) { d1.ID = "aaabbbbb-e8f7-fd38-c855-ab94ceb89706" d2 := mock.Deployment() d2.ID = "aaabbbbb-e8f7-fd38-c855-ab94ceb89706" - assert.Nil(state.UpsertDeployment(999, now, d1), "UpsertDeployment") - assert.Nil(state.UpsertDeployment(1000, now, d2), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(999, d1), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d2), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployments?prefix=aaab", nil) @@ -85,7 +82,6 @@ func TestHTTP_DeploymentPrefixList(t *testing.T) { func TestHTTP_DeploymentAllocations(t *testing.T) { ci.Parallel(t) assert := assert.New(t) - now := time.Now().UnixNano() httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -116,8 +112,8 @@ func TestHTTP_DeploymentAllocations(t *testing.T) { a2.TaskStates["test"] = taskState2 assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(999, now, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{a1, a2}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a1, a2}), "UpsertAllocs") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployment/allocations/"+d.ID, nil) @@ -151,7 +147,7 @@ func TestHTTP_DeploymentQuery(t *testing.T) { // Directly manipulate the state state := s.Agent.server.State() d := mock.Deployment() - 
assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/deployment/"+d.ID, nil) @@ -183,7 +179,7 @@ func TestHTTP_DeploymentPause(t *testing.T) { d := mock.Deployment() d.JobID = j.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the pause request args := structs.DeploymentPauseRequest{ @@ -224,7 +220,7 @@ func TestHTTP_DeploymentPromote(t *testing.T) { d := mock.Deployment() d.JobID = j.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the pause request args := structs.DeploymentPromoteRequest{ @@ -268,8 +264,8 @@ func TestHTTP_DeploymentAllocHealth(t *testing.T) { a.JobID = j.ID a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(999, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a}), "UpsertAllocs") // Create the pause request args := structs.DeploymentAllocHealthRequest{ @@ -310,7 +306,7 @@ func TestHTTP_DeploymentFail(t *testing.T) { d := mock.Deployment() d.JobID = j.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(999, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodPut, "/v1/deployment/fail/"+d.ID, nil) diff --git a/command/agent/eval_endpoint_test.go b/command/agent/eval_endpoint_test.go index 5bb49b7c41b..506ba5a2cc4 100644 --- a/command/agent/eval_endpoint_test.go +++ b/command/agent/eval_endpoint_test.go @@ -9,7 +9,6 @@ import ( "net/http/httptest" "net/url" "testing" - "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -259,7 +258,7 @@ func TestHTTP_EvalAllocations(t *testing.T) { alloc2.EvalID = alloc1.EvalID state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/command/agent/event_endpoint_test.go b/command/agent/event_endpoint_test.go index bea4105e61b..66415b5f46a 100644 --- a/command/agent/event_endpoint_test.go +++ b/command/agent/event_endpoint_test.go @@ -231,7 +231,7 @@ func TestHTTP_Alloc_Port_Response(t *testing.T) { alloc.NodeID = srv.client.NodeID() require.Nil(t, srv.server.State().UpsertJobSummary(101, mock.JobSummary(alloc.JobID))) - require.Nil(t, srv.server.State().UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc})) + 
require.Nil(t, srv.server.State().UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) running := false testutil.WaitForResult(func() (bool, error) { diff --git a/command/agent/fs_endpoint_test.go b/command/agent/fs_endpoint_test.go index 2ba5b434c90..1a07e8165a5 100644 --- a/command/agent/fs_endpoint_test.go +++ b/command/agent/fs_endpoint_test.go @@ -67,7 +67,7 @@ func addAllocToClient(agent *TestAgent, alloc *structs.Allocation, wait clientAl // Upsert the allocation state := agent.server.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc})) if wait == noWaitClientAlloc { return diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index 443be039193..be15d0eca0f 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -1543,7 +1543,7 @@ func TestHTTP_JobAllocations(t *testing.T) { alloc1.TaskStates = make(map[string]*structs.TaskState) alloc1.TaskStates["test"] = taskState state := s.Agent.server.State() - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -1604,7 +1604,7 @@ func TestHTTP_JobDeployments(t *testing.T) { d.JobID = j.ID d.JobCreateIndex = resp.JobModifyIndex - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/job/"+j.ID+"/deployments", nil) @@ -1647,7 +1647,7 @@ func TestHTTP_JobDeployment(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.JobCreateIndex = resp.JobModifyIndex - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Make the HTTP request req, err := http.NewRequest(http.MethodGet, "/v1/job/"+j.ID+"/deployment", nil) diff --git a/command/agent/node_endpoint_test.go b/command/agent/node_endpoint_test.go index b2c04345b34..4ff558c7a69 100644 --- a/command/agent/node_endpoint_test.go +++ b/command/agent/node_endpoint_test.go @@ -207,7 +207,7 @@ func TestHTTP_NodeForceEval(t *testing.T) { if err := state.UpsertJobSummary(999, mock.JobSummary(alloc1.JobID)); err != nil { t.Fatal(err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -267,7 +267,7 @@ func TestHTTP_NodeAllocations(t *testing.T) { alloc1.TaskStates = make(map[string]*structs.TaskState) alloc1.TaskStates["test"] = taskState - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -478,7 +478,7 @@ func TestHTTP_NodePurge(t *testing.T) { if err := state.UpsertJobSummary(999, mock.JobSummary(alloc1.JobID)); err != nil { t.Fatal(err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), 
[]*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/command/agent/search_endpoint_test.go b/command/agent/search_endpoint_test.go index 8f506ca2344..491ae159dd5 100644 --- a/command/agent/search_endpoint_test.go +++ b/command/agent/search_endpoint_test.go @@ -8,7 +8,6 @@ import ( "net/http" "net/http/httptest" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" @@ -345,7 +344,7 @@ func TestHTTP_PrefixSearch_Allocations(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() alloc := mockAlloc() - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, []*structs.Allocation{alloc}) require.NoError(t, err) prefix := alloc.ID[:len(alloc.ID)-2] @@ -376,7 +375,7 @@ func TestHTTP_FuzzySearch_Allocations(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() alloc := mockAlloc() - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 7000, []*structs.Allocation{alloc}) require.NoError(t, err) data := structs.FuzzySearchRequest{Text: "-job", Context: structs.Allocs} @@ -467,7 +466,7 @@ func TestHTTP_PrefixSearch_Deployments(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() deployment := mock.Deployment() - require.NoError(t, state.UpsertDeployment(999, time.Now().UnixNano(), deployment), "UpsertDeployment") + require.NoError(t, state.UpsertDeployment(999, deployment), "UpsertDeployment") prefix := deployment.ID[:len(deployment.ID)-2] data := structs.SearchRequest{Prefix: prefix, Context: structs.Deployments} @@ -495,7 +494,7 @@ func TestHTTP_FuzzySearch_Deployments(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() deployment := mock.Deployment() - require.NoError(t, state.UpsertDeployment(999, time.Now().UnixNano(), deployment), "UpsertDeployment") + require.NoError(t, state.UpsertDeployment(999, deployment), "UpsertDeployment") // fuzzy search of deployments are prefix searches prefix := deployment.ID[:len(deployment.ID)-2] diff --git a/command/alloc_checks_test.go b/command/alloc_checks_test.go index 7df3fa48aaa..652bce0a368 100644 --- a/command/alloc_checks_test.go +++ b/command/alloc_checks_test.go @@ -6,7 +6,6 @@ package command import ( "encoding/json" "testing" - "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -75,7 +74,7 @@ func TestAllocChecksCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_exec_test.go b/command/alloc_exec_test.go index d4383f29716..3c730d65f02 100644 --- a/command/alloc_exec_test.go +++ b/command/alloc_exec_test.go @@ -8,7 +8,6 @@ import ( "fmt" "strings" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" @@ -154,7 +153,7 @@ func TestAllocExecCommand_AutocompleteArgs(t *testing.T) { 
// Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_fs_test.go b/command/alloc_fs_test.go index f1c7680c8d3..9e6ef96b7f1 100644 --- a/command/alloc_fs_test.go +++ b/command/alloc_fs_test.go @@ -5,7 +5,6 @@ package command import ( "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -102,7 +101,7 @@ func TestFSCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_logs_test.go b/command/alloc_logs_test.go index 88e262cb0c0..5cee74972ec 100644 --- a/command/alloc_logs_test.go +++ b/command/alloc_logs_test.go @@ -5,7 +5,6 @@ package command import ( "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -81,7 +80,7 @@ func TestLogsCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_restart_test.go b/command/alloc_restart_test.go index 6e562deab4d..0596f01e69a 100644 --- a/command/alloc_restart_test.go +++ b/command/alloc_restart_test.go @@ -5,7 +5,6 @@ package command import ( "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -153,7 +152,7 @@ func TestAllocRestartCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/alloc_signal_test.go b/command/alloc_signal_test.go index 40addffe6a0..f27bb64dc06 100644 --- a/command/alloc_signal_test.go +++ b/command/alloc_signal_test.go @@ -5,7 +5,6 @@ package command import ( "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -86,7 +85,7 @@ func TestAllocSignalCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{All: []string{"signal", prefix}, Last: prefix} diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go index bbd646a7061..6260b7963f1 100644 --- a/command/alloc_status_test.go +++ b/command/alloc_status_test.go @@ 
-123,7 +123,7 @@ func TestAllocStatusCommand_LifecycleInfo(t *testing.T) { "prestart_sidecar": {State: "running"}, } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) code := cmd.Run([]string{"-address=" + url, a.ID}) must.Zero(t, code) @@ -226,7 +226,7 @@ func TestAllocStatusCommand_RescheduleInfo(t *testing.T) { }, }, } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) if code := cmd.Run([]string{"-address=" + url, a.ID}); code != 0 { t.Fatalf("expected exit 0, got: %d", code) @@ -269,7 +269,7 @@ func TestAllocStatusCommand_ScoreMetrics(t *testing.T) { }, }, } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) code := cmd.Run([]string{"-address=" + url, "-verbose", a.ID}) must.Zero(t, code) @@ -296,7 +296,7 @@ func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() a := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) prefix := a.ID[:5] args := complete.Args{Last: prefix} @@ -359,7 +359,7 @@ func TestAllocStatusCommand_HostVolumes(t *testing.T) { } summary := mock.JobSummary(alloc.JobID) must.NoError(t, state.UpsertJobSummary(1004, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc})) ui := cli.NewMockUi() cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} @@ -402,7 +402,7 @@ func TestAllocStatusCommand_CSIVolumes(t *testing.T) { Segments: map[string]string{"foo": "bar"}, }}, }} - err = state.UpsertCSIVolume(1002, time.Now().UnixNano(), vols) + err = state.UpsertCSIVolume(1002, vols) must.NoError(t, err) // Upsert the job and alloc @@ -435,7 +435,7 @@ func TestAllocStatusCommand_CSIVolumes(t *testing.T) { } summary := mock.JobSummary(alloc.JobID) must.NoError(t, state.UpsertJobSummary(1004, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc})) ui := cli.NewMockUi() cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} diff --git a/command/deployment_fail_test.go b/command/deployment_fail_test.go index bceae5584e9..c6d3c72330d 100644 --- a/command/deployment_fail_test.go +++ b/command/deployment_fail_test.go @@ -6,7 +6,6 @@ package command import ( "strings" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -55,7 +54,7 @@ func TestDeploymentFailCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.Nil(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) + must.Nil(t, state.UpsertDeployment(1000, d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git 
a/command/deployment_pause_test.go b/command/deployment_pause_test.go index 58a25871cd7..f8356af7cac 100644 --- a/command/deployment_pause_test.go +++ b/command/deployment_pause_test.go @@ -6,7 +6,6 @@ package command import ( "strings" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -55,7 +54,7 @@ func TestDeploymentPauseCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) + must.NoError(t, state.UpsertDeployment(1000, d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_promote_test.go b/command/deployment_promote_test.go index 37107f9b792..38aefcddb9e 100644 --- a/command/deployment_promote_test.go +++ b/command/deployment_promote_test.go @@ -6,7 +6,6 @@ package command import ( "strings" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -55,7 +54,7 @@ func TestDeploymentPromoteCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) + must.NoError(t, state.UpsertDeployment(1000, d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_resume_test.go b/command/deployment_resume_test.go index f71f445d4b4..ee4f97c1922 100644 --- a/command/deployment_resume_test.go +++ b/command/deployment_resume_test.go @@ -6,7 +6,6 @@ package command import ( "strings" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -55,7 +54,7 @@ func TestDeploymentResumeCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) + must.NoError(t, state.UpsertDeployment(1000, d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_status_test.go b/command/deployment_status_test.go index 85da942e462..62e86428203 100644 --- a/command/deployment_status_test.go +++ b/command/deployment_status_test.go @@ -5,7 +5,6 @@ package command import ( "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -66,7 +65,7 @@ func TestDeploymentStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) + must.NoError(t, state.UpsertDeployment(1000, d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git a/command/deployment_unblock_test.go b/command/deployment_unblock_test.go index 1a40882bde3..96430fc71f5 100644 --- a/command/deployment_unblock_test.go +++ b/command/deployment_unblock_test.go @@ -6,7 +6,6 @@ package command import ( "strings" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -55,7 +54,7 @@ func TestDeploymentUnblockCommand_AutocompleteArgs(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() d := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d)) + must.NoError(t, state.UpsertDeployment(1000, d)) prefix := d.ID[:5] args := complete.Args{Last: prefix} diff --git 
a/command/job_allocs_test.go b/command/job_allocs_test.go index b4d1a08e1a7..4f51eec6fdd 100644 --- a/command/job_allocs_test.go +++ b/command/job_allocs_test.go @@ -5,7 +5,6 @@ package command import ( "testing" - "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -84,7 +83,7 @@ func TestJobAllocsCommand_Run(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a})) // Should now display the alloc code = cmd.Run([]string{"-address=" + url, "-verbose", job.ID}) @@ -119,7 +118,7 @@ func TestJobAllocsCommand_Template(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a})) // Inject a pending allocation b := mock.Alloc() @@ -129,7 +128,7 @@ func TestJobAllocsCommand_Template(t *testing.T) { b.Metrics = &structs.AllocMetric{} b.DesiredStatus = structs.AllocDesiredStatusRun b.ClientStatus = structs.AllocClientStatusPending - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 300, time.Now().UnixNano(), []*structs.Allocation{b})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 300, []*structs.Allocation{b})) // Should display an AllocacitonListStub object code := cmd.Run([]string{"-address=" + url, "-t", "'{{printf \"%#+v\" .}}'", job.ID}) @@ -201,7 +200,7 @@ func TestJobAllocsCommand_ACL(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{a}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a}) must.NoError(t, err) testCases := []struct { diff --git a/command/job_deployments_test.go b/command/job_deployments_test.go index e48c8a79e49..ab1f4336675 100644 --- a/command/job_deployments_test.go +++ b/command/job_deployments_test.go @@ -6,7 +6,6 @@ package command import ( "strings" "testing" - "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -78,7 +77,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - must.NoError(t, state.UpsertDeployment(200, time.Now().UnixNano(), d)) + must.NoError(t, state.UpsertDeployment(200, d)) // Should now display the deployment if code := cmd.Run([]string{"-address=" + url, "-verbose", job.ID}); code != 0 { @@ -122,7 +121,7 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - must.NoError(t, state.UpsertDeployment(200, time.Now().UnixNano(), d)) + must.NoError(t, state.UpsertDeployment(200, d)) // Should now display the deployment if code := cmd.Run([]string{"-address=" + url, "-verbose", "-latest", job.ID}); code != 0 { @@ -175,7 +174,7 @@ func TestJobDeploymentsCommand_ACL(t *testing.T) { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - err = 
state.UpsertDeployment(101, time.Now().UnixNano(), d) + err = state.UpsertDeployment(101, d) must.NoError(t, err) testCases := []struct { diff --git a/command/job_eval_test.go b/command/job_eval_test.go index 9004e315796..fd081b9ee97 100644 --- a/command/job_eval_test.go +++ b/command/job_eval_test.go @@ -7,7 +7,6 @@ import ( "fmt" "strings" "testing" - "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -91,7 +90,7 @@ func TestJobEvalCommand_Run(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.Namespace = job.Namespace alloc.ClientStatus = structs.AllocClientStatusFailed - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 12, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 12, []*structs.Allocation{alloc}) must.NoError(t, err) if code := cmd.Run([]string{"-address=" + url, "-force-reschedule", "-detach", job.ID}); code != 0 { diff --git a/command/job_promote_test.go b/command/job_promote_test.go index 86891c567db..299bb370438 100644 --- a/command/job_promote_test.go +++ b/command/job_promote_test.go @@ -6,7 +6,6 @@ package command import ( "strings" "testing" - "time" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" @@ -155,7 +154,7 @@ namespace "default" { d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex - err = state.UpsertDeployment(uint64(301+i), time.Now().UnixNano(), d) + err = state.UpsertDeployment(uint64(301+i), d) must.NoError(t, err) if tc.aclPolicy != "" { diff --git a/command/job_status_test.go b/command/job_status_test.go index e5a3235b098..90800a52786 100644 --- a/command/job_status_test.go +++ b/command/job_status_test.go @@ -380,7 +380,7 @@ func TestJobStatusCommand_RescheduleEvals(t *testing.T) { a.Metrics = &structs.AllocMetric{} a.DesiredStatus = structs.AllocDesiredStatusRun a.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a})) // Query jobs with prefix match if code := cmd.Run([]string{"-address=" + url, j.ID}); code != 0 { diff --git a/command/status_test.go b/command/status_test.go index c9233552ad8..cce94977b4e 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -7,7 +7,6 @@ import ( "fmt" "regexp" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" @@ -146,7 +145,7 @@ func TestStatusCommand_Run_AllocStatus(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() alloc := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) code := cmd.Run([]string{"-address=" + url, alloc.ID}) must.Zero(t, code) @@ -169,7 +168,7 @@ func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() deployment := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, time.Now().UnixNano(), deployment)) + must.NoError(t, state.UpsertDeployment(1000, deployment)) // Query to check the deployment status code := cmd.Run([]string{"-address=" + url, deployment.ID}) diff --git a/command/volume_status_test.go b/command/volume_status_test.go index dcfafe1fcac..0fde4610f6b 100644 
--- a/command/volume_status_test.go +++ b/command/volume_status_test.go @@ -5,7 +5,6 @@ package command import ( "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" @@ -51,7 +50,7 @@ func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { PluginID: "glade", } - must.NoError(t, state.UpsertCSIVolume(1000, time.Now().UnixNano(), []*structs.CSIVolume{vol})) + must.NoError(t, state.UpsertCSIVolume(1000, []*structs.CSIVolume{vol})) prefix := vol.ID[:len(vol.ID)-5] args := complete.Args{Last: prefix} diff --git a/nomad/acl_endpoint_test.go b/nomad/acl_endpoint_test.go index af54538bf8d..ce26d6a4d2a 100644 --- a/nomad/acl_endpoint_test.go +++ b/nomad/acl_endpoint_test.go @@ -1943,7 +1943,7 @@ func TestACLEndpoint_WhoAmI(t *testing.T) { // Lookup identity claim alloc := mock.Alloc() - s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1500, time.Now().UnixNano(), []*structs.Allocation{alloc}) + s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1500, []*structs.Allocation{alloc}) task := alloc.LookupTask("web") claims := structs.NewIdentityClaimsBuilder(alloc.Job, alloc, wiHandle, // see encrypter_test.go diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index b2063c74799..715f311150d 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -38,7 +38,7 @@ func TestAllocEndpoint_List(t *testing.T) { if err := state.UpsertJobSummary(999, summary); err != nil { t.Fatalf("err: %v", err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -151,7 +151,7 @@ func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { } // other fields index := 1000 + uint64(i) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), allocsInTx)) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, allocsInTx)) } aclToken := mock.CreatePolicyAndToken(t, @@ -337,17 +337,17 @@ func TestAllocEndpoint_List_order(t *testing.T) { alloc3 := mock.Alloc() alloc3.ID = uuid3 - err := s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err := s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) require.NoError(t, err) - err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc2}) + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}) require.NoError(t, err) - err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc3}) + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc3}) require.NoError(t, err) // update alloc2 again so we can later assert create index order did not change - err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc2}) + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2}) require.NoError(t, err) t.Run("default", func(t *testing.T) { @@ -426,7 +426,7 @@ func TestAllocEndpoint_List_Fields(t *testing.T) { state := s1.fsm.State() require.NoError(t, state.UpsertJobSummary(999, summary)) - require.NoError(t, 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) cases := []struct { Name string @@ -521,7 +521,7 @@ func TestAllocEndpoint_List_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs), "UpsertAllocs") stubAllocs := []*structs.AllocListStub{alloc.Stub(nil)} stubAllocs[0].CreateIndex = 1000 @@ -580,7 +580,7 @@ func TestAllocEndpoint_List_Blocking(t *testing.T) { } // Upsert alloc triggers watches time.AfterFunc(100*time.Millisecond, func() { - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } }) @@ -614,7 +614,7 @@ func TestAllocEndpoint_List_Blocking(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusRunning time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(3, mock.JobSummary(alloc2.JobID)) - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 4, time.Now().UnixNano(), []*structs.Allocation{alloc2}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 4, []*structs.Allocation{alloc2}); err != nil { t.Fatalf("err: %v", err) } }) @@ -670,8 +670,8 @@ func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { require.NoError(t, state.UpsertJobSummary(1000, summary1)) require.NoError(t, state.UpsertJobSummary(1001, summary2)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc1})) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc1})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2})) t.Run("looking up all allocations", func(t *testing.T) { get := &structs.AllocListRequest{ @@ -739,7 +739,7 @@ func TestAllocEndpoint_GetAlloc(t *testing.T) { } state := s1.fsm.State() state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -778,7 +778,7 @@ func TestAllocEndpoint_GetAlloc_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -889,7 +889,7 @@ func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { // First create an unrelated alloc time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), 
[]*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -898,7 +898,7 @@ func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { // Create the alloc we are watching later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -943,7 +943,7 @@ func TestAllocEndpoint_GetAllocs(t *testing.T) { state := s1.fsm.State() state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1000,7 +1000,7 @@ func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { // First create an unrelated alloc time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -1009,7 +1009,7 @@ func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { // Create the alloc we are watching later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1056,7 +1056,7 @@ func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { state := s1.fsm.State() require.Nil(state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID))) require.Nil(state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2})) t1 := &structs.DesiredTransition{ Migrate: pointer.Of(true), @@ -1140,7 +1140,7 @@ func TestAllocEndpoint_Stop_ACL(t *testing.T) { state := s1.fsm.State() require.Nil(state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID))) require.Nil(state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2})) req := &structs.AllocStopRequest{ AllocID: alloc.ID, @@ -1213,7 +1213,7 @@ func TestAllocEndpoint_List_AllNamespaces_ACL_OSS(t *testing.T) { require.NoError(t, state.UpsertJobSummary(999, summary1)) require.NoError(t, state.UpsertJobSummary(999, summary2)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, 
alloc2})) alloc1.CreateIndex = 1000 alloc1.ModifyIndex = 1000 alloc2.CreateIndex = 1000 @@ -1382,7 +1382,7 @@ func TestAlloc_GetServiceRegistrations(t *testing.T) { correctSetupFn := func(s *Server) (error, string, *structs.ServiceRegistration) { // Generate an upsert an allocation. alloc := mock.Alloc() - err := s.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := s.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc}) if err != nil { return nil, "", nil } @@ -1502,7 +1502,7 @@ func TestAlloc_GetServiceRegistrations(t *testing.T) { // Generate an upsert an allocation. alloc := mock.Alloc() require.NoError(t, s.State().UpsertAllocs( - structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc})) // Perform a lookup using the allocation information. serviceRegReq := &structs.AllocServiceRegistrationsRequest{ @@ -1733,7 +1733,7 @@ func TestAlloc_SignIdentities_Bad(t *testing.T) { summary := mock.JobSummary(alloc.JobID) state := s1.fsm.State() must.NoError(t, state.UpsertJobSummary(100, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 101, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 101, []*structs.Allocation{alloc})) // A valid alloc and invalid TaskName is an error req.Identities[0].AllocID = alloc.ID @@ -1801,7 +1801,7 @@ func TestAlloc_SignIdentities_Blocking(t *testing.T) { otherAlloc := mock.Alloc() otherSummary := mock.JobSummary(otherAlloc.JobID) must.NoError(t, state.UpsertJobSummary(999, otherSummary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{otherAlloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{otherAlloc})) type resultT struct { Err error @@ -1848,7 +1848,7 @@ func TestAlloc_SignIdentities_Blocking(t *testing.T) { otherAlloc = mock.Alloc() otherSummary = mock.JobSummary(otherAlloc.JobID) must.NoError(t, state.UpsertJobSummary(1997, otherSummary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1998, time.Now().UnixNano(), []*structs.Allocation{otherAlloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1998, []*structs.Allocation{otherAlloc})) select { case result := <-resultCh: @@ -1858,7 +1858,7 @@ func TestAlloc_SignIdentities_Blocking(t *testing.T) { // Finally add the alloc we're waiting for must.NoError(t, state.UpsertJobSummary(1999, summary)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2000, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2000, []*structs.Allocation{alloc})) select { case result := <-resultCh: diff --git a/nomad/auth/auth_test.go b/nomad/auth/auth_test.go index e7364c9e0d2..9be3bb78aa8 100644 --- a/nomad/auth/auth_test.go +++ b/nomad/auth/auth_test.go @@ -271,7 +271,7 @@ func TestAuthenticateDefault(t *testing.T) { must.EqError(t, err, "allocation does not exist") // insert alloc so it's live - store.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), + store.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}) args = &structs.GenericRequest{} @@ -289,7 +289,7 @@ func TestAuthenticateDefault(t *testing.T) { // alloc becomes terminal alloc.ClientStatus = structs.AllocClientStatusComplete - 
store.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), + store.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}) args = &structs.GenericRequest{} @@ -924,7 +924,7 @@ func TestIdentityToACLClaim(t *testing.T) { Encrypter: newTestEncrypter(), }) - store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), + store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) token, err := auth.encrypter.(*testEncrypter).signClaim(claims) @@ -1115,7 +1115,7 @@ func TestResolveClaims(t *testing.T) { // upsert the allocation index++ - err = auth.getState().UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc, dispatchAlloc}) + err = auth.getState().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc, dispatchAlloc}) must.NoError(t, err) // Resolve claims and check we that the ACL object without policies provides no access diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index 19f9af7a8b1..a6b880441f7 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -20,6 +20,7 @@ import ( "github.com/shoenig/test/wait" "github.com/stretchr/testify/assert" mocker "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" ) func testDeploymentWatcher(t *testing.T, qps float64, batchDur time.Duration) (*Watcher, *mockBackend) { @@ -35,6 +36,7 @@ func defaultTestDeploymentWatcher(t *testing.T) (*Watcher, *mockBackend) { // Tests that the watcher properly watches for deployments and reconciles them func TestWatcher_WatchDeployments(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -43,9 +45,9 @@ func TestWatcher_WatchDeployments(t *testing.T) { // Create three jobs j1, j2, j3 := mock.Job(), mock.Job(), mock.Job() - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, j1)) - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, j2)) - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, 102, nil, j3)) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, j1)) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, j2)) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 102, nil, j3)) // Create three deployments all running d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() @@ -54,14 +56,14 @@ func TestWatcher_WatchDeployments(t *testing.T) { d3.JobID = j3.ID // Upsert the first deployment - must.Nil(t, m.state.UpsertDeployment(103, time.Now().UnixNano(), d1)) + require.Nil(m.state.UpsertDeployment(103, d1)) // Next list 3 block1 := make(chan time.Time) go func() { <-block1 - must.Nil(t, m.state.UpsertDeployment(104, time.Now().UnixNano(), d2)) - must.Nil(t, m.state.UpsertDeployment(105, time.Now().UnixNano(), d3)) + require.Nil(m.state.UpsertDeployment(104, d2)) + require.Nil(m.state.UpsertDeployment(105, d3)) }() //// Next list 3 but have one be terminal d3terminal := d3.Copy() d3terminal.Status = structs.DeploymentStatusFailed go func() { <-block2 - must.Nil(t, m.state.UpsertDeployment(106, time.Now().UnixNano(), d3terminal)) + require.Nil(m.state.UpsertDeployment(106, d3terminal)) }() w.SetEnabled(true, m.state)
testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("1 deployment returned")) }) + func(err error) { require.Equal(1, watchersCount(w), "1 deployment returned") }) close(block1) testutil.WaitForResult(func() (bool, error) { return 3 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 3, watchersCount(w), must.Sprint("3 deployment returned")) }) + func(err error) { require.Equal(3, watchersCount(w), "3 deployment returned") }) close(block2) testutil.WaitForResult(func() (bool, error) { return 2 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 3, watchersCount(w), must.Sprint("3 deployment returned - 1 terminal")) }) + func(err error) { require.Equal(3, watchersCount(w), "3 deployment returned - 1 terminal") }) } // Tests that calls against an unknown deployment fail func TestWatcher_UnknownDeployment(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) w.SetEnabled(true, m.state) @@ -107,8 +111,9 @@ func TestWatcher_UnknownDeployment(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - must.Error(t, err, must.Sprint("should have error for unknown deployment")) - must.ErrorContains(t, err, expected) + if assert.NotNil(err, "should have error for unknown deployment") { + require.Contains(err.Error(), expected) + } // Request promoting against an unknown deployment req2 := &structs.DeploymentPromoteRequest{ @@ -116,8 +121,9 @@ func TestWatcher_UnknownDeployment(t *testing.T) { All: true, } err = w.PromoteDeployment(req2, &resp) - must.Error(t, err, must.Sprint("should have error for unknown deployment")) - must.ErrorContains(t, err, expected) + if assert.NotNil(err, "should have error for unknown deployment") { + require.Contains(err.Error(), expected) + } // Request pausing against an unknown deployment req3 := &structs.DeploymentPauseRequest{ @@ -125,21 +131,25 @@ func TestWatcher_UnknownDeployment(t *testing.T) { Pause: true, } err = w.PauseDeployment(req3, &resp) - must.Error(t, err, must.Sprint("should have error for unknown deployment")) - must.ErrorContains(t, err, expected) + if assert.NotNil(err, "should have error for unknown deployment") { + require.Contains(err.Error(), expected) + } // Request failing against an unknown deployment req4 := &structs.DeploymentFailRequest{ DeploymentID: dID, } err = w.FailDeployment(req4, &resp) - must.Error(t, err, must.Sprint("should have error for unknown deployment")) - must.ErrorContains(t, err, expected) + if assert.NotNil(err, "should have error for unknown deployment") { + require.Contains(err.Error(), expected) + } } // Test setting an unknown allocation's health func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -150,8 +160,8 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + 
require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentAllocHealth a := mock.Alloc() @@ -165,7 +175,7 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -174,14 +184,16 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - must.Error(t, err, must.Sprint("Set health of unknown allocation")) - must.ErrorContains(t, err, "unknown") - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + if assert.NotNil(err, "Set health of unknown allocation") { + require.Contains(err.Error(), "unknown") + } + require.Equal(1, watchersCount(w), "Deployment should still be active") } // Test setting allocation health func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -194,10 +206,9 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { d.JobID = j.ID a := mock.Alloc() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -210,7 +221,7 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -219,14 +230,15 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - must.Nil(t, err, must.Sprint("SetAllocHealth")) - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Nil(err, "SetAllocHealth") + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentAllocHealth", mocker.MatchedBy(matcher)) } // Test setting allocation unhealthy func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -239,10 +251,9 @@ 
func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { d.JobID = j.ID a := mock.Alloc() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -260,7 +271,7 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -269,16 +280,17 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - must.Nil(t, err, must.Sprint("SetAllocHealth")) + require.Nil(err, "SetAllocHealth") testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 0, watchersCount(w), must.Sprint("Should have no deployment")) }) + func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) m.AssertNumberOfCalls(t, "UpdateDeploymentAllocHealth", 1) } // Test setting allocation unhealthy and that there should be a rollback func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -297,10 +309,9 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // Upsert the job again to get a new version j2 := j.Copy() @@ -308,7 +319,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { // Modify the job to make its specification different j2.Meta["foo"] = "bar" - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob2")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we get a call to 
UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -327,7 +338,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -336,16 +347,17 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - must.Nil(t, err, must.Sprint("SetAllocHealth")) + require.Nil(err, "SetAllocHealth") testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 0, watchersCount(w), must.Sprint("Should have no deployment")) }) + func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) m.AssertNumberOfCalls(t, "UpdateDeploymentAllocHealth", 1) } // Test setting allocation unhealthy on job with identical spec and there should be no rollback func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -364,16 +376,15 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // Upsert the job again to get a new version j2 := j.Copy() j2.Stable = false - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob2")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -392,7 +403,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call SetAllocHealth req := &structs.DeploymentAllocHealthRequest{ @@ -401,16 +412,17 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.SetAllocHealth(req, &resp) - must.Nil(t, err, must.Sprint("SetAllocHealth")) + require.Nil(err, "SetAllocHealth") testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 0, 
watchersCount(w), must.Sprint("Should have no deployment")) }) + func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) m.AssertNumberOfCalls(t, "UpdateDeploymentAllocHealth", 1) } // Test promoting a deployment func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -432,10 +444,9 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { Healthy: pointer.Of(true), } a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // require that we get a call to UpsertDeploymentPromotion matchConfig := &matchDeploymentPromoteRequestConfig{ @@ -454,7 +465,7 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call PromoteDeployment req := &structs.DeploymentPromoteRequest{ @@ -463,14 +474,15 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PromoteDeployment(req, &resp) - must.Nil(t, err, must.Sprint("PromoteDeployment")) - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Nil(err, "PromoteDeployment") + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher)) } // Test promoting a deployment with unhealthy canaries func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -489,10 +501,9 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} d.TaskGroups[a.TaskGroup].DesiredCanaries = 2 a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), 
"UpsertAllocs") // require that we get a call to UpsertDeploymentPromotion matchConfig := &matchDeploymentPromoteRequestConfig{ @@ -507,7 +518,7 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call SetAllocHealth req := &structs.DeploymentPromoteRequest{ @@ -518,10 +529,10 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { err := w.PromoteDeployment(req, &resp) if assert.NotNil(t, err, "PromoteDeployment") { // 0/2 because the old version has been stopped but the canary isn't marked healthy yet - must.ErrorContains(t, err, `Task group "web" has 0/2 healthy allocations`, must.Sprint("Should error because canary isn't marked healthy")) + require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`, "Should error because canary isn't marked healthy") } - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher)) } @@ -601,10 +612,9 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { d.TaskGroups[ca1.TaskGroup].PlacedCanaries = []string{ca1.ID, ca2.ID} d.TaskGroups[ca1.TaskGroup].DesiredCanaries = 2 d.TaskGroups[ra1.TaskGroup].PlacedAllocs = 2 - must.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.NoError(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{ca1, ca2, ra1, ra2}), must.Sprint("UpsertAllocs")) + require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.NoError(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{ca1, ca2, ra1, ra2}), "UpsertAllocs") // ============================================================= // Support method calls @@ -650,7 +660,7 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { func(err error) { w.l.RLock() defer w.l.RUnlock() - must.Eq(t, 1, len(w.watchers), must.Sprint("Should have 1 deployment")) + require.Equal(t, 1, len(w.watchers), "Should have 1 deployment") }, ) @@ -663,7 +673,7 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { // Calls w.raft.UpdateDeploymentAllocHealth, which is implemented by StateStore in // state.UpdateDeploymentAllocHealth via a raft shim? 
err := w.SetAllocHealth(req, &resp) - must.NoError(t, err) + require.NoError(t, err) ws := memdb.NewWatchSet() @@ -673,22 +683,22 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { d = ds[0] return 2 == d.TaskGroups["web"].HealthyAllocs, nil }, - func(err error) { must.NoError(t, err) }, + func(err error) { require.NoError(t, err) }, ) - must.Eq(t, 1, len(w.watchers), must.Sprint("Deployment should still be active")) + require.Equal(t, 1, len(w.watchers), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher2)) - must.Eq(t, "running", d.Status) - must.True(t, d.TaskGroups["web"].Promoted) + require.Equal(t, "running", d.Status) + require.True(t, d.TaskGroups["web"].Promoted) a1, _ := m.state.AllocByID(ws, ca1.ID) - must.False(t, a1.DeploymentStatus.Canary) - must.Eq(t, "pending", a1.ClientStatus) - must.Eq(t, "run", a1.DesiredStatus) + require.False(t, a1.DeploymentStatus.Canary) + require.Equal(t, "pending", a1.ClientStatus) + require.Equal(t, "run", a1.DesiredStatus) b1, _ := m.state.AllocByID(ws, ca2.ID) - must.False(t, b1.DeploymentStatus.Canary) + require.False(t, b1.DeploymentStatus.Canary) } func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { @@ -737,10 +747,9 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { d.TaskGroups[ca1.TaskGroup].PlacedCanaries = []string{ca1.ID, ca2.ID, ca3.ID} d.TaskGroups[ca1.TaskGroup].DesiredCanaries = 2 - must.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.NoError(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{ca1, ca2, ca3}), must.Sprint("UpsertAllocs")) + require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.NoError(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{ca1, ca2, ca3}), "UpsertAllocs") // ============================================================= // Support method calls @@ -786,7 +795,7 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { func(err error) { w.l.RLock() defer w.l.RUnlock() - must.Eq(t, 1, len(w.watchers), must.Sprint("Should have 1 deployment")) + require.Equal(t, 1, len(w.watchers), "Should have 1 deployment") }, ) @@ -799,7 +808,7 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { // Calls w.raft.UpdateDeploymentAllocHealth, which is implemented by StateStore in // state.UpdateDeploymentAllocHealth via a raft shim? err := w.SetAllocHealth(req, &resp) - must.NoError(t, err) + require.NoError(t, err) ws := memdb.NewWatchSet() @@ -809,28 +818,29 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { d = ds[0] return 2 == d.TaskGroups["web"].HealthyAllocs, nil }, - func(err error) { must.NoError(t, err) }, + func(err error) { require.NoError(t, err) }, ) // Verify that a promotion request was submitted. 
- must.Eq(t, 1, len(w.watchers), must.Sprint("Deployment should still be active")) + require.Equal(t, 1, len(w.watchers), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentPromotion", mocker.MatchedBy(matcher2)) - must.Eq(t, "running", d.Status) - must.True(t, d.TaskGroups["web"].Promoted) + require.Equal(t, "running", d.Status) + require.True(t, d.TaskGroups["web"].Promoted) a1, _ := m.state.AllocByID(ws, ca1.ID) - must.False(t, a1.DeploymentStatus.Canary) - must.Eq(t, "pending", a1.ClientStatus) - must.Eq(t, "run", a1.DesiredStatus) + require.False(t, a1.DeploymentStatus.Canary) + require.Equal(t, "pending", a1.ClientStatus) + require.Equal(t, "run", a1.DesiredStatus) b1, _ := m.state.AllocByID(ws, ca2.ID) - must.False(t, b1.DeploymentStatus.Canary) + require.False(t, b1.DeploymentStatus.Canary) } // Test pausing a deployment that is running func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // clear UpdateDeploymentStatus default expectation @@ -840,8 +850,8 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -854,7 +864,7 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -863,15 +873,16 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - must.Nil(t, err, must.Sprint("PauseDeployment")) + require.Nil(err, "PauseDeployment") - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test pausing a deployment that is paused func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // clear UpdateDeploymentStatus default expectation @@ -882,8 +893,8 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.Status = structs.DeploymentStatusPaused - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := 
&matchDeploymentStatusUpdateConfig{ @@ -896,7 +907,7 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -905,15 +916,16 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - must.Nil(t, err, must.Sprint("PauseDeployment")) + require.Nil(err, "PauseDeployment") - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test unpausing a deployment that is paused func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // Create a job and a deployment @@ -921,8 +933,8 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.Status = structs.DeploymentStatusPaused - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -936,7 +948,7 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -945,23 +957,24 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - must.Nil(t, err, must.Sprint("PauseDeployment")) + require.Nil(err, "PauseDeployment") - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test unpausing a deployment that is running func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // Create a job and a deployment j := mock.Job() d := mock.Deployment() d.JobID = j.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := 
&matchDeploymentStatusUpdateConfig{ @@ -975,7 +988,7 @@ func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call PauseDeployment req := &structs.DeploymentPauseRequest{ @@ -984,23 +997,24 @@ func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.PauseDeployment(req, &resp) - must.Nil(t, err, must.Sprint("PauseDeployment")) + require.Nil(err, "PauseDeployment") - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } // Test failing a deployment that is running func TestWatcher_FailDeployment_Running(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := defaultTestDeploymentWatcher(t) // Create a job and a deployment j := mock.Job() d := mock.Deployment() d.JobID = j.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ @@ -1014,7 +1028,7 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Call PauseDeployment req := &structs.DeploymentFailRequest{ @@ -1022,9 +1036,9 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { } var resp structs.DeploymentUpdateResponse err := w.FailDeployment(req, &resp) - must.Nil(t, err, must.Sprint("FailDeployment")) + require.Nil(err, "FailDeployment") - must.Eq(t, 1, watchersCount(w), must.Sprint("Deployment should still be active")) + require.Equal(1, watchersCount(w), "Deployment should still be active") m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(matcher)) } @@ -1032,6 +1046,7 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { // proper actions func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) // Create a job, alloc, and a deployment @@ -1046,17 +1061,16 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + 
require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // Upsert the job again to get a new version j2 := j.Copy() // Modify the job to make its specification different j2.Meta["foo"] = "bar" j2.Stable = false - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob2")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we will get a update allocation call only once. This will // verify that the watcher is batching allocation changes @@ -1076,7 +1090,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Update the allocs health to healthy which should create an evaluation for i := 0; i < 5; i++ { @@ -1086,8 +1100,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { HealthyAllocationIDs: []string{a.ID}, }, } - must.Nil(t, m.state.UpdateDeploymentAllocHealth( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req), must.Sprint("UpsertDeploymentAllocHealth")) + require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req), "UpsertDeploymentAllocHealth") } // Wait for there to be one eval @@ -1115,8 +1128,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { UnhealthyAllocationIDs: []string{a.ID}, }, } - must.Nil(t, m.state.UpdateDeploymentAllocHealth( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req2), must.Sprint("UpsertDeploymentAllocHealth")) + require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req2), "UpsertDeploymentAllocHealth") // Wait for there to be one eval testutil.WaitForResult(func() (bool, error) { @@ -1149,11 +1161,12 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { m3 := matchDeploymentStatusUpdateRequest(c2) m.AssertCalled(t, "UpdateDeploymentStatus", mocker.MatchedBy(m3)) testutil.WaitForResult(func() (bool, error) { return 0 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 0, watchersCount(w), must.Sprint("Should have no deployment")) }) + func(err error) { require.Equal(0, watchersCount(w), "Should have no deployment") }) } func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) // Create a job, alloc, and a deployment @@ -1170,9 +1183,9 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { a.CreateTime = now.UnixNano() a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), 
"UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // require that we get a call to UpsertDeploymentStatusUpdate c := &matchDeploymentStatusUpdateConfig{ @@ -1186,7 +1199,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Update the alloc to be unhealthy and require that nothing happens. a2 := a.Copy() @@ -1194,7 +1207,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { Healthy: pointer.Of(false), Timestamp: now, } - must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{a2})) + require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 100, []*structs.Allocation{a2})) // Wait for the deployment to be failed testutil.WaitForResult(func() (bool, error) { @@ -1229,6 +1242,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { // Test that progress deadline handling works when there are multiple groups func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1264,10 +1278,9 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { a2.ModifyTime = now.UnixNano() a2.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a, a2}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a, a2}), "UpsertAllocs") // We may get an update for the desired transition. 
m1 := matchUpdateAllocDesiredTransitions([]string{d.ID}) @@ -1275,67 +1288,67 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) watcher, err := w.getOrCreateWatcher(d.ID) - must.NoError(t, err) - must.NotNil(t, watcher) + require.NoError(err) + require.NotNil(watcher) d1, err := m.state.DeploymentByID(nil, d.ID) - must.NoError(t, err) + require.NoError(err) done := watcher.doneGroups(d1) - must.MapContainsKey(t, done, "web") - must.False(t, done["web"]) - must.MapContainsKey(t, done, "foo") - must.False(t, done["foo"]) + require.Contains(done, "web") + require.False(done["web"]) + require.Contains(done, "foo") + require.False(done["foo"]) cutoff1 := watcher.getDeploymentProgressCutoff(d1) - must.False(t, cutoff1.IsZero()) + require.False(cutoff1.IsZero()) // Update the first allocation to be healthy a3 := a.Copy() a3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} - must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a3}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a3}), "UpsertAllocs") // Get the updated deployment d2, err := m.state.DeploymentByID(nil, d.ID) - must.NoError(t, err) + require.NoError(err) done = watcher.doneGroups(d2) - must.MapContainsKey(t, done, "web") - must.True(t, done["web"]) - must.MapContainsKey(t, done, "foo") - must.False(t, done["foo"]) + require.Contains(done, "web") + require.True(done["web"]) + require.Contains(done, "foo") + require.False(done["foo"]) cutoff2 := watcher.getDeploymentProgressCutoff(d2) - must.False(t, cutoff2.IsZero()) - must.True(t, cutoff1.UnixNano() < cutoff2.UnixNano()) + require.False(cutoff2.IsZero()) + require.True(cutoff1.UnixNano() < cutoff2.UnixNano()) // Update the second allocation to be healthy a4 := a2.Copy() a4.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a4}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a4}), "UpsertAllocs") // Get the updated deployment d3, err := m.state.DeploymentByID(nil, d.ID) - must.NoError(t, err) + require.NoError(err) done = watcher.doneGroups(d3) - must.MapContainsKey(t, done, "web") - must.True(t, done["web"]) - must.MapContainsKey(t, done, "foo") - must.True(t, done["foo"]) + require.Contains(done, "web") + require.True(done["web"]) + require.Contains(done, "foo") + require.True(done["foo"]) cutoff3 := watcher.getDeploymentProgressCutoff(d2) - must.True(t, cutoff3.IsZero()) + require.True(cutoff3.IsZero()) } // Test that we will allow the progress deadline to be reached when the canaries // are healthy but we haven't promoted func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1359,10 +1372,9 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t 
*testing.T) { a.CreateTime = now.UnixNano() a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // require that we will get a createEvaluation call only once. This will // verify that the watcher is batching allocation changes @@ -1371,7 +1383,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Update the alloc to be unhealthy and require that nothing happens. a2 := a.Copy() @@ -1379,22 +1391,22 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { Healthy: pointer.Of(true), Timestamp: now, } - must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a2})) + require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) // Wait for the deployment to cross the deadline dout, err := m.state.DeploymentByID(nil, d.ID) - must.NoError(t, err) - must.NotNil(t, dout) + require.NoError(err) + require.NotNil(dout) state := dout.TaskGroups["web"] - must.NotNil(t, state) + require.NotNil(state) time.Sleep(state.RequireProgressBy.Add(time.Second).Sub(now)) // Require the deployment is still running dout, err = m.state.DeploymentByID(nil, d.ID) - must.NoError(t, err) - must.NotNil(t, dout) - must.Eq(t, structs.DeploymentStatusRunning, dout.Status) - must.Eq(t, structs.DeploymentStatusDescriptionRunningNeedsPromotion, dout.StatusDescription) + require.NoError(err) + require.NotNil(dout) + require.Equal(structs.DeploymentStatusRunning, dout.Status) + require.Equal(structs.DeploymentStatusDescriptionRunningNeedsPromotion, dout.StatusDescription) // require there are is only one evaluation testutil.WaitForResult(func() (bool, error) { @@ -1418,6 +1430,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { // evals to move the deployment forward func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1451,14 +1464,13 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { Healthy: pointer.Of(true), Timestamp: now, } - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), 
time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) m1 := matchUpdateAllocDesiredTransitions([]string{d.ID}) m.On("UpdateAllocDesiredTransition", mocker.MatchedBy(m1)).Return(nil).Twice() @@ -1474,18 +1486,17 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { Timestamp: now, } d.TaskGroups["web"].RequireProgressBy = time.Now().Add(2 * time.Second) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // Wait until batch eval period passes before updating another alloc time.Sleep(1 * time.Second) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a2}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2}), "UpsertAllocs") // Wait for the deployment to cross the deadline dout, err := m.state.DeploymentByID(nil, d.ID) - must.NoError(t, err) - must.NotNil(t, dout) + require.NoError(err) + require.NotNil(dout) state := dout.TaskGroups["web"] - must.NotNil(t, state) + require.NotNil(state) time.Sleep(state.RequireProgressBy.Add(time.Second).Sub(now)) // There should be two evals @@ -1508,6 +1519,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { ci.Parallel(t) + require := require.New(t) mtype := structs.MsgTypeTestSetup w, m := defaultTestDeploymentWatcher(t) @@ -1552,8 +1564,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { }, } - must.NoError(t, m.state.UpsertJob(mtype, m.nextIndex(), nil, j)) - must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d)) + require.NoError(m.state.UpsertJob(mtype, m.nextIndex(), nil, j)) + require.NoError(m.state.UpsertDeployment(m.nextIndex(), d)) // require that we get a call to UpsertDeploymentPromotion matchConfig := &matchDeploymentPromoteRequestConfig{ @@ -1587,8 +1599,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { canary2.ModifyTime = now.UnixNano() allocs := []*structs.Allocation{canary1, canary2} - err := m.state.UpsertAllocs(mtype, m.nextIndex(), time.Now().UnixNano(), allocs) - must.NoError(t, err) + err := m.state.UpsertAllocs(mtype, m.nextIndex(), allocs) + require.NoError(err) // 2nd group's canary becomes healthy @@ -1603,8 +1615,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { } allocs = []*structs.Allocation{canary2} - err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), now.UnixNano(), allocs) - must.NoError(t, err) + err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), allocs) + require.NoError(err) // wait for long enough to ensure we read deployment update channel // this sleep creates the race condition associated 
with #7058 @@ -1622,8 +1634,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { } allocs = []*structs.Allocation{canary1} - err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), now.UnixNano(), allocs) - must.NoError(t, err) + err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), allocs) + require.NoError(err) // ensure progress_deadline has definitely expired time.Sleep(progressTimeout) @@ -1635,7 +1647,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { All: true, } err = w.PromoteDeployment(req, &structs.DeploymentUpdateResponse{}) - must.NoError(t, err) + require.NoError(err) // wait for long enough to ensure we read deployment update channel time.Sleep(50 * time.Millisecond) @@ -1661,8 +1673,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { alloc1b.ModifyTime = now.UnixNano() allocs = []*structs.Allocation{alloc1a, alloc1b} - err = m.state.UpsertAllocs(mtype, m.nextIndex(), now.UnixNano(), allocs) - must.NoError(t, err) + err = m.state.UpsertAllocs(mtype, m.nextIndex(), allocs) + require.NoError(err) // allocs become healthy @@ -1687,8 +1699,8 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { } allocs = []*structs.Allocation{alloc1a, alloc1b} - err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), now.UnixNano(), allocs) - must.NoError(t, err) + err = m.state.UpdateAllocsFromClient(mtype, m.nextIndex(), allocs) + require.NoError(err) // ensure any progress deadline has expired time.Sleep(progressTimeout) @@ -1696,25 +1708,26 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { // without a scheduler running we'll never mark the deployment as // successful, so test that healthy == desired and that we haven't failed deployment, err := m.state.DeploymentByID(nil, d.ID) - must.NoError(t, err) - must.Eq(t, structs.DeploymentStatusRunning, deployment.Status) + require.NoError(err) + require.Equal(structs.DeploymentStatusRunning, deployment.Status) group1 := deployment.TaskGroups["group1"] - must.Eq(t, group1.DesiredTotal, group1.HealthyAllocs, must.Sprint("not enough healthy")) - must.Eq(t, group1.DesiredTotal, group1.PlacedAllocs, must.Sprint("not enough placed")) - must.Eq(t, 0, group1.UnhealthyAllocs) + require.Equal(group1.DesiredTotal, group1.HealthyAllocs, "not enough healthy") + require.Equal(group1.DesiredTotal, group1.PlacedAllocs, "not enough placed") + require.Equal(0, group1.UnhealthyAllocs) group2 := deployment.TaskGroups["group2"] - must.Eq(t, group2.DesiredTotal, group2.HealthyAllocs, must.Sprint("not enough healthy")) - must.Eq(t, group2.DesiredTotal, group2.PlacedAllocs, must.Sprint("not enough placed")) - must.Eq(t, 0, group2.UnhealthyAllocs) + require.Equal(group2.DesiredTotal, group2.HealthyAllocs, "not enough healthy") + require.Equal(group2.DesiredTotal, group2.PlacedAllocs, "not enough placed") + require.Equal(0, group2.UnhealthyAllocs) } // Test scenario where deployment initially has no progress deadline // After the deployment is updated, a failed alloc's DesiredTransition should be set func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -1730,19 +1743,18 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - must.Nil(t, 
m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") a := mock.Alloc() a.CreateTime = time.Now().UnixNano() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") d.TaskGroups["web"].ProgressDeadline = 500 * time.Millisecond // Update the deployment with a progress deadline - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // Match on DesiredTransition set to Reschedule for the failed alloc m1 := matchUpdateAllocDesiredTransitionReschedule([]string{a.ID}) @@ -1750,7 +1762,7 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Update the alloc to be unhealthy a2 := a.Copy() @@ -1758,8 +1770,7 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { Healthy: pointer.Of(false), Timestamp: time.Now(), } - must.Nil(t, m.state.UpdateAllocsFromClient( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a2})) + require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) // Wait for the alloc's DesiredState to set reschedule testutil.WaitForResult(func() (bool, error) { @@ -1800,8 +1811,8 @@ func TestDeploymentWatcher_Watch_FailEarly(t *testing.T) { a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), now.UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), now.UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) // require that we get a call to UpsertDeploymentStatusUpdate c := &matchDeploymentStatusUpdateConfig{ @@ -1823,7 +1834,7 @@ func TestDeploymentWatcher_Watch_FailEarly(t *testing.T) { Healthy: pointer.Of(false), Timestamp: now, } - must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), now.UnixNano(), []*structs.Allocation{a2})) + must.Nil(t, m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) // Wait for the deployment to be failed testutil.WaitForResult(func() (bool, error) { @@ -1862,6 +1873,7 @@ func TestDeploymentWatcher_Watch_FailEarly(t *testing.T) { // Tests that the watcher fails rollback when the spec hasn't changed 
func TestDeploymentWatcher_RollbackFailed(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) // Create a job, alloc, and a deployment @@ -1876,16 +1888,15 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), []*structs.Allocation{a}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") // Upsert the job again to get a new version j2 := j.Copy() // Modify the job to make its specification different j2.Stable = false - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob2")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we will get a createEvaluation call only once. This will // verify that the watcher is batching allocation changes @@ -1905,7 +1916,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 1 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 1, watchersCount(w), must.Sprint("Should have 1 deployment")) }) + func(err error) { require.Equal(1, watchersCount(w), "Should have 1 deployment") }) // Update the allocs health to healthy which should create an evaluation for i := 0; i < 5; i++ { @@ -1915,8 +1926,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { HealthyAllocationIDs: []string{a.ID}, }, } - must.Nil(t, m.state.UpdateDeploymentAllocHealth( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req), must.Sprint("UpsertDeploymentAllocHealth")) + require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req), "UpsertDeploymentAllocHealth") } // Wait for there to be one eval @@ -1944,8 +1954,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { UnhealthyAllocationIDs: []string{a.ID}, }, } - must.Nil(t, m.state.UpdateDeploymentAllocHealth( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req2), must.Sprint("UpsertDeploymentAllocHealth")) + require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req2), "UpsertDeploymentAllocHealth") // Wait for there to be one eval testutil.WaitForResult(func() (bool, error) { @@ -1968,12 +1977,13 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { // verify that the job version hasn't changed after upsert m.state.JobByID(nil, structs.DefaultNamespace, j.ID) - must.Eq(t, uint64(0), j.Version, must.Sprintf("Expected job version 0 but got %v", j.Version)) + require.Equal(uint64(0), j.Version, "Expected job version 0 but got ", j.Version) } // Test allocation updates and evaluation creation is batched between watchers func TestWatcher_BatchAllocUpdates(t *testing.T) { ci.Parallel(t) + require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Second) 
m.On("UpdateDeploymentStatus", mocker.MatchedBy(func(args *structs.DeploymentStatusUpdateRequest) bool { @@ -2001,14 +2011,12 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { a2.JobID = j2.ID a2.DeploymentID = d2.ID - now := time.Now().UnixNano() - - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j1), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), must.Sprint("UpsertJob")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), now, d1), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertDeployment(m.nextIndex(), now, d2), must.Sprint("UpsertDeployment")) - must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), now, []*structs.Allocation{a1}), must.Sprint("UpsertAllocs")) - must.Nil(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), now, []*structs.Allocation{a2}), must.Sprint("UpsertAllocs")) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j1), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d1), "UpsertDeployment") + require.Nil(m.state.UpsertDeployment(m.nextIndex(), d2), "UpsertDeployment") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a1}), "UpsertAllocs") + require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2}), "UpsertAllocs") // require that we will get a createEvaluation call only once and it contains // both deployments. This will verify that the watcher is batching @@ -2018,7 +2026,7 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { w.SetEnabled(true, m.state) testutil.WaitForResult(func() (bool, error) { return 2 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 2, watchersCount(w), must.Sprint("Should have 2 deployment")) }) + func(err error) { require.Equal(2, watchersCount(w), "Should have 2 deployment") }) // Update the allocs health to healthy which should create an evaluation req := &structs.ApplyDeploymentAllocHealthRequest{ @@ -2027,8 +2035,7 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { HealthyAllocationIDs: []string{a1.ID}, }, } - must.Nil(t, m.state.UpdateDeploymentAllocHealth( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req), must.Sprint("UpsertDeploymentAllocHealth")) + require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req), "UpsertDeploymentAllocHealth") req2 := &structs.ApplyDeploymentAllocHealthRequest{ DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ @@ -2036,8 +2043,7 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { HealthyAllocationIDs: []string{a2.ID}, }, } - must.Nil(t, m.state.UpdateDeploymentAllocHealth( - structs.MsgTypeTestSetup, m.nextIndex(), time.Now().UnixNano(), req2), must.Sprint("UpsertDeploymentAllocHealth")) + require.Nil(m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, m.nextIndex(), req2), "UpsertDeploymentAllocHealth") // Wait for there to be one eval for each job testutil.WaitForResult(func() (bool, error) { @@ -2067,7 +2073,7 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { m.AssertCalled(t, "UpdateAllocDesiredTransition", mocker.MatchedBy(m1)) testutil.WaitForResult(func() (bool, error) { return 2 == watchersCount(w), nil }, - func(err error) { must.Eq(t, 2, watchersCount(w), must.Sprint("Should have 2 deployment")) }) + func(err error) { 
require.Equal(2, watchersCount(w), "Should have 2 deployment") }) } func watchersCount(w *Watcher) int { @@ -2090,7 +2096,7 @@ func TestWatcher_PurgeDeployment(t *testing.T) { d := mock.Deployment() d.JobID = j.ID must.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j)) - must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), time.Now().UnixNano(), d)) + must.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d)) // require that we get a call to UpsertDeploymentStatusUpdate matchConfig := &matchDeploymentStatusUpdateConfig{ diff --git a/nomad/deploymentwatcher/testutil_test.go b/nomad/deploymentwatcher/testutil_test.go index 5ae67b9ed29..7af7df9cafd 100644 --- a/nomad/deploymentwatcher/testutil_test.go +++ b/nomad/deploymentwatcher/testutil_test.go @@ -8,7 +8,6 @@ import ( "strings" "sync" "testing" - "time" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -99,7 +98,7 @@ func (m *mockBackend) UpsertJob(job *structs.Job) (uint64, error) { func (m *mockBackend) UpdateDeploymentStatus(u *structs.DeploymentStatusUpdateRequest) (uint64, error) { m.Called(u) i := m.nextIndex() - return i, m.state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, i, time.Now().UnixNano(), u) + return i, m.state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, i, u) } // matchDeploymentStatusUpdateConfig is used to configure the matching @@ -153,7 +152,7 @@ func matchDeploymentStatusUpdateRequest(c *matchDeploymentStatusUpdateConfig) fu func (m *mockBackend) UpdateDeploymentPromotion(req *structs.ApplyDeploymentPromoteRequest) (uint64, error) { m.Called(req) i := m.nextIndex() - return i, m.state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, i, time.Now().UnixNano(), req) + return i, m.state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, i, req) } // matchDeploymentPromoteRequestConfig is used to configure the matching @@ -183,7 +182,7 @@ func matchDeploymentPromoteRequest(c *matchDeploymentPromoteRequestConfig) func( func (m *mockBackend) UpdateDeploymentAllocHealth(req *structs.ApplyDeploymentAllocHealthRequest) (uint64, error) { m.Called(req) i := m.nextIndex() - return i, m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, i, time.Now().UnixNano(), req) + return i, m.state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, i, req) } // matchDeploymentAllocHealthRequestConfig is used to configure the matching diff --git a/nomad/drainer/draining_node_test.go b/nomad/drainer/draining_node_test.go index 53f7fe12b7e..02c0b3dbafb 100644 --- a/nomad/drainer/draining_node_test.go +++ b/nomad/drainer/draining_node_test.go @@ -11,7 +11,8 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // testDrainingNode creates a *drainingNode with a 1h deadline but no allocs @@ -26,7 +27,7 @@ func testDrainingNode(t *testing.T) *drainingNode { ForceDeadline: time.Now().Add(time.Hour), } - must.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + require.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 100, node)) return NewDrainingNode(node, state) } @@ -34,22 +35,21 @@ func assertDrainingNode(t *testing.T, dn *drainingNode, isDone bool, remaining, t.Helper() done, err := dn.IsDone() - must.Nil(t, err) - must.Eq(t, isDone, done, must.Sprint("IsDone mismatch")) + 
require.Nil(t, err) + assert.Equal(t, isDone, done, "IsDone mismatch") allocs, err := dn.RemainingAllocs() - must.Nil(t, err) - must.Len(t, remaining, allocs, must.Sprint("RemainingAllocs mismatch")) + require.Nil(t, err) + assert.Len(t, allocs, remaining, "RemainingAllocs mismatch") jobs, err := dn.DrainingJobs() - must.Nil(t, err) - must.Len(t, running, jobs, must.Sprint("DrainingJobs mismatch")) + require.Nil(t, err) + assert.Len(t, jobs, running, "DrainingJobs mismatch") } func TestDrainingNode_Table(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() cases := []struct { name string isDone bool @@ -72,8 +72,8 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.BatchAlloc() alloc.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, []*structs.Allocation{alloc})) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) }, }, { @@ -84,8 +84,8 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.Alloc() alloc.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, []*structs.Allocation{alloc})) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) }, }, { @@ -96,8 +96,8 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.SystemAlloc() alloc.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, []*structs.Allocation{alloc})) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) }, }, { @@ -109,9 +109,9 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) // StateStore doesn't like inserting new allocs // with a terminal status, so set the status in @@ -119,7 +119,7 @@ func TestDrainingNode_Table(t *testing.T) { for _, a := range allocs { a.ClientStatus = structs.AllocClientStatusComplete } - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) }, }, { @@ -131,13 +131,13 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - must.Nil(t, 
dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) // Set only the service job as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) }, }, { @@ -149,14 +149,14 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) // Set only the service and batch jobs as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete allocs[2].ClientStatus = structs.AllocClientStatusComplete - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) }, }, { @@ -168,14 +168,14 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) // Set only the service and batch jobs as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete allocs[1].ClientStatus = structs.AllocClientStatusComplete - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) }, }, { @@ -194,15 +194,15 @@ func TestDrainingNode_Table(t *testing.T) { } for _, a := range allocs { a.NodeID = dn.node.ID - must.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) // Set only the service and batch jobs as terminal allocs[0].ClientStatus = structs.AllocClientStatusComplete allocs[1].ClientStatus = structs.AllocClientStatusComplete allocs[2].ClientStatus = structs.AllocClientStatusComplete - must.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, now, allocs)) + require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) }, }, } diff --git a/nomad/drainer/watch_jobs_test.go b/nomad/drainer/watch_jobs_test.go index 5e896c3b69b..05a2a509e04 100644 --- a/nomad/drainer/watch_jobs_test.go +++ b/nomad/drainer/watch_jobs_test.go @@ -117,8 +117,6 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { var index uint64 = 101 count := 8 - now := time.Now().UnixNano() - newAlloc := func(node *structs.Node, job *structs.Job) *structs.Allocation { a := mock.Alloc() a.JobID = job.ID @@ -149,7 +147,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { allocs = append(allocs, a) } - must.NoError(t, 
store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, allocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, allocs)) index++ } @@ -171,7 +169,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, drainedAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, drainedAllocs)) drains.Resp.Respond(index, nil) index++ @@ -198,7 +196,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { updates = append(updates, a, replacement) replacements[i] = replacement.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, updates)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, updates)) index++ // The drained allocs stopping cause migrations but no new drains @@ -212,7 +210,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { a.ClientStatus = structs.AllocClientStatusComplete completeAllocs[i] = a } - must.NoError(t, store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, index, now, completeAllocs)) + must.NoError(t, store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, index, completeAllocs)) index++ // The drained allocs stopping cause migrations but no new drains @@ -226,7 +224,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { Healthy: pointer.Of(true), } } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, replacements)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) index++ must.MapNotEmpty(t, jobWatcher.drainingJobs()) @@ -242,7 +240,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, drainedAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, drainedAllocs)) drains.Resp.Respond(index, nil) index++ @@ -259,7 +257,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { updates = append(updates, a, replacement) replacements[i] = replacement.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, updates)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, updates)) index++ assertJobWatcherOps(t, jobWatcher, 0, 6) @@ -270,7 +268,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { Healthy: pointer.Of(true), } } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, replacements)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) index++ must.MapNotEmpty(t, jobWatcher.drainingJobs()) @@ -286,7 +284,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, drainedAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, drainedAllocs)) drains.Resp.Respond(index, nil) index++ @@ -303,7 +301,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { updates = append(updates, a, replacement) replacements[i] = replacement.Copy() } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, updates)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, updates)) index++ assertJobWatcherOps(t, jobWatcher, 0, 4) @@ -314,7 +312,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { 
Healthy: pointer.Of(true), } } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, now, replacements)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) // No jobs should be left! must.MapEmpty(t, jobWatcher.drainingJobs()) @@ -624,7 +622,7 @@ func TestDrainingJobWatcher_HandleTaskGroup(t *testing.T) { allocs = append(allocs, a) } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 103, time.Now().UnixNano(), allocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 103, allocs)) snap, err := store.Snapshot() must.NoError(t, err) @@ -674,7 +672,7 @@ func TestHandleTaskGroup_Migrations(t *testing.T) { } allocs = append(allocs, a) } - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), allocs)) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) snap, err := state.Snapshot() require.Nil(err) @@ -747,7 +745,7 @@ func TestHandleTaskGroup_GarbageCollectedNode(t *testing.T) { // Make the first one be on a GC'd node allocs[0].NodeID = uuid.Generate() - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), allocs)) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) snap, err := state.Snapshot() require.Nil(err) diff --git a/nomad/drainer/watch_nodes_test.go b/nomad/drainer/watch_nodes_test.go index 580c943cd0e..5e698e0b8b9 100644 --- a/nomad/drainer/watch_nodes_test.go +++ b/nomad/drainer/watch_nodes_test.go @@ -50,7 +50,7 @@ func TestNodeDrainWatcher_AddNodes(t *testing.T) { alloc2.NodeID = n2.ID must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, 102, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc1, alloc2})) must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 103, n1)) must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 104, n2)) @@ -228,7 +228,7 @@ func testNodeDrainWatcherSetup( alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} index++ must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc})) index++ must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go index e788f635221..83dc2e1f449 100644 --- a/nomad/volumewatcher/volumes_watcher_test.go +++ b/nomad/volumewatcher/volumes_watcher_test.go @@ -36,10 +36,9 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete vol := testVolume(plugin, alloc, node.ID) - now := time.Now().UnixNano() index++ - err := srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol}) + err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(t, err) // need to have just enough of a volume and claim in place so that @@ -49,7 +48,7 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { State: structs.CSIVolumeClaimStateNodeDetached, } index++ - err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) require.NoError(t, err) require.Eventually(t, func() bool { watcher.wlock.RLock() @@ -80,17 +79,16 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { alloc := mock.Alloc() alloc.ClientStatus = structs.AllocClientStatusRunning 
vol := testVolume(plugin, alloc, node.ID) - now := time.Now().UnixNano() index++ - err := srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, now, + err := srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) require.NoError(t, err) watcher.SetEnabled(true, srv.State(), "") index++ - err = srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol}) + err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(t, err) // we should get or start up a watcher when we get an update for @@ -129,7 +127,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { State: structs.CSIVolumeClaimStateUnpublishing, } index++ - err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) require.NoError(t, err) // create a new watcher and enable it to simulate the leadership @@ -153,7 +151,6 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { // it receives notifcations and has completed its work func TestVolumeWatch_StartStop(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() srv := &MockStatefulRPCServer{} srv.state = state.TestStateStore(t) @@ -175,13 +172,13 @@ func TestVolumeWatch_StartStop(t *testing.T) { err := srv.State().UpsertJob(structs.MsgTypeTestSetup, index, nil, alloc1.Job) require.NoError(t, err) index++ - err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{alloc1, alloc2}) + err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1, alloc2}) require.NoError(t, err) // register a volume and an unused volume vol := testVolume(plugin, alloc1, node.ID) index++ - err = srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol}) + err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(t, err) // assert we get a watcher; there are no claims so it should immediately stop @@ -200,11 +197,11 @@ func TestVolumeWatch_StartStop(t *testing.T) { } index++ - err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) require.NoError(t, err) claim.AllocationID = alloc2.ID index++ - err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) require.NoError(t, err) // reap the volume and assert nothing has happened @@ -213,7 +210,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { NodeID: node.ID, } index++ - err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) require.NoError(t, err) ws := memdb.NewWatchSet() @@ -224,11 +221,11 @@ func TestVolumeWatch_StartStop(t *testing.T) { alloc1 = alloc1.Copy() alloc1.ClientStatus = structs.AllocClientStatusComplete index++ - err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{alloc1}) + err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1}) require.NoError(t, err) index++ claim.State = structs.CSIVolumeClaimStateReadyToFree - err = srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) require.NoError(t, err) // watcher stops and 1 claim has been released @@ -247,7 +244,6 @@ func TestVolumeWatch_StartStop(t *testing.T) { // notifications around a deleted 
volume func TestVolumeWatch_Delete(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() srv := &MockStatefulRPCServer{} srv.state = state.TestStateStore(t) @@ -262,7 +258,7 @@ func TestVolumeWatch_Delete(t *testing.T) { plugin := mock.CSIPlugin() vol := mock.CSIVolume(plugin) index++ - must.NoError(t, srv.State().UpsertCSIVolume(index, now, []*structs.CSIVolume{vol})) + must.NoError(t, srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol})) // assert we get a watcher; there are no claims so it should immediately stop require.Eventually(t, func() bool { @@ -274,7 +270,7 @@ func TestVolumeWatch_Delete(t *testing.T) { // write a GC claim to the volume and then immediately delete, to // potentially hit the race condition between updates and deletes index++ - must.NoError(t, srv.State().CSIVolumeClaim(index, now, vol.Namespace, vol.ID, + must.NoError(t, srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, &structs.CSIVolumeClaim{ Mode: structs.CSIVolumeClaimGC, State: structs.CSIVolumeClaimStateReadyToFree, @@ -316,7 +312,7 @@ func TestVolumeWatch_RegisterDeregister(t *testing.T) { // register a volume without claims vol := mock.CSIVolume(plugin) index++ - err := srv.State().UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol}) + err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(t, err) // watcher should stop diff --git a/nomad/worker_test.go b/nomad/worker_test.go index 9cd0944db16..c63deec3da0 100644 --- a/nomad/worker_test.go +++ b/nomad/worker_test.go @@ -533,7 +533,7 @@ func TestWorker_SubmitPlanNormalizedAllocations(t *testing.T) { stoppedAlloc := mock.Alloc() preemptedAlloc := mock.Alloc() - s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 5, time.Now().UnixNano(), []*structs.Allocation{stoppedAlloc, preemptedAlloc}) + s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 5, []*structs.Allocation{stoppedAlloc, preemptedAlloc}) // Create an allocation plan plan := &structs.Plan{ From 0d25ee185a742ad12280042b65841ae79e1545b2 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:46:07 +0100 Subject: [PATCH 17/40] revert more tests --- nomad/client_alloc_endpoint_test.go | 38 +- nomad/client_fs_endpoint_test.go | 48 +- nomad/csi_endpoint_test.go | 44 +- nomad/deployment_endpoint_test.go | 89 ++-- nomad/drainer_int_test.go | 2 +- nomad/eval_endpoint_test.go | 14 +- nomad/heartbeat_test.go | 23 +- nomad/job_endpoint_statuses_test.go | 4 +- nomad/job_endpoint_test.go | 56 +- nomad/namespace_endpoint_test.go | 2 +- nomad/node_endpoint_test.go | 56 +- nomad/operator_endpoint_test.go | 2 +- nomad/periodic_test.go | 2 +- nomad/plan_apply_test.go | 22 +- nomad/search_endpoint_test.go | 539 ++++++++++---------- nomad/service_registration_endpoint_test.go | 4 +- nomad/state/deployment_events_test.go | 4 +- nomad/state/events_test.go | 22 +- nomad/state/testing.go | 4 +- nomad/variables_endpoint_test.go | 6 +- scheduler/context_test.go | 5 +- scheduler/feasible_test.go | 20 +- scheduler/generic_sched_test.go | 146 +++--- scheduler/preemption_test.go | 5 +- scheduler/rank_test.go | 11 +- scheduler/scheduler_sysbatch_test.go | 29 +- scheduler/scheduler_system_test.go | 38 +- scheduler/spread_test.go | 6 +- scheduler/stack_test.go | 3 +- scheduler/testing.go | 2 +- scheduler/util_test.go | 12 +- 31 files changed, 618 insertions(+), 640 deletions(-) diff --git a/nomad/client_alloc_endpoint_test.go b/nomad/client_alloc_endpoint_test.go index 
3ed2e419bf1..9ccc195dc4e 100644 --- a/nomad/client_alloc_endpoint_test.go +++ b/nomad/client_alloc_endpoint_test.go @@ -263,7 +263,7 @@ func TestClientAllocations_GarbageCollect_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*nstructs.Allocation{alloc})) req := &nstructs.AllocSpecificRequest{ AllocID: alloc.ID, @@ -329,7 +329,7 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{a})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -387,7 +387,7 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) cases := []struct { Name string @@ -496,11 +496,10 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - now := time.Now().UnixNano() require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -555,7 +554,7 @@ func TestClientAllocations_Stats_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*nstructs.Allocation{alloc})) req := &nstructs.AllocSpecificRequest{ AllocID: alloc.ID, @@ -619,7 +618,7 @@ func TestClientAllocations_Stats_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{a})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -678,7 +677,7 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) + 
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) cases := []struct { Name string @@ -774,11 +773,10 @@ func TestClientAllocations_Stats_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - now := time.Now().UnixNano() require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -860,7 +858,7 @@ func TestClientAllocations_Restart_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{a})) + require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -973,11 +971,10 @@ func TestClientAllocations_Restart_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - now := time.Now().UnixNano() require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1036,7 +1033,7 @@ func TestClientAllocations_Restart_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*nstructs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) cases := []struct { Name string @@ -1142,13 +1139,12 @@ func TestAlloc_ExecStreaming(t *testing.T) { } // Upsert the allocation - now := time.Now().UnixNano() localState := localServer.State() require.Nil(t, localState.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(t, localState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(t, localState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) remoteState := remoteServer.State() require.Nil(t, remoteState.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(t, remoteState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, now, []*nstructs.Allocation{a})) + require.Nil(t, remoteState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation 
testutil.WaitForResult(func() (bool, error) { @@ -1265,7 +1261,7 @@ func TestAlloc_ExecStreaming_TerminalAlloc(t *testing.T) { state := s.State() err := state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, alloc.Job) must.NoError(t, err) - err = state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*nstructs.Allocation{alloc}) + err = state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{alloc}) must.NoError(t, err) // Make the exec request. diff --git a/nomad/client_fs_endpoint_test.go b/nomad/client_fs_endpoint_test.go index 3fc1437a772..470cad3dd21 100644 --- a/nomad/client_fs_endpoint_test.go +++ b/nomad/client_fs_endpoint_test.go @@ -69,7 +69,7 @@ func TestClientFS_List_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -129,7 +129,7 @@ func TestClientFS_List_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { Name string @@ -228,11 +228,10 @@ func TestClientFS_List_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -289,7 +288,7 @@ func TestClientFS_Stat_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, []*structs.Allocation{alloc})) req := &cstructs.FsStatRequest{ AllocID: alloc.ID, @@ -346,7 +345,7 @@ func TestClientFS_Stat_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -406,7 +405,7 @@ func TestClientFS_Stat_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), 
[]*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { Name string @@ -505,11 +504,10 @@ func TestClientFS_Stat_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -639,7 +637,7 @@ func TestClientFS_Streaming_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { Name string @@ -780,7 +778,7 @@ func TestClientFS_Streaming_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -916,7 +914,7 @@ func TestClientFS_Streaming_Local_Follow(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1058,11 +1056,10 @@ func TestClientFS_Streaming_Remote_Server(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1206,7 +1203,7 @@ func TestClientFS_Streaming_Remote_Region(t *testing.T) { // Upsert the allocation state2 := s2.State() require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 
1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1386,7 +1383,7 @@ func TestClientFS_Logs_OldNode(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, []*structs.Allocation{alloc})) req := &cstructs.FsLogsRequest{ AllocID: alloc.ID, @@ -1469,7 +1466,7 @@ func TestClientFS_Logs_ACL(t *testing.T) { state := s.State() alloc := mock.Alloc() require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { Name string @@ -1610,7 +1607,7 @@ func TestClientFS_Logs_Local(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1747,7 +1744,7 @@ func TestClientFS_Logs_Local_Follow(t *testing.T) { // Upsert the allocation state := s.State() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -1890,11 +1887,10 @@ func TestClientFS_Logs_Remote_Server(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - now := time.Now().UnixNano() require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { @@ -2039,7 +2035,7 @@ func TestClientFS_Logs_Remote_Region(t *testing.T) { // Upsert the allocation state2 := s2.State() require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) - require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{a})) + require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation testutil.WaitForResult(func() (bool, error) { diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index fc3bae3a3a3..ecc46bc13b6 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -59,7 +59,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err := state.UpsertCSIVolume(999, 
time.Now().UnixNano(), vols) + err := state.UpsertCSIVolume(999, vols) require.NoError(t, err) // Create the register request @@ -107,7 +107,7 @@ func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err := state.UpsertCSIVolume(999, time.Now().UnixNano(), vols) + err := state.UpsertCSIVolume(999, vols) require.NoError(t, err) // Create the register request @@ -186,7 +186,7 @@ func TestCSIVolume_pluginValidateVolume(t *testing.T) { if tc.updatePlugin != nil { tc.updatePlugin(plug) } - must.NoError(t, store.UpsertCSIPlugin(1000, time.Now().UnixNano(), plug)) + must.NoError(t, store.UpsertCSIPlugin(1000, plug)) got, err := csiVolume.pluginValidateVolume(vol) @@ -330,7 +330,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { index++ require.NoError(t, state.UpsertJobSummary(index, summary)) index++ - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc})) index++ must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) @@ -382,7 +382,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { }}, }} index++ - err = state.UpsertCSIVolume(index, time.Now().UnixNano(), vols) + err = state.UpsertCSIVolume(index, vols) require.NoError(t, err) // Verify that the volume exists, and is healthy @@ -419,7 +419,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { index++ require.NoError(t, state.UpsertJobSummary(index, summary)) index++ - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc2})) claimReq.AllocationID = alloc2.ID err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) require.EqualError(t, err, structs.ErrCSIVolumeMaxClaims.Error(), @@ -453,7 +453,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { index++ require.NoError(t, state.UpsertJobSummary(index, summary)) index++ - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc3})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc3})) claimReq.AllocationID = alloc3.ID err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) require.NoError(t, err) @@ -515,14 +515,14 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err = state.UpsertCSIVolume(1003, time.Now().UnixNano(), vols) + err = state.UpsertCSIVolume(1003, vols) require.NoError(t, err) alloc := mock.BatchAlloc() alloc.NodeID = node.ID summary := mock.JobSummary(alloc.JobID) require.NoError(t, state.UpsertJobSummary(1004, summary)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc})) // Make the volume claim claimReq := &structs.CSIVolumeClaimRequest{ @@ -649,7 +649,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { } index++ - err = state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) must.NoError(t, err) // setup: create an alloc that will claim our 
volume @@ -663,7 +663,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { index++ must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, - time.Now().UnixNano(), []*structs.Allocation{alloc, otherAlloc})) + []*structs.Allocation{alloc, otherAlloc})) // setup: claim the volume for our to-be-failed alloc claim := &structs.CSIVolumeClaim{ @@ -675,7 +675,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { index++ claim.State = structs.CSIVolumeClaimStateTaken - err = state.CSIVolumeClaim(index, time.Now().UnixNano(), ns, volID, claim) + err = state.CSIVolumeClaim(index, ns, volID, claim) must.NoError(t, err) // setup: claim the volume for our other alloc @@ -688,7 +688,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { index++ otherClaim.State = structs.CSIVolumeClaimStateTaken - err = state.CSIVolumeClaim(index, time.Now().UnixNano(), ns, volID, otherClaim) + err = state.CSIVolumeClaim(index, ns, volID, otherClaim) must.NoError(t, err) // test: unpublish and check the results @@ -707,7 +707,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusFailed index++ must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, - time.Now().UnixNano(), []*structs.Allocation{alloc})) + []*structs.Allocation{alloc})) err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Unpublish", req, &structs.CSIVolumeUnpublishResponse{}) @@ -787,7 +787,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err = state.UpsertCSIVolume(1002, time.Now().UnixNano(), vols) + err = state.UpsertCSIVolume(1002, vols) require.NoError(t, err) // Query everything in the namespace @@ -866,7 +866,7 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { }}, }, } - err = state.UpsertCSIVolume(1001, time.Now().UnixNano(), vols) + err = state.UpsertCSIVolume(1001, vols) require.NoError(t, err) // Lookup volumes in all namespaces @@ -945,7 +945,7 @@ func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) { volume.Namespace = m.namespace } index := 1000 + uint64(i) - require.NoError(t, state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{volume})) + require.NoError(t, state.UpsertCSIVolume(index, []*structs.CSIVolume{volume})) } cases := []struct { @@ -1328,7 +1328,7 @@ func TestCSIVolumeEndpoint_Delete(t *testing.T) { }, } index++ - err = state.UpsertCSIVolume(index, time.Now().UnixNano(), vols) + err = state.UpsertCSIVolume(index, vols) must.NoError(t, err) // Delete volumes @@ -1571,7 +1571,7 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { ExternalID: "vol-12345", }} index++ - require.NoError(t, state.UpsertCSIVolume(index, time.Now().UnixNano(), vols)) + require.NoError(t, state.UpsertCSIVolume(index, vols)) // Create the snapshot request req1 := &structs.CSISnapshotCreateRequest{ @@ -2194,7 +2194,7 @@ func TestCSIPluginEndpoint_DeleteViaGC(t *testing.T) { index, _ := state.LatestIndex() index++ - must.NoError(t, state.UpsertCSIPlugin(index, time.Now().UnixNano(), plugin)) + must.NoError(t, state.UpsertCSIPlugin(index, plugin)) // Retry now that it's empty must.NoError(t, msgpackrpc.CallWithCodec(codec, "CSIPlugin.Delete", reqDel, respDel)) @@ -2251,7 +2251,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { ControllerRequired: false, }, } - err = state.UpsertCSIVolume(1002, time.Now().UnixNano(), vols) + err = state.UpsertCSIVolume(1002, vols) require.NoError(t, err) // has controller @@ -2453,7 +2453,7 @@ func 
TestCSIPluginEndpoint_ACLNamespaceFilterAlloc(t *testing.T) { must.Eq(t, 3, len(allocs)) allocs[0].Namespace = ns1.Name - err := s.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), allocs) + err := s.UpsertAllocs(structs.MsgTypeTestSetup, 1003, allocs) must.NoError(t, err) req := &structs.CSIPluginGetRequest{ diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index c989cbd99a3..e63180ead90 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -37,7 +37,7 @@ func TestDeploymentEndpoint_GetDeployment(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Lookup the deployments get := &structs.DeploymentSpecificRequest{ @@ -69,7 +69,7 @@ func TestDeploymentEndpoint_GetDeployment_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -130,12 +130,12 @@ func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) { // Upsert a deployment we are not interested in first. time.AfterFunc(100*time.Millisecond, func() { - assert.Nil(state.UpsertDeployment(100, time.Now().UnixNano(), d1), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment") }) // Upsert another deployment later which should trigger the watch. 
time.AfterFunc(200*time.Millisecond, func() { - assert.Nil(state.UpsertDeployment(200, time.Now().UnixNano(), d2), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment") }) // Lookup the deployments @@ -175,7 +175,7 @@ func TestDeploymentEndpoint_Fail(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Mark the deployment as failed req := &structs.DeploymentFailRequest{ @@ -225,7 +225,7 @@ func TestDeploymentEndpoint_Fail_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -319,8 +319,8 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Mark the deployment as failed req := &structs.DeploymentFailRequest{ @@ -379,7 +379,7 @@ func TestDeploymentEndpoint_Pause(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Mark the deployment as failed req := &structs.DeploymentPauseRequest{ @@ -422,7 +422,7 @@ func TestDeploymentEndpoint_Pause_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -501,8 +501,8 @@ func TestDeploymentEndpoint_Promote(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Promote the deployment req := &structs.DeploymentPromoteRequest{ @@ -566,8 +566,8 @@ func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, 
time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -652,8 +652,8 @@ func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Set the alloc as healthy req := &structs.DeploymentAllocHealthRequest{ @@ -720,8 +720,8 @@ func TestDeploymentEndpoint_SetAllocHealth_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -826,8 +826,8 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Set the alloc as unhealthy req := &structs.DeploymentAllocHealthRequest{ @@ -915,8 +915,8 @@ func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { a.DeploymentID = d.ID assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") - assert.Nil(state.UpsertDeployment(1000, time.Now().UnixNano(), d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Set the alloc as unhealthy req := &structs.DeploymentAllocHealthRequest{ @@ -983,7 +983,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { state := s1.fsm.State() must.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), must.Sprint("UpsertJob")) - must.Nil(t, state.UpsertDeployment(1000, time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(1000, d), must.Sprint("UpsertDeployment")) // Lookup the deployments get := &structs.DeploymentListRequest{ @@ -1021,7 +1021,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { d2.JobID = j2.ID must.Nil(t, 
state.UpsertNamespaces(1001, []*structs.Namespace{{Name: "prod"}})) must.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, nil, j2), must.Sprint("UpsertJob")) - must.Nil(t, state.UpsertDeployment(1003, time.Now().UnixNano(), d2), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(1003, d2), must.Sprint("UpsertDeployment")) // Lookup the deployments with wildcard namespace get = &structs.DeploymentListRequest{ @@ -1087,19 +1087,17 @@ func TestDeploymentEndpoint_List_order(t *testing.T) { dep3 := mock.Deployment() dep3.ID = uuid3 - now := time.Now().UnixNano() - - err := s1.fsm.State().UpsertDeployment(1000, now, dep1) + err := s1.fsm.State().UpsertDeployment(1000, dep1) must.NoError(t, err) - err = s1.fsm.State().UpsertDeployment(1001, now, dep2) + err = s1.fsm.State().UpsertDeployment(1001, dep2) must.NoError(t, err) - err = s1.fsm.State().UpsertDeployment(1002, now, dep3) + err = s1.fsm.State().UpsertDeployment(1002, dep3) must.NoError(t, err) // update dep2 again so we can later assert create index order did not change - err = s1.fsm.State().UpsertDeployment(1003, now, dep2) + err = s1.fsm.State().UpsertDeployment(1003, dep2) must.NoError(t, err) t.Run("default", func(t *testing.T) { @@ -1177,9 +1175,8 @@ func TestDeploymentEndpoint_List_ACL(t *testing.T) { d2.Namespace = devNS.Name state := s1.fsm.State() - now := time.Now().UnixNano() - must.NoError(t, state.UpsertDeployment(1000, now, d1), must.Sprint("Upsert Deployment failed")) - must.NoError(t, state.UpsertDeployment(1001, now, d2), must.Sprint("Upsert Deployment failed")) + must.NoError(t, state.UpsertDeployment(1000, d1), must.Sprint("Upsert Deployment failed")) + must.NoError(t, state.UpsertDeployment(1001, d2), must.Sprint("Upsert Deployment failed")) // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1002, "test-valid", @@ -1283,7 +1280,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) { // Upsert alloc triggers watches time.AfterFunc(100*time.Millisecond, func() { - must.Nil(t, state.UpsertDeployment(3, time.Now().UnixNano(), d), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(3, d), must.Sprint("UpsertDeployment")) }) req := &structs.DeploymentListRequest{ @@ -1306,7 +1303,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) { d2 := d.Copy() d2.Status = structs.DeploymentStatusPaused time.AfterFunc(100*time.Millisecond, func() { - must.Nil(t, state.UpsertDeployment(5, time.Now().UnixNano(), d2), must.Sprint("UpsertDeployment")) + must.Nil(t, state.UpsertDeployment(5, d2), must.Sprint("UpsertDeployment")) }) req.MinQueryIndex = 3 @@ -1368,7 +1365,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { if m.namespace != "" { // defaults to "default" deployment.Namespace = m.namespace } - must.NoError(t, state.UpsertDeployment(index, time.Now().UnixNano(), deployment)) + must.NoError(t, state.UpsertDeployment(index, deployment)) } aclToken := mock.CreatePolicyAndToken(t, state, 1100, "test-valid-read", @@ -1571,11 +1568,10 @@ func TestDeploymentEndpoint_Allocations(t *testing.T) { summary := mock.JobSummary(a.JobID) state := s1.fsm.State() - now := time.Now().UnixNano() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertDeployment(1000, now, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{a}), "UpsertAllocs") + 
assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Lookup the allocations get := &structs.DeploymentSpecificRequest{ @@ -1610,11 +1606,10 @@ func TestDeploymentEndpoint_Allocations_ACL(t *testing.T) { summary := mock.JobSummary(a.JobID) state := s1.fsm.State() - now := time.Now().UnixNano() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") - assert.Nil(state.UpsertDeployment(1000, now, d), "UpsertDeployment") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") // Create the namespace policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", @@ -1686,14 +1681,13 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { a.DeploymentID = d.ID summary := mock.JobSummary(a.JobID) - now := time.Now().UnixNano() assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j), "UpsertJob") - assert.Nil(state.UpsertDeployment(2, now, d), "UpsertDeployment") + assert.Nil(state.UpsertDeployment(2, d), "UpsertDeployment") assert.Nil(state.UpsertJobSummary(3, summary), "UpsertJobSummary") // Upsert alloc triggers watches time.AfterFunc(100*time.Millisecond, func() { - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a}), "UpsertAllocs") }) req := &structs.DeploymentSpecificRequest{ @@ -1721,8 +1715,7 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { a2.ClientStatus = structs.AllocClientStatusRunning time.AfterFunc(100*time.Millisecond, func() { assert.Nil(state.UpsertJobSummary(5, mock.JobSummary(a2.JobID)), "UpsertJobSummary") - assert.Nil(state.UpdateAllocsFromClient( - structs.MsgTypeTestSetup, 6, time.Now().UnixNano(), []*structs.Allocation{a2}), "updateAllocsFromClient") + assert.Nil(state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 6, []*structs.Allocation{a2}), "updateAllocsFromClient") }) req.MinQueryIndex = 4 @@ -1749,7 +1742,7 @@ func TestDeploymentEndpoint_Reap(t *testing.T) { // Create the register request d1 := mock.Deployment() - assert.Nil(s1.fsm.State().UpsertDeployment(1000,time.Now().UnixNano(), d1), "UpsertDeployment") + assert.Nil(s1.fsm.State().UpsertDeployment(1000, d1), "UpsertDeployment") // Reap the eval get := &structs.DeploymentDeleteRequest{ diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go index 7f853423fc9..02f4e314231 100644 --- a/nomad/drainer_int_test.go +++ b/nomad/drainer_int_test.go @@ -621,7 +621,7 @@ func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete badAllocs = append(badAllocs, alloc) } - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1, time.Now().UnixNano(), badAllocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1, badAllocs)) // Create the second node n2 := mock.Node() diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 2421c76488a..62e84036016 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -404,7 +404,7 @@ func 
TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { EvalID: eval.ID, } assert := assert.New(t) - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) assert.Nil(err) // Dequeue the eval @@ -930,7 +930,7 @@ func TestEvalEndpoint_Delete(t *testing.T) { allocs = append(allocs, alloc) } index++ - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), allocs)) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, allocs)) // Delete all the unwanted evals @@ -1775,7 +1775,7 @@ func TestEvalEndpoint_Allocations(t *testing.T) { state := s1.fsm.State() state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1814,7 +1814,7 @@ func TestEvalEndpoint_Allocations_ACL(t *testing.T) { state := s1.fsm.State() assert.Nil(state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) assert.Nil(state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) // Create ACL tokens validToken := mock.CreatePolicyAndToken(t, state, 1003, "test-valid", @@ -1876,12 +1876,10 @@ func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { alloc1 := mock.Alloc() alloc2 := mock.Alloc() - now := time.Now().UnixNano() - // Upsert an unrelated alloc first time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, now, []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -1890,7 +1888,7 @@ func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { // Upsert an alloc which will trigger the watch later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, now, []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index b4027e5b106..52d234e6df1 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestHeartbeat_InitializeHeartbeatTimers(t *testing.T) { @@ -70,18 +71,19 @@ func TestHeartbeat_ResetHeartbeatTimer(t *testing.T) { func TestHeartbeat_ResetHeartbeatTimer_Nonleader(t *testing.T) { ci.Parallel(t) + require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 // Won't become leader }) defer cleanupS1() - must.False(t, s1.IsLeader()) + require.False(s1.IsLeader()) // Create a new timer _, err := s1.resetHeartbeatTimer("test") - must.NotNil(t, err) - 
must.EqError(t, err, heartbeatNotLeader) + require.NotNil(err) + require.EqualError(err, heartbeatNotLeader) } func TestHeartbeat_ResetHeartbeatTimerLocked(t *testing.T) { @@ -148,6 +150,7 @@ func TestHeartbeat_ResetHeartbeatTimerLocked_Renew(t *testing.T) { func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { ci.Parallel(t) + require := require.New(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -156,7 +159,7 @@ func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { // Create a node node := mock.Node() state := s1.fsm.State() - must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node)) + require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1, node)) // This should cause a status update s1.invalidateHeartbeat(node.ID) @@ -164,10 +167,10 @@ func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { // Check it is updated ws := memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - must.NoError(t, err) - must.True(t, out.TerminalStatus()) - must.SliceLen(t, 2, out.Events) - must.Eq(t, NodeHeartbeatEventMissed, out.Events[1].Message) + require.NoError(err) + require.True(out.TerminalStatus()) + require.Len(out.Events, 2) + require.Equal(NodeHeartbeatEventMissed, out.Events[1].Message) } func TestHeartbeat_ClearHeartbeatTimer(t *testing.T) { @@ -340,7 +343,7 @@ func TestHeartbeat_InvalidateHeartbeat_DisconnectedClient(t *testing.T) { Time: tc.now, }} - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc})) // Trigger status update s1.invalidateHeartbeat(node.ID) @@ -410,7 +413,7 @@ func TestHeartbeat_InvalidateHeartbeatDisconnectedClient(t *testing.T) { Value: structs.AllocClientStatusUnknown, Time: tc.now, }} - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc})) // Trigger status update s1.invalidateHeartbeat(node.ID) diff --git a/nomad/job_endpoint_statuses_test.go b/nomad/job_endpoint_statuses_test.go index f224e487da5..af43f34d241 100644 --- a/nomad/job_endpoint_statuses_test.go +++ b/nomad/job_endpoint_statuses_test.go @@ -280,7 +280,7 @@ func TestJob_Statuses(t *testing.T) { t.Helper() a := mock.MinAllocForJob(job) must.NoError(t, - s.State().UpsertAllocs(structs.AllocUpdateRequestType, incIdx(t), time.Now().UnixNano(), []*structs.Allocation{a}), + s.State().UpsertAllocs(structs.AllocUpdateRequestType, incIdx(t), []*structs.Allocation{a}), must.Sprintf("error creating alloc for job %s", job.ID)) t.Cleanup(func() { test.NoError(t, s.State().DeleteEval(incIdx(t), []string{}, []string{a.ID}, false)) @@ -290,7 +290,7 @@ func TestJob_Statuses(t *testing.T) { t.Helper() deploy := mock.Deployment() deploy.JobID = job.ID - must.NoError(t, s.State().UpsertDeployment(incIdx(t), time.Now().UnixNano(), deploy)) + must.NoError(t, s.State().UpsertDeployment(incIdx(t), deploy)) t.Cleanup(func() { test.NoError(t, s.State().DeleteDeployment(incIdx(t), []string{deploy.ID})) }) diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 72ed6ab52c8..f0f8cef1fa2 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -3282,7 +3282,7 @@ func TestJobEndpoint_ForceRescheduleEvaluate(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.Namespace = job.Namespace alloc.ClientStatus = structs.AllocClientStatusFailed - 
err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, resp.Index+1, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, resp.Index+1, []*structs.Allocation{alloc}) require.Nil(err) // Force a re-evaluation @@ -5131,7 +5131,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { alloc := mock.Alloc() alloc.JobID = job1.ID alloc.Job = job1 - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } }) @@ -5632,7 +5632,7 @@ func TestJobEndpoint_Allocations(t *testing.T) { state := s1.fsm.State() state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -5674,7 +5674,7 @@ func TestJobEndpoint_Allocations_ACL(t *testing.T) { alloc2.JobID = alloc1.JobID state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}) require.Nil(err) // Look up allocations for that job @@ -5737,7 +5737,7 @@ func TestJobEndpoint_Allocations_Blocking(t *testing.T) { // First upsert an unrelated alloc time.AfterFunc(100*time.Millisecond, func() { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -5746,7 +5746,7 @@ func TestJobEndpoint_Allocations_Blocking(t *testing.T) { // Upsert an alloc for the job we are interested in later time.AfterFunc(200*time.Millisecond, func() { state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{alloc2}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -5977,8 +5977,8 @@ func TestJobEndpoint_Deployments(t *testing.T) { d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6013,8 +6013,8 @@ func TestJobEndpoint_Deployments_ACL(t *testing.T) { require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") + 
require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6077,12 +6077,12 @@ func TestJobEndpoint_Deployments_Blocking(t *testing.T) { d2.JobCreateIndex = j.CreateIndex // First upsert an unrelated eval time.AfterFunc(100*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(100, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment") }) // Upsert an eval for the job we are interested in later time.AfterFunc(200*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(200, time.Now().UnixNano(), d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment") }) // Lookup the jobs @@ -6126,8 +6126,8 @@ func TestJobEndpoint_LatestDeployment(t *testing.T) { require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6165,8 +6165,8 @@ func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) { require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex - require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") - require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") // Lookup the jobs get := &structs.JobSpecificRequest{ @@ -6233,12 +6233,12 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) { // First upsert an unrelated eval time.AfterFunc(100*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(100, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment") }) // Upsert an eval for the job we are interested in later time.AfterFunc(200*time.Millisecond, func() { - require.Nil(state.UpsertDeployment(200, time.Now().UnixNano(), d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment") }) // Lookup the jobs @@ -7502,7 +7502,7 @@ func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { alloc.TaskGroup = dispatchedJob.TaskGroups[0].Name alloc.Namespace = dispatchedJob.Namespace alloc.ClientStatus = structs.AllocClientStatusPending - err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, nextIdx, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, nextIdx, []*structs.Allocation{alloc}) require.NoError(t, err) require.Equal(t, &structs.JobChildrenSummary{Running: 1}, jobChildren()) require.Equal(t, structs.JobStatusRunning, dispatchedStatus()) @@ -7521,7 +7521,7 @@ func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { require.NoError(t, err) nalloc = nalloc.Copy() nalloc.ClientStatus = status - err = s1.State().UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIdx, time.Now().UnixNano(), []*structs.Allocation{nalloc}) + err = 
s1.State().UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIdx, []*structs.Allocation{nalloc}) require.NoError(t, err) } @@ -7700,13 +7700,13 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob d1.JobID = job.ID d1.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") d2 := mock.Deployment() d2.Status = structs.DeploymentStatusSuccessful d2.StatusDescription = structs.DeploymentStatusDescriptionSuccessful d2.JobID = job.ID d2.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") // add the latest deployment for the test case dLatest := mock.Deployment() @@ -7714,7 +7714,7 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { dLatest.StatusDescription = "description does not matter for this test" dLatest.JobID = job.ID dLatest.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1003, time.Now().UnixNano(), dLatest), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1003, dLatest), "UpsertDeployment") // attempt to scale originalCount := job.TaskGroups[0].Count @@ -7825,13 +7825,13 @@ func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob d1.JobID = job.ID d1.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1001, time.Now().UnixNano(), d1), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") d2 := mock.Deployment() d2.Status = structs.DeploymentStatusSuccessful d2.StatusDescription = structs.DeploymentStatusDescriptionSuccessful d2.JobID = job.ID d2.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1002, time.Now().UnixNano(), d2), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment") // add the latest deployment for the test case dLatest := mock.Deployment() @@ -7839,7 +7839,7 @@ func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { dLatest.StatusDescription = "description does not matter for this test" dLatest.JobID = job.ID dLatest.JobCreateIndex = job.CreateIndex - require.Nil(state.UpsertDeployment(1003, time.Now().UnixNano(), dLatest), "UpsertDeployment") + require.Nil(state.UpsertDeployment(1003, dLatest), "UpsertDeployment") // register informational scaling event groupName := job.TaskGroups[0].Name @@ -8387,7 +8387,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a0.Namespace = jobV1.Namespace a0.JobID = jobV1.ID a0.ClientStatus = structs.AllocClientStatusComplete - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1010, time.Now().UnixNano(), []*structs.Allocation{a0}), "UpsertAllocs") + require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1010, []*structs.Allocation{a0}), "UpsertAllocs") jobV2 := jobV1.Copy() require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1100, nil, jobV2), "UpsertJob") @@ -8426,7 +8426,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a4.JobID = jobV2.ID a4.ClientStatus = structs.AllocClientStatusRunning // upsert allocations - require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1110, time.Now().UnixNano(), []*structs.Allocation{a1, a2, a3, a4}), "UpsertAllocs") + 
require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1110, []*structs.Allocation{a1, a2, a3, a4}), "UpsertAllocs") event := &structs.ScalingEvent{ Time: time.Now().Unix(), diff --git a/nomad/namespace_endpoint_test.go b/nomad/namespace_endpoint_test.go index c0257cd174f..6a0583cc620 100644 --- a/nomad/namespace_endpoint_test.go +++ b/nomad/namespace_endpoint_test.go @@ -530,7 +530,7 @@ func TestNamespaceEndpoint_DeleteNamespaces_NoAssociatedVolumes_Local(t *testing // Create a volume in one vol := mock.CSIVolume(mock.CSIPlugin()) vol.Namespace = ns1.Name - must.Nil(t, s1.fsm.State().UpsertCSIVolume(1001, time.Now().UnixNano(), []*structs.CSIVolume{vol})) + must.Nil(t, s1.fsm.State().UpsertCSIVolume(1001, []*structs.CSIVolume{vol})) // Lookup the namespaces req := &structs.NamespaceDeleteRequest{ diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index a9b15a7b64c..3a3d3acc250 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -2426,7 +2426,7 @@ func TestClientEndpoint_GetAllocs(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -2478,7 +2478,7 @@ func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) { assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode") assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary") allocs := []*structs.Allocation{allocDefaultNS} - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 5, time.Now().UnixNano(), allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 5, allocs), "UpsertAllocs") // Create the namespace policy and tokens validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+ @@ -2568,7 +2568,7 @@ func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) { assert.Nil(state.UpsertJobSummary(4, mock.JobSummary(allocAltNS.JobID)), "UpsertJobSummary") assert.Nil(state.UpsertJobSummary(5, mock.JobSummary(allocOtherNS.JobID)), "UpsertJobSummary") allocs := []*structs.Allocation{allocDefaultNS, allocAltNS, allocOtherNS} - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 6, time.Now().UnixNano(), allocs), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 6, allocs), "UpsertAllocs") // Create the namespace policy and tokens validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+ @@ -2653,7 +2653,7 @@ func TestClientEndpoint_GetClientAllocs(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -2744,7 +2744,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) start := time.Now() time.AfterFunc(100*time.Millisecond, func() { - err := store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := 
store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -2811,7 +2811,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { allocUpdate.ID = alloc.ID allocUpdate.ClientStatus = structs.AllocClientStatusRunning store.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID)) - err := store.UpsertAllocs(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{allocUpdate}) + err := store.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate}) if err != nil { t.Fatalf("err: %v", err) } @@ -2865,7 +2865,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) { state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)) start := time.Now() time.AfterFunc(100*time.Millisecond, func() { - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1, alloc2})) }) // Lookup the allocs in a blocking query @@ -2944,7 +2944,7 @@ func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) { alloc.DesiredStatus = structs.AllocClientStatusComplete state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{prevAlloc, alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{prevAlloc, alloc}) assert.Nil(err) // Lookup the allocs @@ -2994,7 +2994,7 @@ func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) start := time.Now() time.AfterFunc(100*time.Millisecond, func() { - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -3034,7 +3034,7 @@ func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { allocUpdate.ID = alloc.ID allocUpdate.ClientStatus = structs.AllocClientStatusRunning state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID)) - err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 200, time.Now().UnixNano(), []*structs.Allocation{allocUpdate}) + err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate}) if err != nil { t.Fatalf("err: %v", err) } @@ -3101,7 +3101,7 @@ func TestNode_UpdateAlloc(t *testing.T) { must.NoError(t, store.UpsertJobSummary(99, mock.JobSummary(alloc2.JobID))) alloc2.TaskGroup = job.TaskGroups[0].Name - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc, alloc2})) // Attempt updates of more than one alloc for the same job clientAlloc1 := new(structs.Allocation) @@ -3173,7 +3173,7 @@ func TestNode_UpdateAlloc_NodeNotReady(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusRunning must.NoError(t, store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))) - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})) // Mark node as down. 
must.NoError(t, store.UpdateNodeStatus( @@ -3262,7 +3262,7 @@ func TestNode_UpdateAllocServiceRegistrations(t *testing.T) { alloc2.TaskGroup = job.TaskGroups[0].Name index++ - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc0, alloc1, alloc2})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc0, alloc1, alloc2})) serviceFor := func(allocID string, port int) *structs.ServiceRegistration { return &structs.ServiceRegistration{ @@ -3338,7 +3338,7 @@ func TestClientEndpoint_BatchUpdate(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -3400,7 +3400,7 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -3483,7 +3483,7 @@ func TestClientEndpoint_UpdateAlloc_VaultWorkloadIdentity(t *testing.T) { alloc.NodeID = node.ID state := s1.fsm.State() state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) must.NoError(t, err) var accessors []*structs.VaultAccessor @@ -3544,13 +3544,13 @@ func TestClientEndpoint_CreateNodeEvals(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID state.UpsertJobSummary(1, mock.JobSummary(alloc.JobID)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, []*structs.Allocation{alloc})) idx++ sysBatchAlloc := mock.SysBatchAlloc() sysBatchAlloc.NodeID = node.ID state.UpsertJobSummary(1, mock.JobSummary(sysBatchAlloc.JobID)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, time.Now().UnixNano(), []*structs.Allocation{sysBatchAlloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, []*structs.Allocation{sysBatchAlloc})) idx++ // Inject a fake system job. 
@@ -3752,7 +3752,7 @@ func TestClientEndpoint_Evaluate(t *testing.T) { t.Fatalf("err: %v", err) } state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -3837,7 +3837,7 @@ func TestClientEndpoint_Evaluate_ACL(t *testing.T) { assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode") assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)), "UpsertJobSummary") - assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}), "UpsertAllocs") + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}), "UpsertAllocs") // Create the policy and tokens validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite)) @@ -4194,7 +4194,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] tasks := []string{task.Name} - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4230,7 +4230,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { // Update to be running on the node alloc.NodeID = node.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 4, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4244,7 +4244,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { // Update to be client-terminal alloc.ClientStatus = structs.AllocClientStatusFailed - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 5, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 5, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4288,7 +4288,7 @@ func TestClientEndpoint_DeriveVaultToken(t *testing.T) { task := alloc.Job.TaskGroups[0].Tasks[0] tasks := []string{task.Name} task.Vault = &structs.Vault{Policies: []string{"a", "b"}} - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4381,7 +4381,7 @@ func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) { task := alloc.Job.TaskGroups[0].Tasks[0] tasks := []string{task.Name} task.Vault = &structs.Vault{Policies: []string{"a", "b"}} - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -4514,7 +4514,7 @@ func TestClientEndpoint_DeriveSIToken(t *testing.T) { mutateConnectJob(t, alloc.Job) // appends sidecar task sidecarTask := alloc.Job.TaskGroups[0].Tasks[1] - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, 
[]*structs.Allocation{alloc})
 	r.NoError(err)
 
 	request := &structs.DeriveSITokenRequest{
@@ -4572,7 +4572,7 @@ func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) {
 	m := NewConsulACLsAPI(mockACLsAPI, s1.logger, nil)
 	s1.consulACLs = m
 
-	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
 	r.NoError(err)
 
 	request := &structs.DeriveSITokenRequest{
@@ -4816,7 +4816,7 @@ func TestClientEndpoint_UpdateAlloc_Evals_ByTrigger(t *testing.T) {
 			}
 
 			if !tc.missingAlloc {
-				err = fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 100, time.Now().UnixNano(), []*structs.Allocation{serverAlloc})
+				err = fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{serverAlloc})
 				require.NoError(t, err)
 			}
 
diff --git a/nomad/operator_endpoint_test.go b/nomad/operator_endpoint_test.go
index 4a893637125..1afcb44b253 100644
--- a/nomad/operator_endpoint_test.go
+++ b/nomad/operator_endpoint_test.go
@@ -1353,7 +1353,7 @@ func TestOperator_UpgradeCheckRequest_VaultWorkloadIdentity(t *testing.T) {
 	allocJobWithWID.JobID = jobWithWID.ID
 	allocJobWithWID.NodeID = node.ID
 
-	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, time.Now().UnixNano(), []*structs.Allocation{allocJobNoWID, allocJobWithWID})
+	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1006, []*structs.Allocation{allocJobNoWID, allocJobWithWID})
 	must.NoError(t, err)
 
 	// Create Vault token accessor for job without Vault identity and one that
diff --git a/nomad/periodic_test.go b/nomad/periodic_test.go
index df9b1b4f9b2..1718c434ca9 100644
--- a/nomad/periodic_test.go
+++ b/nomad/periodic_test.go
@@ -758,7 +758,7 @@ func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) {
 	alloc.JobID = childjob.ID
 	alloc.EvalID = eval.ID
 	alloc.DesiredStatus = structs.AllocDesiredStatusRun
-	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil {
+	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc}); err != nil {
 		t.Fatalf("UpsertAllocs failed: %v", err)
 	}
 
diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go
index 2e7167b4247..687d5511832 100644
--- a/nomad/plan_apply_test.go
+++ b/nomad/plan_apply_test.go
@@ -83,7 +83,7 @@ func TestPlanApply_applyPlan(t *testing.T) {
 
 	// Register a fake deployment
 	oldDeployment := mock.Deployment()
-	if err := s1.State().UpsertDeployment(900, time.Now().UnixNano(), oldDeployment); err != nil {
+	if err := s1.State().UpsertDeployment(900, oldDeployment); err != nil {
 		t.Fatalf("UpsertDeployment failed: %v", err)
 	}
 
@@ -259,7 +259,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) {
 
 	// Register a fake deployment
 	oldDeployment := mock.Deployment()
-	if err := s1.State().UpsertDeployment(900, time.Now().UnixNano(), oldDeployment); err != nil {
+	if err := s1.State().UpsertDeployment(900, oldDeployment); err != nil {
 		t.Fatalf("UpsertDeployment failed: %v", err)
 	}
 
@@ -290,7 +290,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) {
 		PreemptedByAllocation: alloc.ID,
 	}
 	s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))
-	s1.State().UpsertAllocs(structs.MsgTypeTestSetup, 1100, time.Now().UnixNano(), []*structs.Allocation{stoppedAlloc, preemptedAlloc})
+	s1.State().UpsertAllocs(structs.MsgTypeTestSetup, 1100, []*structs.Allocation{stoppedAlloc, preemptedAlloc})
 	// Create an eval
 	eval := mock.Eval()
 	eval.JobID = alloc.JobID
@@ -615,7 +615,7 @@ func TestPlanApply_EvalPlan_Preemption(t *testing.T) {
 	}
 
 	// Insert a preempted alloc such that the alloc will fit only after preemption
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{preemptedAlloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{preemptedAlloc})
 
 	alloc := mock.Alloc()
 	alloc.AllocatedResources = &structs.AllocatedResources{
@@ -904,7 +904,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) {
 	alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
 	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
 	state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
 
 	alloc2 := mock.Alloc()
 	alloc2.NodeID = node.ID
@@ -954,7 +954,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Device(t *testing.T) {
 
 	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
 	state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
 
 	// Alloc2 tries to use the same device
 	alloc2 := mock.Alloc()
@@ -994,7 +994,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) {
 	alloc.NodeID = node.ID
 	alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
 	state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
 
 	snap, _ := state.Snapshot()
 	plan := &structs.Plan{
@@ -1027,7 +1027,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting_Ineligible(t *testing.T) {
 	alloc.NodeID = node.ID
 	alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
 	state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
 
 	snap, _ := state.Snapshot()
 	plan := &structs.Plan{
@@ -1058,7 +1058,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) {
 	alloc.NodeID = node.ID
 	alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
 	state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
 
 	snap, _ := state.Snapshot()
 	allocEvict := new(structs.Allocation)
@@ -1097,7 +1097,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) {
 	alloc.DesiredStatus = structs.AllocDesiredStatusEvict
 	alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
 	state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
 
 	snap, _ := state.Snapshot()
 	alloc2 := mock.Alloc()
@@ -1130,7 +1130,7 @@ func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) {
 	node.ReservedResources = nil
 	node.Status = structs.NodeStatusDown
 	state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
-	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})
+	state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
 
 	snap, _ := state.Snapshot()
 	allocEvict := new(structs.Allocation)
diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go
index 6da307049e1..e06688ac927 100644
--- a/nomad/search_endpoint_test.go
+++ b/nomad/search_endpoint_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) HashiCorp, Inc.e
+// Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
 package nomad
@@ -9,7 +9,6 @@ import (
 	"strconv"
 	"strings"
 	"testing"
-	"time"
 
 	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
 	"github.com/hashicorp/nomad/acl"
@@ -34,7 +33,7 @@ func registerMockJob(s *Server, t *testing.T, prefix string, counter int) *struc
 
 func registerJob(s *Server, t *testing.T, job *structs.Job) {
 	fsmState := s.fsm.State()
-	must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job))
+	require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job))
 }
 
 func mockAlloc() *structs.Allocation {
@@ -71,9 +70,9 @@ func TestSearch_PrefixSearch_Job(t *testing.T) {
 		t.Fatalf("err: %v", err)
 	}
 
-	must.Len(t, 1, resp.Matches[structs.Jobs])
-	must.Eq(t, job.ID, resp.Matches[structs.Jobs][0])
-	must.Eq(t, uint64(jobIndex), resp.Index)
+	require.Len(t, resp.Matches[structs.Jobs], 1)
+	require.Equal(t, job.ID, resp.Matches[structs.Jobs][0])
+	require.Equal(t, uint64(jobIndex), resp.Index)
 }
 
 func TestSearch_PrefixSearch_ACL(t *testing.T) {
@@ -103,7 +102,7 @@ func TestSearch_PrefixSearch_ACL(t *testing.T) {
 	must.NoError(t, resp.Error)
 
 	plugin := mock.CSIPlugin()
-	must.NoError(t, store.UpsertCSIPlugin(1002, time.Now().UnixNano(), plugin))
+	must.NoError(t, store.UpsertCSIPlugin(1002, plugin))
 
 	node := mock.Node()
 	must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1003, node))
@@ -283,8 +282,8 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) {
 
 	summary := mock.JobSummary(alloc.JobID)
 	fsmState := s.fsm.State()
-	must.NoError(t, fsmState.UpsertJobSummary(999, summary))
-	must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}))
+	require.NoError(t, fsmState.UpsertJobSummary(999, summary))
+	require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}))
 
 	req := &structs.SearchRequest{
 		Context: structs.All,
@@ -298,10 +297,10 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) {
 	for i := 1; i < len(prefix); i++ {
 		req.Prefix = prefix[:i]
 		var resp structs.SearchResponse
-		must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
-		must.Eq(t, 1, len(resp.Matches[structs.Jobs]))
-		must.Eq(t, job.ID, resp.Matches[structs.Jobs][0])
-		must.Eq(t, jobIndex, resp.Index)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
+		require.Equal(t, 1, len(resp.Matches[structs.Jobs]))
+		require.Equal(t, job.ID, resp.Matches[structs.Jobs][0])
+		require.EqualValues(t, jobIndex, resp.Index)
 	}
 }
 
@@ -324,8 +323,8 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) {
 
 	summary := mock.JobSummary(alloc.JobID)
 	fsmState := s.fsm.State()
-	must.NoError(t, fsmState.UpsertJobSummary(999, summary))
-	must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, 
time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, fsmState.UpsertJobSummary(999, summary)) + require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) req := &structs.SearchRequest{ Prefix: prefix, @@ -337,11 +336,11 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) - must.Eq(t, jobIndex, resp.Index) + require.Len(t, resp.Matches[structs.Jobs], 1) + require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) + require.EqualValues(t, jobIndex, resp.Index) } // truncate should limit results to 20 @@ -371,11 +370,11 @@ func TestSearch_PrefixSearch_Truncate(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 20, resp.Matches[structs.Jobs]) - must.True(t, resp.Truncations[structs.Jobs]) - must.Eq(t, uint64(jobIndex), resp.Index) + require.Len(t, resp.Matches[structs.Jobs], 20) + require.True(t, resp.Truncations[structs.Jobs]) + require.Equal(t, uint64(jobIndex), resp.Index) } func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { @@ -394,7 +393,7 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { job := registerMockJob(s, t, prefix, 0) eval1 := mock.Eval() eval1.ID = job.ID - must.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) + require.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) req := &structs.SearchRequest{ Prefix: prefix, @@ -406,12 +405,12 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) - must.Len(t, 1, resp.Matches[structs.Evals]) - must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0]) + require.Len(t, resp.Matches[structs.Jobs], 1) + require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) + require.Len(t, resp.Matches[structs.Evals], 1) + require.Equal(t, eval1.ID, resp.Matches[structs.Evals][0]) } func TestSearch_PrefixSearch_Evals(t *testing.T) { @@ -425,7 +424,7 @@ func TestSearch_PrefixSearch_Evals(t *testing.T) { testutil.WaitForLeader(t, s.RPC) eval1 := mock.Eval() - must.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) + require.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) prefix := eval1.ID[:len(eval1.ID)-2] @@ -439,12 +438,12 @@ func TestSearch_PrefixSearch_Evals(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Evals]) - must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0]) - must.False(t, resp.Truncations[structs.Evals]) - must.Eq(t, uint64(2000), resp.Index) + require.Len(t, 
resp.Matches[structs.Evals], 1) + require.Equal(t, eval1.ID, resp.Matches[structs.Evals][0]) + require.False(t, resp.Truncations[structs.Evals]) + require.Equal(t, uint64(2000), resp.Index) } func TestSearch_PrefixSearch_Allocation(t *testing.T) { @@ -461,8 +460,8 @@ func TestSearch_PrefixSearch_Allocation(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertJobSummary(999, summary)) - must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, fsmState.UpsertJobSummary(999, summary)) + require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, []*structs.Allocation{alloc})) prefix := alloc.ID[:len(alloc.ID)-2] @@ -476,12 +475,12 @@ func TestSearch_PrefixSearch_Allocation(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Allocs]) - must.Eq(t, alloc.ID, resp.Matches[structs.Allocs][0]) - must.False(t, resp.Truncations[structs.Allocs]) - must.Eq(t, uint64(90), resp.Index) + require.Len(t, resp.Matches[structs.Allocs], 1) + require.Equal(t, alloc.ID, resp.Matches[structs.Allocs][0]) + require.False(t, resp.Truncations[structs.Allocs]) + require.Equal(t, uint64(90), resp.Index) } func TestSearch_PrefixSearch_All_UUID(t *testing.T) { @@ -498,15 +497,15 @@ func TestSearch_PrefixSearch_All_UUID(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertJobSummary(999, summary)) - must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, fsmState.UpsertJobSummary(999, summary)) + require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) node := mock.Node() - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, node)) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, node)) eval1 := mock.Eval() eval1.ID = node.ID - must.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1002, []*structs.Evaluation{eval1})) + require.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1002, []*structs.Evaluation{eval1})) req := &structs.SearchRequest{ Context: structs.All, @@ -519,11 +518,11 @@ func TestSearch_PrefixSearch_All_UUID(t *testing.T) { for i := 1; i < len(alloc.ID); i++ { req.Prefix = alloc.ID[:i] var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Allocs]) - must.Eq(t, alloc.ID, resp.Matches[structs.Allocs][0]) - must.False(t, resp.Truncations[structs.Allocs]) - must.Eq(t, 1002, resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.Len(t, resp.Matches[structs.Allocs], 1) + require.Equal(t, alloc.ID, resp.Matches[structs.Allocs][0]) + require.False(t, resp.Truncations[structs.Allocs]) + require.EqualValues(t, 1002, resp.Index) } } @@ -540,7 +539,7 @@ func TestSearch_PrefixSearch_Node(t *testing.T) { fsmState := s.fsm.State() node := mock.Node() - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) prefix := node.ID[:len(node.ID)-2] @@ -558,10 
+557,10 @@ func TestSearch_PrefixSearch_Node(t *testing.T) { t.Fatalf("err: %v", err) } - must.Len(t, 1, resp.Matches[structs.Nodes]) - must.Eq(t, node.ID, resp.Matches[structs.Nodes][0]) - must.False(t, resp.Truncations[structs.Nodes]) - must.Eq(t, uint64(100), resp.Index) + require.Len(t, resp.Matches[structs.Nodes], 1) + require.Equal(t, node.ID, resp.Matches[structs.Nodes][0]) + require.False(t, resp.Truncations[structs.Nodes]) + require.Equal(t, uint64(100), resp.Index) } func TestSearch_PrefixSearch_NodePool(t *testing.T) { @@ -776,7 +775,7 @@ func TestSearch_PrefixSearch_Deployment(t *testing.T) { testutil.WaitForLeader(t, s.RPC) deployment := mock.Deployment() - must.NoError(t, s.fsm.State().UpsertDeployment(2000, time.Now().UnixNano(), deployment)) + require.NoError(t, s.fsm.State().UpsertDeployment(2000, deployment)) prefix := deployment.ID[:len(deployment.ID)-2] @@ -790,11 +789,11 @@ func TestSearch_PrefixSearch_Deployment(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Deployments]) - must.Eq(t, deployment.ID, resp.Matches[structs.Deployments][0]) - must.False(t, resp.Truncations[structs.Deployments]) - must.Eq(t, uint64(2000), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.Len(t, resp.Matches[structs.Deployments], 1) + require.Equal(t, deployment.ID, resp.Matches[structs.Deployments][0]) + require.False(t, resp.Truncations[structs.Deployments]) + require.Equal(t, uint64(2000), resp.Index) } func TestSearch_PrefixSearch_AllContext(t *testing.T) { @@ -810,11 +809,11 @@ func TestSearch_PrefixSearch_AllContext(t *testing.T) { fsmState := s.fsm.State() node := mock.Node() - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) eval1 := mock.Eval() eval1.ID = node.ID - must.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1})) + require.NoError(t, fsmState.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1})) prefix := node.ID[:len(node.ID)-2] @@ -828,13 +827,13 @@ func TestSearch_PrefixSearch_AllContext(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Nodes]) - must.Len(t, 1, resp.Matches[structs.Evals]) - must.Eq(t, node.ID, resp.Matches[structs.Nodes][0]) - must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0]) - must.Eq(t, uint64(1000), resp.Index) + require.Len(t, resp.Matches[structs.Nodes], 1) + require.Len(t, resp.Matches[structs.Evals], 1) + require.Equal(t, node.ID, resp.Matches[structs.Nodes][0]) + require.Equal(t, eval1.ID, resp.Matches[structs.Evals][0]) + require.Equal(t, uint64(1000), resp.Index) } // Tests that the top 20 matches are returned when no prefix is set @@ -862,10 +861,10 @@ func TestSearch_PrefixSearch_NoPrefix(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) - must.Eq(t, uint64(jobIndex), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.Len(t, 
resp.Matches[structs.Jobs], 1) + require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) + require.Equal(t, uint64(jobIndex), resp.Index) } // Tests that the zero matches are returned when a prefix has no matching @@ -892,9 +891,9 @@ func TestSearch_PrefixSearch_NoMatches(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) require.Empty(t, resp.Matches[structs.Jobs]) - must.Eq(t, uint64(0), resp.Index) + require.Equal(t, uint64(0), resp.Index) } // Prefixes can only be looked up if their length is a power of two. For @@ -926,9 +925,9 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.Len(t, resp.Matches[structs.Jobs], 1) + require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) } func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { @@ -965,11 +964,11 @@ func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { codec := rpcClient(t, s2) var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job.ID, resp.Matches[structs.Jobs][0]) - must.Eq(t, uint64(jobIndex), resp.Index) + require.Len(t, resp.Matches[structs.Jobs], 1) + require.Equal(t, job.ID, resp.Matches[structs.Jobs][0]) + require.Equal(t, uint64(jobIndex), resp.Index) } func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { @@ -996,11 +995,11 @@ func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Plugins]) - must.Eq(t, id, resp.Matches[structs.Plugins][0]) - must.False(t, resp.Truncations[structs.Plugins]) + require.Len(t, resp.Matches[structs.Plugins], 1) + require.Equal(t, id, resp.Matches[structs.Plugins][0]) + require.False(t, resp.Truncations[structs.Plugins]) } func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { @@ -1014,12 +1013,12 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { testutil.WaitForLeader(t, s.RPC) id := uuid.Generate() - err := s.fsm.State().UpsertCSIVolume(1000, time.Now().UnixNano(), []*structs.CSIVolume{{ + err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{ ID: id, Namespace: structs.DefaultNamespace, PluginID: "glade", }}) - must.NoError(t, err) + require.NoError(t, err) prefix := id[:len(id)-2] @@ -1033,11 +1032,11 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Volumes]) - must.Eq(t, id, resp.Matches[structs.Volumes][0]) - must.False(t, resp.Truncations[structs.Volumes]) + require.Len(t, resp.Matches[structs.Volumes], 1) + require.Equal(t, id, resp.Matches[structs.Volumes][0]) + 
require.False(t, resp.Truncations[structs.Volumes]) } func TestSearch_PrefixSearch_Namespace(t *testing.T) { @@ -1051,7 +1050,7 @@ func TestSearch_PrefixSearch_Namespace(t *testing.T) { testutil.WaitForLeader(t, s.RPC) ns := mock.Namespace() - must.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) + require.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) prefix := ns.Name[:len(ns.Name)-2] @@ -1064,12 +1063,12 @@ func TestSearch_PrefixSearch_Namespace(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Namespaces]) - must.Eq(t, ns.Name, resp.Matches[structs.Namespaces][0]) - must.False(t, resp.Truncations[structs.Namespaces]) - must.Eq(t, uint64(2000), resp.Index) + require.Len(t, resp.Matches[structs.Namespaces], 1) + require.Equal(t, ns.Name, resp.Matches[structs.Namespaces][0]) + require.False(t, resp.Truncations[structs.Namespaces]) + require.Equal(t, uint64(2000), resp.Index) } func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) { @@ -1217,7 +1216,7 @@ func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { prefix := policy.ID fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) req := &structs.SearchRequest{ Prefix: prefix, @@ -1229,16 +1228,16 @@ func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { } var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) - must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) - must.Eq(t, uint64(jobIndex), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.Len(t, resp.Matches[structs.ScalingPolicies], 1) + require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) + require.Equal(t, uint64(jobIndex), resp.Index) req.Context = structs.All - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) - must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) - must.Eq(t, uint64(jobIndex), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) + require.Len(t, resp.Matches[structs.ScalingPolicies], 1) + require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0]) + require.Equal(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_ACL(t *testing.T) { @@ -1270,7 +1269,7 @@ func TestSearch_FuzzySearch_ACL(t *testing.T) { plugin := mock.CSIPlugin() plugin.ID = "mock.hashicorp.com" - must.NoError(t, store.UpsertCSIPlugin(1002, time.Now().UnixNano(), plugin)) + must.NoError(t, store.UpsertCSIPlugin(1002, plugin)) node := mock.Node() must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1003, node)) @@ -1344,7 +1343,7 @@ func TestSearch_FuzzySearch_ACL(t *testing.T) { req.Text = "jo" // mock job Name is my-job var resp structs.FuzzySearchResponse must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) + require.Len(t, resp.Matches[structs.Jobs], 1) must.Eq(t, structs.FuzzyMatch{ ID: "my-job", Scope: []string{"default", 
job.ID}, @@ -1426,7 +1425,7 @@ func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { job := mock.Job() registerJob(s, t, job) - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "foo", // min set to 5 @@ -1435,7 +1434,7 @@ func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { } var resp structs.FuzzySearchResponse - must.EqError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), + require.EqualError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), "fuzzy search is not enabled") } @@ -1454,7 +1453,7 @@ func TestSearch_FuzzySearch_ShortText(t *testing.T) { job := mock.Job() registerJob(s, t, job) - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "foo", // min set to 5 @@ -1463,7 +1462,7 @@ func TestSearch_FuzzySearch_ShortText(t *testing.T) { } var resp structs.FuzzySearchResponse - must.EqError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), + require.EqualError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp), "fuzzy search query must be at least 5 characters, got 3") } @@ -1478,7 +1477,7 @@ func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { testutil.WaitForLeader(t, s.RPC) fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "job", @@ -1493,11 +1492,11 @@ func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 20, resp.Matches[structs.Jobs]) - must.True(t, resp.Truncations[structs.Jobs]) - must.Eq(t, uint64(jobIndex), resp.Index) + require.Len(t, resp.Matches[structs.Jobs], 20) + require.True(t, resp.Truncations[structs.Jobs]) + require.Equal(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { @@ -1513,7 +1512,7 @@ func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { testutil.WaitForLeader(t, s.RPC) fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) req := &structs.FuzzySearchRequest{ Text: "job", @@ -1528,11 +1527,11 @@ func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 5, resp.Matches[structs.Jobs]) - must.True(t, resp.Truncations[structs.Jobs]) - must.Eq(t, uint64(jobIndex), resp.Index) + require.Len(t, resp.Matches[structs.Jobs], 5) + require.True(t, resp.Truncations[structs.Jobs]) + require.Equal(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_Evals(t *testing.T) { @@ -1547,7 +1546,7 @@ func TestSearch_FuzzySearch_Evals(t *testing.T) { eval1 := mock.Eval() eval1.ID = "f7dee5a1-d2b0-2f6a-2e75-6c8e467a4b99" - 
must.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) + require.NoError(t, s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})) req := &structs.FuzzySearchRequest{ Text: "f7dee", // evals are prefix searched @@ -1559,12 +1558,12 @@ func TestSearch_FuzzySearch_Evals(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Evals]) - must.Eq(t, eval1.ID, resp.Matches[structs.Evals][0].ID) - must.False(t, resp.Truncations[structs.Evals]) - must.Eq(t, uint64(2000), resp.Index) + require.Len(t, resp.Matches[structs.Evals], 1) + require.Equal(t, eval1.ID, resp.Matches[structs.Evals][0].ID) + require.False(t, resp.Truncations[structs.Evals]) + require.Equal(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_Allocation(t *testing.T) { @@ -1581,8 +1580,8 @@ func TestSearch_FuzzySearch_Allocation(t *testing.T) { summary := mock.JobSummary(alloc.JobID) fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertJobSummary(999, summary)) - must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, fsmState.UpsertJobSummary(999, summary)) + require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 90, []*structs.Allocation{alloc})) req := &structs.FuzzySearchRequest{ Text: "web", @@ -1594,12 +1593,12 @@ func TestSearch_FuzzySearch_Allocation(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Allocs]) - must.Eq(t, alloc.Name, resp.Matches[structs.Allocs][0].ID) - must.False(t, resp.Truncations[structs.Allocs]) - must.Eq(t, uint64(90), resp.Index) + require.Len(t, resp.Matches[structs.Allocs], 1) + require.Equal(t, alloc.Name, resp.Matches[structs.Allocs][0].ID) + require.False(t, resp.Truncations[structs.Allocs]) + require.Equal(t, uint64(90), resp.Index) } func TestSearch_FuzzySearch_Node(t *testing.T) { @@ -1615,7 +1614,7 @@ func TestSearch_FuzzySearch_Node(t *testing.T) { fsmState := s.fsm.State() node := mock.Node() - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 100, node)) req := &structs.FuzzySearchRequest{ Text: "oo", @@ -1627,11 +1626,11 @@ func TestSearch_FuzzySearch_Node(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Nodes]) - must.Eq(t, node.Name, resp.Matches[structs.Nodes][0].ID) - must.False(t, resp.Truncations[structs.Nodes]) - must.Eq(t, uint64(100), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Nodes], 1) + require.Equal(t, node.Name, resp.Matches[structs.Nodes][0].ID) + require.False(t, resp.Truncations[structs.Nodes]) + require.Equal(t, uint64(100), resp.Index) } func TestSearch_FuzzySearch_NodePool(t *testing.T) { @@ -1851,7 +1850,7 @@ func TestSearch_FuzzySearch_Deployment(t *testing.T) { testutil.WaitForLeader(t, s.RPC) deployment := mock.Deployment() - must.NoError(t, 
s.fsm.State().UpsertDeployment(2000, time.Now().UnixNano(), deployment)) + require.NoError(t, s.fsm.State().UpsertDeployment(2000, deployment)) req := &structs.FuzzySearchRequest{ Text: deployment.ID[0:3], // deployments are prefix searched @@ -1863,11 +1862,11 @@ func TestSearch_FuzzySearch_Deployment(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Deployments]) - must.Eq(t, deployment.ID, resp.Matches[structs.Deployments][0].ID) - must.False(t, resp.Truncations[structs.Deployments]) - must.Eq(t, uint64(2000), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Deployments], 1) + require.Equal(t, deployment.ID, resp.Matches[structs.Deployments][0].ID) + require.False(t, resp.Truncations[structs.Deployments]) + require.Equal(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { @@ -1891,11 +1890,11 @@ func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Plugins]) - must.Eq(t, "my-plugin", resp.Matches[structs.Plugins][0].ID) - must.False(t, resp.Truncations[structs.Plugins]) + require.Len(t, resp.Matches[structs.Plugins], 1) + require.Equal(t, "my-plugin", resp.Matches[structs.Plugins][0].ID) + require.False(t, resp.Truncations[structs.Plugins]) } func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { @@ -1909,12 +1908,12 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { testutil.WaitForLeader(t, s.RPC) id := uuid.Generate() - err := s.fsm.State().UpsertCSIVolume(1000, time.Now().UnixNano(), []*structs.CSIVolume{{ + err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{ ID: id, Namespace: structs.DefaultNamespace, PluginID: "glade", }}) - must.NoError(t, err) + require.NoError(t, err) req := &structs.FuzzySearchRequest{ Text: id[0:3], // volumes are prefix searched @@ -1926,11 +1925,11 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Volumes]) - must.Eq(t, id, resp.Matches[structs.Volumes][0].ID) - must.False(t, resp.Truncations[structs.Volumes]) + require.Len(t, resp.Matches[structs.Volumes], 1) + require.Equal(t, id, resp.Matches[structs.Volumes][0].ID) + require.False(t, resp.Truncations[structs.Volumes]) } func TestSearch_FuzzySearch_Namespace(t *testing.T) { @@ -1944,7 +1943,7 @@ func TestSearch_FuzzySearch_Namespace(t *testing.T) { testutil.WaitForLeader(t, s.RPC) ns := mock.Namespace() - must.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) + require.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) req := &structs.FuzzySearchRequest{ Text: "am", // mock is team- @@ -1955,12 +1954,12 @@ func TestSearch_FuzzySearch_Namespace(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, 
resp.Matches[structs.Namespaces]) - must.Eq(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) - must.False(t, resp.Truncations[structs.Namespaces]) - must.Eq(t, uint64(2000), resp.Index) + require.Len(t, resp.Matches[structs.Namespaces], 1) + require.Equal(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) + require.False(t, resp.Truncations[structs.Namespaces]) + require.Equal(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { @@ -1975,7 +1974,7 @@ func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { ns := mock.Namespace() ns.Name = "TheFooNamespace" - must.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) + require.NoError(t, s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns})) req := &structs.FuzzySearchRequest{ Text: "foon", @@ -1986,12 +1985,12 @@ func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Namespaces]) - must.Eq(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) - must.False(t, resp.Truncations[structs.Namespaces]) - must.Eq(t, uint64(2000), resp.Index) + require.Len(t, resp.Matches[structs.Namespaces], 1) + require.Equal(t, ns.Name, resp.Matches[structs.Namespaces][0].ID) + require.False(t, resp.Truncations[structs.Namespaces]) + require.Equal(t, uint64(2000), resp.Index) } func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { @@ -2007,7 +2006,7 @@ func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { job, policy := mock.JobWithScalingPolicy() fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) req := &structs.FuzzySearchRequest{ Text: policy.ID[0:3], // scaling policies are prefix searched @@ -2019,16 +2018,16 @@ func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) - must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) - must.Eq(t, uint64(jobIndex), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.ScalingPolicies], 1) + require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) + require.Equal(t, uint64(jobIndex), resp.Index) req.Context = structs.All - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.ScalingPolicies]) - must.Eq(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) - must.Eq(t, uint64(jobIndex), resp.Index) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.ScalingPolicies], 1) + require.Equal(t, policy.ID, resp.Matches[structs.ScalingPolicies][0].ID) + require.Equal(t, uint64(jobIndex), resp.Index) } func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { @@ -2045,18 +2044,18 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { ns := mock.Namespace() ns.Name = "team-job-app" - must.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{ns})) + require.NoError(t, 
fsmState.UpsertNamespaces(500, []*structs.Namespace{ns})) job1 := mock.Job() - must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, nil, job1)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, nil, job1)) job2 := mock.Job() job2.Namespace = ns.Name - must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, nil, job2)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, nil, job2)) node := mock.Node() node.Name = "run-jobs" - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, node)) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, node)) req := &structs.FuzzySearchRequest{ Text: "set-text-in-test", @@ -2071,7 +2070,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { { var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - must.EqError(t, err, structs.ErrPermissionDenied.Error()) + require.EqualError(t, err, structs.ErrPermissionDenied.Error()) } // Try with an invalid token and expect failure @@ -2081,7 +2080,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = invalidToken.SecretID var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - must.EqError(t, err, structs.ErrPermissionDenied.Error()) + require.EqualError(t, err, structs.ErrPermissionDenied.Error()) } // Try with a node:read token and expect failure due to Namespaces being the context @@ -2091,7 +2090,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = validToken.SecretID var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - must.EqError(t, err, structs.ErrPermissionDenied.Error()) + require.EqualError(t, err, structs.ErrPermissionDenied.Error()) } // Try with a node:read token and expect success due to All context @@ -2101,12 +2100,12 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.Context = structs.All req.AuthToken = validToken.SecretID var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Eq(t, uint64(1001), resp.Index) - must.Len(t, 1, resp.Matches[structs.Nodes]) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Equal(t, uint64(1001), resp.Index) + require.Len(t, resp.Matches[structs.Nodes], 1) // Jobs filtered out since token only has access to node:read - must.Len(t, 0, resp.Matches[structs.Jobs]) + require.Len(t, resp.Matches[structs.Jobs], 0) } // Try with a valid token for non-default namespace:read-job @@ -2118,15 +2117,15 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = validToken.SecretID req.Namespace = job2.Namespace var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job2.Name, resp.Matches[structs.Jobs][0].ID) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Jobs], 1) + require.Equal(t, job2.Name, resp.Matches[structs.Jobs][0].ID) // Index of job - not node - because node context is filtered out - must.Eq(t, uint64(504), resp.Index) + require.Equal(t, uint64(504), resp.Index) // Nodes filtered out since token only has access to namespace:read-job - must.Len(t, 0, resp.Matches[structs.Nodes]) + require.Len(t, 
resp.Matches[structs.Nodes], 0) } // Try with a management token @@ -2136,12 +2135,12 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { req.AuthToken = root.SecretID req.Namespace = job1.Namespace var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Eq(t, uint64(1001), resp.Index) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job1.Name, resp.Matches[structs.Jobs][0].ID) - must.Len(t, 1, resp.Matches[structs.Nodes]) - must.Len(t, 1, resp.Matches[structs.Namespaces]) // matches "team-job-app" + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Equal(t, uint64(1001), resp.Index) + require.Len(t, resp.Matches[structs.Jobs], 1) + require.Equal(t, job1.Name, resp.Matches[structs.Jobs][0].ID) + require.Len(t, resp.Matches[structs.Nodes], 1) + require.Len(t, resp.Matches[structs.Namespaces], 1) // matches "team-job-app" } } @@ -2157,7 +2156,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { testutil.WaitForLeader(t, s.RPC) fsmState := s.fsm.State() - must.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{{ + require.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{{ Name: "teamA", Description: "first namespace", CreateIndex: 100, @@ -2186,29 +2185,29 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { job1.Name = "teamA-job1" job1.ID = "job1" job1.Namespace = "teamA" - must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job1)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job1)) job2 := mock.Job() job2.Name = "teamB-job2" job2.ID = "job2" job2.Namespace = "teamB" - must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job2)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job2)) job3 := mock.Job() job3.Name = "teamC-job3" job3.ID = "job3" job3.Namespace = "teamC" - must.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job3)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job3)) // Upsert a node node := mock.Node() node.Name = "node-for-teams" - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node)) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node)) // Upsert a node that will not be matched node2 := mock.Node() node2.Name = "node-for-ops" - must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node2)) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, inc(), node2)) // Create parameterized requests request := func(text, namespace, token string, context structs.Context) *structs.FuzzySearchRequest { @@ -2227,7 +2226,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { var resp structs.FuzzySearchResponse req := request("anything", job1.Namespace, "", structs.Jobs) err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - must.EqError(t, err, structs.ErrPermissionDenied.Error()) + require.EqualError(t, err, structs.ErrPermissionDenied.Error()) }) t.Run("with an invalid token expect failure", func(t *testing.T) { @@ -2237,7 +2236,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - must.EqError(t, err, structs.ErrPermissionDenied.Error()) + require.EqualError(t, err, structs.ErrPermissionDenied.Error()) }) 
t.Run("with node:read token search namespaces expect failure", func(t *testing.T) { @@ -2246,7 +2245,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { var resp structs.FuzzySearchResponse err := msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp) - must.EqError(t, err, structs.ErrPermissionDenied.Error()) + require.EqualError(t, err, structs.ErrPermissionDenied.Error()) }) t.Run("with node:read token search all expect success", func(t *testing.T) { @@ -2254,13 +2253,13 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", job1.Namespace, validToken.SecretID, structs.All) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) // One matching node - must.Len(t, 1, resp.Matches[structs.Nodes]) + require.Len(t, resp.Matches[structs.Nodes], 1) // Jobs filtered out since token only has access to node:read - must.Len(t, 0, resp.Matches[structs.Jobs]) + require.Len(t, resp.Matches[structs.Jobs], 0) }) t.Run("with a teamB/job:read token search all expect 1 job", func(t *testing.T) { @@ -2269,12 +2268,12 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", job2.Namespace, token.SecretID, structs.All) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Jobs]) - must.Eq(t, job2.Name, resp.Matches[structs.Jobs][0].ID) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Jobs], 1) + require.Equal(t, job2.Name, resp.Matches[structs.Jobs][0].ID) // Nodes filtered out since token only has access to namespace:read-job - must.Len(t, 0, resp.Matches[structs.Nodes]) + require.Len(t, resp.Matches[structs.Nodes], 0) }) // Using a token that can read jobs in 2 namespaces, we should get job results from @@ -2291,10 +2290,10 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Jobs) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 2, resp.Matches[structs.Jobs]) - must.Eq(t, job2.Name, resp.Matches[structs.Jobs][0].ID) - must.Eq(t, job3.Name, resp.Matches[structs.Jobs][1].ID) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Jobs], 2) + require.Equal(t, job2.Name, resp.Matches[structs.Jobs][0].ID) + require.Equal(t, job3.Name, resp.Matches[structs.Jobs][1].ID) }) // Using a management token, we should get job results from all three namespaces @@ -2303,11 +2302,11 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", structs.AllNamespacesSentinel, root.SecretID, structs.Jobs) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 3, resp.Matches[structs.Jobs]) - must.Eq(t, job1.Name, resp.Matches[structs.Jobs][0].ID) - must.Eq(t, job2.Name, resp.Matches[structs.Jobs][1].ID) - must.Eq(t, job3.Name, resp.Matches[structs.Jobs][2].ID) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Jobs], 3) + require.Equal(t, job1.Name, 
resp.Matches[structs.Jobs][0].ID) + require.Equal(t, job2.Name, resp.Matches[structs.Jobs][1].ID) + require.Equal(t, job3.Name, resp.Matches[structs.Jobs][2].ID) }) // Using a token that can read nodes, we should get our 1 matching node when @@ -2320,9 +2319,9 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Nodes) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Nodes]) - must.Eq(t, "node-for-teams", resp.Matches[structs.Nodes][0].ID) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Nodes], 1) + require.Equal(t, "node-for-teams", resp.Matches[structs.Nodes][0].ID) }) // Using a token that cannot read nodes, we should get no matching nodes when @@ -2334,7 +2333,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { token := mock.CreateToken(t, fsmState, inc(), []string{"agent-read-policy"}) req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Nodes) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) require.Empty(t, resp.Matches[structs.Nodes]) }) @@ -2350,31 +2349,31 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc1.Name = job1.Name + ".task[0]" alloc1.Namespace = job1.Namespace summary1 := mock.JobSummary(alloc1.JobID) - must.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) + require.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) alloc2 := mockAlloc() alloc2.JobID = job2.ID alloc2.Name = job2.Name + ".task[0]" alloc2.Namespace = job2.Namespace summary2 := mock.JobSummary(alloc2.JobID) - must.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) + require.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) alloc3 := mockAlloc() alloc3.JobID = job3.ID alloc3.Name = job3.Name + ".task[0]" alloc3.Namespace = job3.Namespace summary3 := mock.JobSummary(alloc3.JobID) - must.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) + require.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) // Upsert the allocs - must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) + require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), []*structs.Allocation{alloc1, alloc2, alloc3})) token := mock.CreateToken(t, fsmState, inc(), []string{"policyD"}) req := request("team", structs.AllNamespacesSentinel, token.SecretID, structs.Allocs) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Allocs]) - must.Eq(t, "teamB-job2.task[0]", resp.Matches[structs.Allocs][0].ID) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Allocs], 1) + require.Equal(t, "teamB-job2.task[0]", resp.Matches[structs.Allocs][0].ID) }) // Using a management token should return allocs from all the jobs. 
@@ -2386,7 +2385,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc1.Name = "test-alloc.one[0]" alloc1.Namespace = job1.Namespace summary1 := mock.JobSummary(alloc1.JobID) - must.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) + require.NoError(t, fsmState.UpsertJobSummary(inc(), summary1)) alloc2 := mockAlloc() alloc2.ID = uuid.Generate() @@ -2394,7 +2393,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc2.Name = "test-alloc.two[0]" alloc2.Namespace = job2.Namespace summary2 := mock.JobSummary(alloc2.JobID) - must.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) + require.NoError(t, fsmState.UpsertJobSummary(inc(), summary2)) alloc3 := mockAlloc() alloc3.ID = uuid.Generate() @@ -2402,21 +2401,21 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { alloc3.Name = "test-alloc.three[0]" alloc3.Namespace = job3.Namespace summary3 := mock.JobSummary(alloc3.JobID) - must.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) + require.NoError(t, fsmState.UpsertJobSummary(inc(), summary3)) // Upsert the allocs - must.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) + require.NoError(t, fsmState.UpsertAllocs(structs.MsgTypeTestSetup, inc(), []*structs.Allocation{alloc1, alloc2, alloc3})) req := request("alloc", structs.AllNamespacesSentinel, root.SecretID, structs.Allocs) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 3, resp.Matches[structs.Allocs]) - must.Eq(t, alloc1.Name, resp.Matches[structs.Allocs][0].ID) - must.Eq(t, []string{"teamA", alloc1.ID}, resp.Matches[structs.Allocs][0].Scope) - must.Eq(t, alloc2.Name, resp.Matches[structs.Allocs][1].ID) - must.Eq(t, []string{"teamB", alloc2.ID}, resp.Matches[structs.Allocs][1].Scope) - must.Eq(t, alloc3.Name, resp.Matches[structs.Allocs][2].ID) - must.Eq(t, []string{"teamC", alloc3.ID}, resp.Matches[structs.Allocs][2].Scope) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.Len(t, resp.Matches[structs.Allocs], 3) + require.Equal(t, alloc1.Name, resp.Matches[structs.Allocs][0].ID) + require.Equal(t, []string{"teamA", alloc1.ID}, resp.Matches[structs.Allocs][0].Scope) + require.Equal(t, alloc2.Name, resp.Matches[structs.Allocs][1].ID) + require.Equal(t, []string{"teamB", alloc2.ID}, resp.Matches[structs.Allocs][1].Scope) + require.Equal(t, alloc3.Name, resp.Matches[structs.Allocs][2].ID) + require.Equal(t, []string{"teamC", alloc3.ID}, resp.Matches[structs.Allocs][2].Scope) }) // Allow plugin read and wildcard namespace @@ -2432,9 +2431,9 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { req := request("teams", structs.AllNamespacesSentinel, token.SecretID, structs.Plugins) var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - must.Len(t, 1, resp.Matches[structs.Plugins]) + require.Len(t, resp.Matches[structs.Plugins], 1) require.Empty(t, resp.Matches[structs.Plugins][0].Scope) // no scope }) } @@ -2499,9 +2498,9 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { ns := mock.Namespace() ns.Name = job.Namespace - must.NoError(t, fsmState.UpsertNamespaces(2000, []*structs.Namespace{ns})) + require.NoError(t, fsmState.UpsertNamespaces(2000, []*structs.Namespace{ns})) registerJob(s, t, job) - 
must.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1003, mock.Node())) + require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1003, mock.Node())) t.Run("sleep", func(t *testing.T) { req := &structs.FuzzySearchRequest{ @@ -2513,16 +2512,16 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }, } var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) m := resp.Matches - must.Eq(t, uint64(1000), resp.Index) // job is explicit search context, has id=1000 + require.Equal(t, uint64(1000), resp.Index) // job is explicit search context, has id=1000 // just the one job - must.Len(t, 1, m[structs.Jobs]) + require.Len(t, m[structs.Jobs], 1) // 3 services (1 group, 2 task) - must.Len(t, 3, m[structs.Services]) - must.Eq(t, []structs.FuzzyMatch{{ + require.Len(t, m[structs.Services], 3) + require.Equal(t, []structs.FuzzyMatch{{ ID: "some-sleepy-task-svc-one", Scope: []string{"team-sleepy", job.ID, "qa-sleeper-group-one", "qa-sleep-task-one"}, }, { @@ -2534,8 +2533,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Services]) // 3 groups - must.Len(t, 3, m[structs.Groups]) - must.Eq(t, []structs.FuzzyMatch{{ + require.Len(t, m[structs.Groups], 3) + require.Equal(t, []structs.FuzzyMatch{{ ID: "sleep-in-java", Scope: []string{"team-sleepy", job.ID}, }, { @@ -2547,8 +2546,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Groups]) // 3 tasks (1 does not match) - must.Len(t, 3, m[structs.Tasks]) - must.Eq(t, []structs.FuzzyMatch{{ + require.Len(t, m[structs.Tasks], 3) + require.Equal(t, []structs.FuzzyMatch{{ ID: "qa-sleep-task-one", Scope: []string{"team-sleepy", job.ID, "qa-sleeper-group-one"}, }, { @@ -2560,8 +2559,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Tasks]) // 2 tasks with command - must.Len(t, 2, m[structs.Commands]) - must.Eq(t, []structs.FuzzyMatch{{ + require.Len(t, m[structs.Commands], 2) + require.Equal(t, []structs.FuzzyMatch{{ ID: "/bin/sleep", Scope: []string{"team-sleepy", job.ID, "prod-sleeper-group-one", "prod-sleep-task-one"}, }, { @@ -2570,15 +2569,15 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { }}, m[structs.Commands]) // 1 task with image - must.Len(t, 1, m[structs.Images]) - must.Eq(t, []structs.FuzzyMatch{{ + require.Len(t, m[structs.Images], 1) + require.Equal(t, []structs.FuzzyMatch{{ ID: "sleeper:latest", Scope: []string{"team-sleepy", job.ID, "qa-sleeper-group-one", "qa-sleep-task-one"}, }}, m[structs.Images]) // 1 task with class - must.Len(t, 1, m[structs.Classes]) - must.Eq(t, []structs.FuzzyMatch{{ + require.Len(t, m[structs.Classes], 1) + require.Equal(t, []structs.FuzzyMatch{{ ID: "sleep.class", Scope: []string{"team-sleepy", job.ID, "sleep-in-java", "prod-java-sleep"}, }}, m[structs.Classes]) @@ -2597,6 +2596,6 @@ func TestSearch_FuzzySearch_fuzzyIndex(t *testing.T) { {name: "foo-bar-baz", text: "zap", exp: -1}, } { result := fuzzyIndex(tc.name, tc.text) - must.Eq(t, tc.exp, result, must.Sprintf("name: %s, text: %s, exp: %d, got: %d", tc.name, tc.text, tc.exp, result)) + require.Equal(t, tc.exp, result, "name: %s, text: %s, exp: %d, got: %d", tc.name, tc.text, tc.exp, result) } } diff --git a/nomad/service_registration_endpoint_test.go b/nomad/service_registration_endpoint_test.go index b192a9fc4ec..07d8ef17dfb 100644 --- a/nomad/service_registration_endpoint_test.go +++ b/nomad/service_registration_endpoint_test.go @@ -878,7 +878,7 
@@ func TestServiceRegistration_List(t *testing.T) {
 	allocs[0].Namespace = "platform"
 	require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job))
 	signAllocIdentities(s.encrypter, job, allocs, time.Now())
-	require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, time.Now().UnixNano(), allocs))
+	require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, allocs))
 
 	signedToken := allocs[0].SignedIdentities["web"]
 
@@ -1155,7 +1155,7 @@ func TestServiceRegistration_GetService(t *testing.T) {
 	job := allocs[0].Job
 	require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job))
 	signAllocIdentities(s.encrypter, job, allocs, time.Now())
-	require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, time.Now().UnixNano(), allocs))
+	require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, allocs))
 
 	signedToken := allocs[0].SignedIdentities["web"]
 
diff --git a/nomad/state/deployment_events_test.go b/nomad/state/deployment_events_test.go
index be8667def36..3bf9e9f6c0a 100644
--- a/nomad/state/deployment_events_test.go
+++ b/nomad/state/deployment_events_test.go
@@ -31,7 +31,7 @@ func TestDeploymentEventFromChanges(t *testing.T) {
 	d.JobID = j.ID
 
 	require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx))
-	require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx))
+	require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx))
 
 	setupTx.Txn.Commit()
 
@@ -47,7 +47,7 @@ func TestDeploymentEventFromChanges(t *testing.T) {
 		// Exlude Job and assert its added
 	}
 
-	require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, time.Now().UnixNano(), req))
+	require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req))
 
 	events := WaitForEvents(t, s, 100, 1, 1*time.Second)
 	require.Len(t, events, 2)
diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go
index 945039fc4e4..8e15e27fb9d 100644
--- a/nomad/state/events_test.go
+++ b/nomad/state/events_test.go
@@ -115,7 +115,7 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) {
 	d.JobID = j.ID
 
 	require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx))
-	require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx))
+	require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx))
 
 	setupTx.Txn.Commit()
 
@@ -131,7 +131,7 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) {
 		// Exlude Job and assert its added
 	}
 
-	require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, time.Now().UnixNano(), req))
+	require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req))
 
 	events := WaitForEvents(t, s, 100, 1, 1*time.Second)
 	require.Len(t, events, 2)
@@ -173,7 +173,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) {
 			DesiredCanaries: 1,
 		},
 	}
-	require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx))
+	require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx))
 
 	// create set of allocs
 	c1 := mock.Alloc()
@@ -192,7 +192,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) {
 		Healthy: pointer.Of(true),
 	}
 
-	require.NoError(t, s.upsertAllocsImpl(10, time.Now().UnixNano(), []*structs.Allocation{c1, c2}, setupTx))
+	require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx))
 
 	// commit setup transaction
 	setupTx.Txn.Commit()
@@ -208,7 +208,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) {
 		Eval: e,
 	}
 
-	require.NoError(t, s.UpdateDeploymentPromotion(msgType, 100, time.Now().UnixNano(), req))
+	require.NoError(t, s.UpdateDeploymentPromotion(msgType, 100, req))
 
 	events := WaitForEvents(t, s, 100, 1, 1*time.Second)
 	require.Len(t, events, 4)
@@ -250,7 +250,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) {
 			DesiredCanaries: 1,
 		},
 	}
-	require.NoError(t, s.upsertDeploymentImpl(10, time.Now().UnixNano(), d, setupTx))
+	require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx))
 
 	// create set of allocs
 	c1 := mock.Alloc()
@@ -269,7 +269,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) {
 		Healthy: pointer.Of(true),
 	}
 
-	require.NoError(t, s.upsertAllocsImpl(10, time.Now().UnixNano(), []*structs.Allocation{c1, c2}, setupTx))
+	require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx))
 
 	// Commit setup
 	setupTx.Commit()
@@ -287,7 +287,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) {
 		},
 	}
 
-	require.NoError(t, s.UpdateDeploymentAllocHealth(msgType, 100, time.Now().UnixNano(), req))
+	require.NoError(t, s.UpdateDeploymentAllocHealth(msgType, 100, req))
 
 	events := WaitForEvents(t, s, 100, 1, 1*time.Second)
 	require.Len(t, events, 3)
@@ -514,7 +514,7 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) {
 		EvalID: eval.ID,
 	}
 
-	require.NoError(t, s.UpsertPlanResults(msgType, 100, time.Now().UnixNano(), req))
+	require.NoError(t, s.UpsertPlanResults(msgType, 100, req))
 
 	events := WaitForEvents(t, s, 100, 1, 1*time.Second)
 	require.Len(t, events, 5)
@@ -644,7 +644,7 @@ func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T)
 
 	alloc := mock.Alloc()
 	require.Nil(t, s.UpsertJob(structs.MsgTypeTestSetup, 10, nil, alloc.Job))
-	require.Nil(t, s.UpsertAllocs(structs.MsgTypeTestSetup, 11, time.Now().UnixNano(), []*structs.Allocation{alloc}))
+	require.Nil(t, s.UpsertAllocs(structs.MsgTypeTestSetup, 11, []*structs.Allocation{alloc}))
 
 	msgType := structs.AllocUpdateDesiredTransitionRequestType
 
@@ -977,7 +977,7 @@ func TestNodeDrainEventFromChanges(t *testing.T) {
 	alloc2.NodeID = node.ID
 
 	require.NoError(t, upsertNodeTxn(setupTx, 10, node))
-	require.NoError(t, s.upsertAllocsImpl(100, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}, setupTx))
+	require.NoError(t, s.upsertAllocsImpl(100, []*structs.Allocation{alloc1, alloc2}, setupTx))
 	setupTx.Txn.Commit()
 
 	// changes
diff --git a/nomad/state/testing.go b/nomad/state/testing.go
index 76221bc7dd7..cb955ffa46c 100644
--- a/nomad/state/testing.go
+++ b/nomad/state/testing.go
@@ -231,7 +231,7 @@ func TestBadCSIState(t testing.TB, store *StateStore) error {
 	alloc1.DesiredStatus = structs.AllocDesiredStatusRun
 
 	// Insert allocs into the state store
-	err := store.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc1})
+	err := store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1})
 	if err != nil {
 		return err
 	}
@@ -311,7 +311,7 @@ func TestBadCSIState(t testing.TB, store *StateStore) error {
 	}
 
 	vol = vol.Copy() // canonicalize
-	err = store.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol})
+	err = store.UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	if err != nil {
 		return err
 	}
diff --git a/nomad/variables_endpoint_test.go b/nomad/variables_endpoint_test.go
index 3d2e4a646b0..43c1ab7dde0 100644
--- a/nomad/variables_endpoint_test.go
+++ b/nomad/variables_endpoint_test.go
@@ -488,7 +488,7 @@ func TestVariablesEndpoint_auth(t *testing.T) {
 	store := srv.fsm.State()
 	must.NoError(t, store.UpsertNamespaces(1000, []*structs.Namespace{{Name: ns}}))
 	must.NoError(t, store.UpsertAllocs(
- structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) + structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) wiHandle := &structs.WIHandle{ WorkloadIdentifier: "web", @@ -590,7 +590,7 @@ func TestVariablesEndpoint_auth(t *testing.T) { // make alloc non-terminal alloc1.ClientStatus = structs.AllocClientStatusRunning must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, 1200, time.Now().UnixNano(), []*structs.Allocation{alloc1})) + structs.MsgTypeTestSetup, 1200, []*structs.Allocation{alloc1})) t.Run("wrong namespace should be denied", func(t *testing.T) { err := testFn(&structs.QueryOptions{ @@ -877,7 +877,7 @@ func TestVariablesEndpoint_ListFiltering(t *testing.T) { must.NoError(t, store.UpsertNamespaces(idx, []*structs.Namespace{{Name: ns}})) idx++ must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, idx, time.Now().UnixNano(), []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, idx, []*structs.Allocation{alloc})) wiHandle := &structs.WIHandle{ WorkloadIdentifier: "web", diff --git a/scheduler/context_test.go b/scheduler/context_test.go index 51cf902f046..5fe23e2a863 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -5,7 +5,6 @@ package scheduler import ( "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/idset" @@ -163,7 +162,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) // Add a planned eviction to alloc1 plan := ctx.Plan() @@ -303,7 +302,7 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{allocEvict, allocPreempt, allocPropose})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose})) // Plan to evict one alloc and preempt another plan := ctx.Plan() diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 5453e25c91c..f552b70c9f3 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -368,7 +368,7 @@ func TestCSIVolumeChecker(t *testing.T) { {Segments: map[string]string{"rack": "R1"}}, {Segments: map[string]string{"rack": "R2"}}, } - err := state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol}) + err := state.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) must.NoError(t, err) index++ @@ -379,14 +379,14 @@ func TestCSIVolumeChecker(t *testing.T) { vol2.Namespace = structs.DefaultNamespace vol2.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter vol2.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem - err = state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol2}) + err = state.UpsertCSIVolume(index, 
[]*structs.CSIVolume{vol2}) must.NoError(t, err) index++ vid3 := "volume-id[0]" vol3 := vol.Copy() vol3.ID = vid3 - err = state.UpsertCSIVolume(index, time.Now().UnixNano(), []*structs.CSIVolume{vol3}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol3}) must.NoError(t, err) index++ @@ -405,7 +405,7 @@ func TestCSIVolumeChecker(t *testing.T) { summary := mock.JobSummary(alloc.JobID) must.NoError(t, state.UpsertJobSummary(index, summary)) index++ - err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) must.NoError(t, err) index++ @@ -1857,7 +1857,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { NodeID: nodes[4].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2066,7 +2066,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { NodeID: nodes[1].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2153,7 +2153,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin NodeID: nodes[0].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2232,7 +2232,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { NodeID: nodes[1].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2329,7 +2329,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin NodeID: nodes[1].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -2446,7 +2446,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { NodeID: nodes[2].ID, }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 90bd3ef92d1..adda5e2cb2a 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -592,7 +592,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - assert.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs), "UpsertAllocs") + assert.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs), "UpsertAllocs") // Update the count job2 := job.Copy() @@ 
-808,7 +808,7 @@ func TestServiceSched_JobRegister_Datacenter_Downgrade(t *testing.T) { } allocs = append(allocs, alloc) } - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update job to place it in dc2. job2 := job1.Copy() @@ -942,7 +942,7 @@ func TestServiceSched_JobRegister_NodePool_Downgrade(t *testing.T) { } allocs = append(allocs, alloc) } - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update job to place it in the spread node pool. job2 := job1.Copy() @@ -1891,7 +1891,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -1905,7 +1905,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusFailed // #10446 terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job job2 := mock.Job() @@ -2003,7 +2003,7 @@ func TestServiceSched_JobModify_ExistingDuplicateAllocIndex(t *testing.T) { } allocs = append(allocs, alloc) } - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), time.Now().UnixNano(), allocs)) + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), allocs)) // Generate a job modification which will force a destructive update. mockJob2 := mock.Job() @@ -2081,7 +2081,7 @@ func TestServiceSched_JobModify_ProposedDuplicateAllocIndex(t *testing.T) { alloc.Name = structs.AllocName(mockJob.ID, mockJob.TaskGroups[0].Name, uint(i)) allocs = append(allocs, alloc) } - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), time.Now().UnixNano(), allocs)) + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), allocs)) // Generate a job modification which will force a destructive update as // well as a scaling. 
@@ -2105,7 +2105,7 @@ func TestServiceSched_JobModify_ProposedDuplicateAllocIndex(t *testing.T) { canaryAlloc.Name = structs.AllocName(mockJob2.ID, mockJob2.TaskGroups[0].Name, uint(0)) canaryAlloc.DeploymentID = deploymentID canaryAlloc.ClientStatus = structs.AllocClientStatusRunning - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, nextRaftIndex, time.Now().UnixNano(), []*structs.Allocation{ + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, nextRaftIndex, []*structs.Allocation{ canaryAlloc, })) @@ -2131,7 +2131,7 @@ func TestServiceSched_JobModify_ProposedDuplicateAllocIndex(t *testing.T) { EvalPriority: 50, JobCreateIndex: mockJob2.CreateIndex, } - must.NoError(t, testHarness.State.UpsertDeployment(nextRaftIndex, time.Now().UnixNano(), &canaryDeployment)) + must.NoError(t, testHarness.State.UpsertDeployment(nextRaftIndex, &canaryDeployment)) // Create a mock evaluation which represents work to reconcile the job // update. @@ -2218,7 +2218,7 @@ func TestServiceSched_JobModify_ExistingDuplicateAllocIndexNonDestructive(t *tes allocs = append(allocs, alloc) allocIDs = append(allocIDs, alloc.ID) } - must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), time.Now().UnixNano(), allocs)) + must.NoError(t, testHarness.State.UpsertAllocs(structs.MsgTypeTestSetup, testHarness.NextIndex(), allocs)) // Generate a job modification which will be an in-place update. mockJob2 := mockJob.Copy() @@ -2300,7 +2300,7 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job to 2 DCs job2 := job.Copy() @@ -2375,7 +2375,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { alloc.Name = "my-job.web[0]" alloc.AllocatedResources.Tasks["web"].Cpu.CpuShares = 256 allocs = append(allocs, alloc) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job to count 3 job2.TaskGroups[0].Count = 3 @@ -2471,7 +2471,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -2484,7 +2484,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job to be count zero job2 := mock.Job() @@ -2573,7 +2573,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), 
time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -2699,7 +2699,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Update the job to place more versions of the task group, drop the count // and force destructive updates @@ -2803,7 +2803,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -2921,7 +2921,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { d := mock.Deployment() d.JobID = job.ID require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), d)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) taskName := job.TaskGroups[0].Tasks[0].Name @@ -2950,7 +2950,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { alloc.AllocatedResources.Shared = asr allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -3077,7 +3077,7 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.AllocatedResources = nil // 0.8 didn't have this - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Update the job inplace job2 := job.Copy() @@ -3175,7 +3175,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -3297,7 +3297,7 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { failedAllocID := failedAlloc.ID successAllocID := allocs[0].ID - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create and process a mock evaluation eval := &structs.Evaluation{ @@ -3397,7 +3397,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, 
h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -3466,7 +3466,7 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { alloc.JobID = job.ID allocs = append(allocs, alloc) } - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a summary where the queued allocs are set as we want to assert // they get zeroed out. @@ -3603,7 +3603,7 @@ func TestServiceSched_NodeDown(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(tc.migrate) allocs := []*structs.Allocation{alloc} - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -3757,7 +3757,7 @@ func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { }} } must.NoError(t, h.State.UpsertAllocs( - structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with node going down evals := []*structs.Evaluation{{ @@ -3865,14 +3865,14 @@ func TestServiceSched_NodeUpdate(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Mark some allocs as running ws := memdb.NewWatchSet() for i := 0; i < 4; i++ { out, _ := h.State.AllocByID(ws, allocs[i].ID) out.ClientStatus = structs.AllocClientStatusRunning - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{out})) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{out})) } // Create a mock evaluation which won't trigger any new placements @@ -3928,7 +3928,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -3996,8 +3996,6 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { job := mock.Job() require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) - now := time.Now().UnixNano() - var allocs []*structs.Allocation for i := 0; i < 10; i++ { alloc := mock.Alloc() @@ -4007,7 +4005,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now, allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Set the desired state of the allocs to stop var stop []*structs.Allocation @@ -4017,7 +4015,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.DesiredTransition.Migrate 
= pointer.Of(true) stop = append(stop, newAlloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now, stop)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), stop)) // Mark some of the allocations as running var running []*structs.Allocation @@ -4026,7 +4024,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusRunning running = append(running, newAlloc) } - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), now, running)) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), running)) // Mark some of the allocations as complete var complete []*structs.Allocation @@ -4045,7 +4043,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusComplete complete = append(complete, newAlloc) } - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), now, complete)) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), complete)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -4122,7 +4120,7 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) node.DrainStrategy = mock.DrainNode().DrainStrategy require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) @@ -4189,7 +4187,7 @@ func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { } allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) node.DrainStrategy = mock.DrainNode().DrainStrategy require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) @@ -4342,7 +4340,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { failedAllocID := allocs[1].ID successAllocID := allocs[0].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now.UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4387,7 +4385,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { // Mark this alloc as failed again, should not get rescheduled newAlloc.ClientStatus = structs.AllocClientStatusFailed - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{newAlloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) // Create another mock evaluation eval = &structs.Evaluation{ @@ -4456,7 +4454,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { FinishedAt: now}} failedAllocID := allocs[1].ID - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4545,7 +4543,7 @@ func 
TestServiceSched_Reschedule_MultipleNow(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now.UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4616,7 +4614,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { failedAllocId = newAlloc.ID failedNodeID = newAlloc.NodeID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{newAlloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) // Create another mock evaluation eval = &structs.Evaluation{ @@ -4678,7 +4676,7 @@ func TestServiceSched_BlockedReschedule(t *testing.T) { failedAllocID := alloc.ID must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, - h.NextIndex(), now.UnixNano(), []*structs.Allocation{alloc})) + h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation for the allocation failure eval := &structs.Evaluation{ @@ -4751,7 +4749,7 @@ func TestServiceSched_BlockedReschedule(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now}} must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, - h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation for the allocation failure eval.ID = uuid.Generate() @@ -4819,7 +4817,7 @@ func TestServiceSched_BlockedReschedule(t *testing.T) { alloc = alloc.Copy() alloc.FollowupEvalID = blockedEval.ID must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, - h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + h.NextIndex(), []*structs.Allocation{alloc})) must.NoError(t, h.Process(NewServiceScheduler, blockedEval)) must.Len(t, 5, h.Plans) @@ -4919,7 +4917,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { failedAllocID := allocs[1].ID successAllocID := allocs[0].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -5006,7 +5004,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { deployment.Status = structs.DeploymentStatusFailed } - require.Nil(h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) + require.Nil(h.State.UpsertDeployment(h.NextIndex(), deployment)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -5025,7 +5023,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { FinishedAt: time.Now().Add(-10 * time.Hour)}} allocs[1].DesiredTransition.Reschedule = pointer.Of(true) - require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -5083,7 +5081,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.ClientStatus = structs.AllocClientStatusComplete - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, 
h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5148,7 +5146,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead", StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5232,7 +5230,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5308,7 +5306,7 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead", StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), now.UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5371,7 +5369,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { }, }, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to rerun the job eval := &structs.Evaluation{ @@ -5438,7 +5436,7 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to trigger the job eval := &structs.Evaluation{ @@ -5492,7 +5490,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -5523,7 +5521,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { } allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -5575,7 +5573,7 @@ func TestBatchSched_NodeDrain_Running_OldJob(t 
*testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.ClientStatus = structs.AllocClientStatusRunning - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create an update job job2 := job.Copy() @@ -5658,7 +5656,7 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { }, }, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5727,7 +5725,7 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { alloc.Metrics = scoreMetric allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job's modify index to force an inplace upgrade updatedJob := job.Copy() @@ -6050,7 +6048,7 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true alloc.DesiredTransition.Migrate = pointer.Of(true) must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, alloc.Job)) - must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -6105,7 +6103,7 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { d.JobID = job.ID d.JobCreateIndex = job.CreateIndex d.JobModifyIndex = job.JobModifyIndex - 1 - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), d)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -6176,7 +6174,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { // Create a deployment for an old version of the job d := mock.Deployment() d.JobID = job.ID - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), d)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) // Upsert again to bump job version require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) @@ -6667,7 +6665,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) alloc := mock.Alloc() alloc.Job = job @@ -6678,7 +6676,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusRun alloc.ClientStatus = structs.AllocClientStatusRunning alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, 
h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation eval := &structs.Evaluation{ @@ -6742,7 +6740,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) var allocs []*structs.Allocation for i := 0; i < 3; i++ { @@ -6753,7 +6751,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // new update with new task group job2 := job.Copy() @@ -6940,7 +6938,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), initDeployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), initDeployment)) deploymentIDs := []string{initDeployment.ID} @@ -6974,7 +6972,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), time.Now().UnixNano(), deployment)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) deploymentIDs = append(deploymentIDs, deployment.ID) @@ -7026,7 +7024,7 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { // simulate a case where .NextAllocation is set but alloc is still running allocs[2].PreviousAllocation = allocs[0].ID allocs[0].NextAllocation = allocs[2].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // new update with new task group job2 := job.Copy() @@ -7112,7 +7110,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { shared.AccessMode = structs.CSIVolumeAccessModeMultiNodeReader require.NoError(h.State.UpsertCSIVolume( - h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{shared, vol0, vol1, vol2})) + h.NextIndex(), []*structs.CSIVolume{shared, vol0, vol1, vol2})) // Create a job that uses both job := mock.Job() @@ -7205,7 +7203,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { vol5 := vol0.Copy() vol5.ID = "volume-unique[4]" require.NoError(h.State.UpsertCSIVolume( - h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{vol4, vol5})) + h.NextIndex(), []*structs.CSIVolume{vol4, vol5})) // Process again with failure fixed. 
It should create a new plan eval.ID = uuid.Generate() @@ -7284,7 +7282,7 @@ func TestServiceSched_CSITopology(t *testing.T) { vol1.RequestedTopologies.Required[0].Segments["zone"] = "zone-1" require.NoError(t, h.State.UpsertCSIVolume( - h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{vol0, vol1})) + h.NextIndex(), []*structs.CSIVolume{vol0, vol1})) // Create a job that uses those volumes job := mock.Job() @@ -7535,7 +7533,7 @@ func TestServiceSched_Client_Disconnect_Creates_Updates_and_Evals(t *testing.T) // Simulate that NodeAllocation got processed. must.NoError(t, h.State.UpsertAllocs( - structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), + structs.MsgTypeTestSetup, h.NextIndex(), h.Plans[0].NodeAllocation[disconnectedNode.ID])) // Validate that the StateStore Upsert applied the ClientStatus we specified. @@ -7574,7 +7572,7 @@ func initNodeAndAllocs(t *testing.T, h *Harness, job *structs.Job, allocs[i] = alloc } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) return node, job, allocs } diff --git a/scheduler/preemption_test.go b/scheduler/preemption_test.go index 3a8c7c9ec97..eb718f5dcd3 100644 --- a/scheduler/preemption_test.go +++ b/scheduler/preemption_test.go @@ -8,7 +8,6 @@ import ( "maps" "strconv" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/numalib" @@ -1397,7 +1396,7 @@ func TestPreemption_Normal(t *testing.T) { alloc.NodeID = node.ID } require := require.New(t) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), tc.currentAllocations) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, tc.currentAllocations) require.Nil(err) if tc.currentPreemptions != nil { @@ -1532,7 +1531,7 @@ func TestPreemptionMultiple(t *testing.T) { allocs = append(allocs, alloc) allocIDs[alloc.ID] = struct{}{} } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().Unix(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // new high priority job with 2 allocs, each using 2 GPUs highPrioJob := mock.Job() diff --git a/scheduler/rank_test.go b/scheduler/rank_test.go index 788b885f6a9..6dec2f77784 100644 --- a/scheduler/rank_test.go +++ b/scheduler/rank_test.go @@ -6,7 +6,6 @@ package scheduler import ( "sort" "testing" - "time" "github.com/hashicorp/nomad/client/lib/idset" "github.com/hashicorp/nomad/client/lib/numalib" @@ -787,7 +786,7 @@ func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1378,7 +1377,7 @@ func TestBinPackIterator_ReservedCores(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), 
[]*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1490,7 +1489,7 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1604,7 +1603,7 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) { } require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) // Add a planned eviction to alloc1 plan := ctx.Plan() @@ -1928,7 +1927,7 @@ func TestBinPackIterator_Devices(t *testing.T) { for _, alloc := range c.ExistingAllocs { alloc.NodeID = c.Node.ID } - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), c.ExistingAllocs)) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, c.ExistingAllocs)) } static := NewStaticRankIterator(ctx, []*RankedNode{{Node: c.Node}}) diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index df1c0b98548..7c14fa10f47 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -7,7 +7,6 @@ import ( "fmt" "sort" "testing" - "time" "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" @@ -116,7 +115,7 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusRunning allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a new node. node := mock.Node() @@ -194,7 +193,7 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a new node. 
node := mock.Node() @@ -271,7 +270,7 @@ func TestSysBatch_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusPending allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be reinstated var terminal []*structs.Allocation @@ -284,7 +283,7 @@ func TestSysBatch_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job job2 := mock.SystemBatchJob() @@ -359,7 +358,7 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { alloc.Name = "my-sysbatch.pinger[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.SystemBatchJob() @@ -436,7 +435,7 @@ func TestSysBatch_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -499,7 +498,7 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { for _, alloc := range allocs { require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -557,7 +556,7 @@ func TestSysBatch_NodeDown(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -617,7 +616,7 @@ func TestSysBatch_NodeDrain_Down(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -671,7 +670,7 @@ func TestSysBatch_NodeDrain(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), 
[]*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -727,7 +726,7 @@ func TestSysBatch_NodeUpdate(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-system.pinger[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1335,7 +1334,7 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { alloc2.NodeID = node2.ID alloc2.Name = "my-sysbatch.pinger2[0]" alloc2.TaskGroup = "pinger2" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1584,7 +1583,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) // Create a high priority job and allocs for it // These allocs should not be preempted @@ -1628,7 +1627,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, } require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc4})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed job := mock.SystemBatchJob() diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index a667a570952..ef0fc491910 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -136,7 +136,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { // Get an allocation and mark it as failed alloc := planned[4].Copy() alloc.ClientStatus = structs.AllocClientStatusFailed - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to handle the update eval = &structs.Evaluation{ @@ -441,7 +441,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a new node. 
node := mock.Node() @@ -555,7 +555,7 @@ func TestSystemSched_JobModify(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -568,7 +568,7 @@ func TestSystemSched_JobModify(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), terminal)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job job2 := mock.SystemJob() @@ -644,7 +644,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.SystemJob() @@ -743,7 +743,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.SystemJob() @@ -835,7 +835,7 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := job.Copy() @@ -913,7 +913,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -976,7 +976,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { for _, alloc := range allocs { require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs)) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -1034,7 +1034,7 @@ func TestSystemSched_NodeDown(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1094,7 +1094,7 @@ func 
TestSystemSched_NodeDrain_Down(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1148,7 +1148,7 @@ func TestSystemSched_NodeDrain(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1204,7 +1204,7 @@ func TestSystemSched_NodeUpdate(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1757,7 +1757,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { alloc2.NodeID = node2.ID alloc2.Name = "my-job.web2[0]" alloc2.TaskGroup = "web2" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2006,7 +2006,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) // Create a high priority job and allocs for it // These allocs should not be preempted @@ -2050,7 +2050,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, } require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc4})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed job := mock.SystemJob() @@ -2916,7 +2916,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { alloc.TaskStates = tc.taskState if tc.exists { - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) } if tc.modifyJob { @@ -2937,7 +2937,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { prev.ClientStatus = structs.AllocClientStatusComplete prev.DesiredStatus = structs.AllocDesiredStatusRun - 
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), []*structs.Allocation{prev})) + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{prev})) } // Create a mock evaluation to deal with disconnect eval := &structs.Evaluation{ @@ -3051,7 +3051,7 @@ func TestSystemSched_CSITopology(t *testing.T) { } must.NoError(t, h.State.UpsertCSIVolume( - h.NextIndex(), time.Now().UnixNano(), []*structs.CSIVolume{vol0})) + h.NextIndex(), []*structs.CSIVolume{vol0})) // Create a job that uses that volumes job := mock.SystemJob() diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index dbd154dca01..f23ad185136 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -67,7 +67,7 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) { }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -229,7 +229,7 @@ func TestSpreadIterator_MultipleAttributes(t *testing.T) { }, } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), upserting); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, upserting); err != nil { t.Fatalf("failed to UpsertAllocs: %v", err) } @@ -996,7 +996,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { } allocs = append(allocs, alloc) } - err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), time.Now().UnixNano(), allocs) + err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs) require.NoError(t, err) // job version 2 diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index a800c3ca355..ebdda77c101 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -8,7 +8,6 @@ import ( "reflect" "runtime" "testing" - "time" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" @@ -335,7 +334,7 @@ func TestServiceStack_Select_CSI(t *testing.T) { v.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem v.PluginID = "bar" - err := state.UpsertCSIVolume(999, time.Now().UnixNano(), []*structs.CSIVolume{v}) + err := state.UpsertCSIVolume(999, []*structs.CSIVolume{v}) must.NoError(t, err) // Create a node with healthy fingerprints for both controller and node plugins diff --git a/scheduler/testing.go b/scheduler/testing.go index 8f35047c8f9..347a279069c 100644 --- a/scheduler/testing.go +++ b/scheduler/testing.go @@ -180,7 +180,7 @@ func (h *Harness) SubmitPlan(plan *structs.Plan) (*structs.PlanResult, State, er } // Apply the full plan - err := h.State.UpsertPlanResults(structs.MsgTypeTestSetup, index, time.Now().UnixNano(), &req) + err := h.State.UpsertPlanResults(structs.MsgTypeTestSetup, index, &req) return result, nil, err } diff --git a/scheduler/util_test.go b/scheduler/util_test.go index 980a5b267ec..b0d17b37aa1 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -829,7 +829,7 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Create a new task group that prevents in-place updates. tg := &structs.TaskGroup{} @@ -885,7 +885,7 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Update TG to add a new service (inplace) tg := job.TaskGroups[0] @@ -945,7 +945,7 @@ func TestInplaceUpdate_NoMatch(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Create a new task group that requires too much resources. tg := &structs.TaskGroup{} @@ -998,7 +998,7 @@ func TestInplaceUpdate_Success(t *testing.T) { } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Create a new task group that updates the resources. tg := &structs.TaskGroup{} @@ -1070,7 +1070,7 @@ func TestInplaceUpdate_WildcardDatacenters(t *testing.T) { alloc.Job = job alloc.JobID = job.ID must.NoError(t, store.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) updates := []allocTuple{{Alloc: alloc, TaskGroup: job.TaskGroups[0]}} stack := NewGenericStack(false, ctx) @@ -1111,7 +1111,7 @@ func TestInplaceUpdate_NodePools(t *testing.T) { t.Logf("alloc1=%s alloc2=%s", alloc1.ID, alloc2.ID) - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, time.Now().UnixNano(), + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc1, alloc2})) updates := []allocTuple{ From 2e722c6c08c4d053810c9d5aeba3d2f6c43e7855 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 15:06:34 +0100 Subject: [PATCH 18/40] state store and test fixes --- nomad/state/state_store.go | 6 +- nomad/state/state_store_test.go | 167 ++++++++++++++------------------ 2 files changed, 78 insertions(+), 95 deletions(-) diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index a7c40b9165b..a4a5eab2f5f 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -415,7 +415,7 @@ func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64 // Update the status of deployments effected by the plan. 
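The state_store.go hunk that continues directly below replaces the wall-clock now value that UpsertPlanResults used to thread into upsertDeploymentUpdates and upsertAllocsImpl with results.UpdatedAt, the timestamp already carried in the applied plan request. A plausible motivation, not spelled out in the commit message, is determinism: every server replaying the same Raft entry should stamp identical values, which rules out reading the local clock during apply. The self-contained Go sketch below only illustrates that pattern; the type and function names are invented and are not part of this patch.

package main

import (
    "fmt"
    "time"
)

// applyRequest mimics a Raft log payload: it carries the timestamp that was
// decided when the request was built, so every server applying the entry
// records the same value.
type applyRequest struct {
    Index     uint64
    UpdatedAt int64 // nanoseconds, chosen by the submitter, not by the applier
}

type record struct {
    CreateIndex uint64
    ModifyTime  int64
}

type store struct {
    records []record
}

// apply stamps the record with the timestamp carried in the request rather
// than calling time.Now() here, so replaying the same log yields identical
// state on every node.
func (s *store) apply(req applyRequest) {
    s.records = append(s.records, record{
        CreateIndex: req.Index,
        ModifyTime:  req.UpdatedAt,
    })
}

func main() {
    req := applyRequest{Index: 1000, UpdatedAt: time.Now().UnixNano()}

    a, b := &store{}, &store{}
    a.apply(req)
    b.apply(req)

    // Both "servers" end up with exactly the same record.
    fmt.Println(a.records[0] == b.records[0]) // prints true
}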
if len(results.DeploymentUpdates) != 0 { - s.upsertDeploymentUpdates(index, now, results.DeploymentUpdates, txn) + s.upsertDeploymentUpdates(index, results.UpdatedAt, results.DeploymentUpdates, txn) } if results.EvalID != "" { @@ -457,7 +457,7 @@ func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64 alloc.Canonicalize() } - if err := s.upsertAllocsImpl(index, now, allocsToUpsert, txn); err != nil { + if err := s.upsertAllocsImpl(index, results.UpdatedAt, allocsToUpsert, txn); err != nil { return err } @@ -517,7 +517,7 @@ func addComputedAllocAttrs(allocs []*structs.Allocation, job *structs.Job) { // updates. func (s *StateStore) upsertDeploymentUpdates(index uint64, now int64, updates []*structs.DeploymentStatusUpdate, txn *txn) error { for _, u := range updates { - if err := s.updateDeploymentStatusImpl(index, now, u, txn); err != nil { + if err := s.updateDeploymentStatusImpl(index, u, txn); err != nil { return err } } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 8cede33131a..59e7aecd9d3 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -143,7 +143,7 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing EvalID: eval.ID, } assert := assert.New(t) - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) assert.Nil(err) ws := memdb.NewWatchSet() @@ -220,7 +220,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { } assert := assert.New(t) planModifyIndex := uint64(1000) - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, planModifyIndex, time.Now().UnixNano(), &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, planModifyIndex, &res) require.NoError(err) ws := memdb.NewWatchSet() @@ -301,8 +301,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { EvalID: eval.ID, } - now := time.Now().UnixNano() - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, now, &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) if err != nil { t.Fatalf("err: %v", err) } @@ -350,7 +349,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { EvalID: eval.ID, } - err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1001, now, &res) + err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1001, &res) if err != nil { t.Fatalf("err: %v", err) } @@ -420,7 +419,7 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { PreemptionEvals: []*structs.Evaluation{eval2}, } - err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, now, &res) + err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) require.NoError(err) ws := memdb.NewWatchSet() @@ -460,8 +459,6 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() - // Create a job that applies to all job := mock.Job() if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, job); err != nil { @@ -472,7 +469,7 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { doutstanding := mock.Deployment() doutstanding.JobID = job.ID - if err := state.UpsertDeployment(1000, now, doutstanding); err != nil { + if err := state.UpsertDeployment(1000, doutstanding); err != nil { t.Fatalf("err: %v", err) } @@ -508,7 +505,7 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t 
*testing.T) { EvalID: eval.ID, } - err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, now, &res) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) if err != nil { t.Fatalf("err: %v", err) } @@ -576,7 +573,7 @@ func TestStateStore_UpsertPlanResults_AllocationResources(t *testing.T) { EvalID: eval.ID, } - must.NoError(t, state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), &res)) + must.NoError(t, state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res)) out, err := state.AllocByID(nil, alloc.ID) must.NoError(t, err) @@ -598,7 +595,7 @@ func TestStateStore_UpsertDeployment(t *testing.T) { t.Fatalf("bad: %v", err) } - err = state.UpsertDeployment(1000, time.Now().UnixNano(), deployment) + err = state.UpsertDeployment(1000, deployment) if err != nil { t.Fatalf("err: %v", err) } @@ -648,13 +645,11 @@ func TestStateStore_OldDeployment(t *testing.T) { require := require.New(t) - now := time.Now().UnixNano() - // Insert both deployments - err := state.UpsertDeployment(1001, now, deploy1) + err := state.UpsertDeployment(1001, deploy1) require.Nil(err) - err = state.UpsertDeployment(1002, now, deploy2) + err = state.UpsertDeployment(1002, deploy2) require.Nil(err) ws := memdb.NewWatchSet() @@ -677,13 +672,11 @@ func TestStateStore_DeleteDeployment(t *testing.T) { d1 := mock.Deployment() d2 := mock.Deployment() - now := time.Now().UnixNano() - - err := state.UpsertDeployment(1000, now, d1) + err := state.UpsertDeployment(1000, d1) if err != nil { t.Fatalf("err: %v", err) } - if err := state.UpsertDeployment(1001, now, d2); err != nil { + if err := state.UpsertDeployment(1001, d2); err != nil { t.Fatalf("err: %v", err) } @@ -735,7 +728,7 @@ func TestStateStore_Deployments(t *testing.T) { deployment := mock.Deployment() deployments = append(deployments, deployment) - err := state.UpsertDeployment(1000+uint64(i), time.Now().UnixNano(), deployment) + err := state.UpsertDeployment(1000+uint64(i), deployment) require.NoError(t, err) } @@ -784,11 +777,10 @@ func TestStateStore_Deployments_Namespace(t *testing.T) { _, err = state.DeploymentsByNamespace(watches[1], ns2.Name) require.NoError(t, err) - now := time.Now().UnixNano() - require.NoError(t, state.UpsertDeployment(1001, now, deploy1)) - require.NoError(t, state.UpsertDeployment(1002, now, deploy2)) - require.NoError(t, state.UpsertDeployment(1003, now, deploy3)) - require.NoError(t, state.UpsertDeployment(1004, now, deploy4)) + require.NoError(t, state.UpsertDeployment(1001, deploy1)) + require.NoError(t, state.UpsertDeployment(1002, deploy2)) + require.NoError(t, state.UpsertDeployment(1003, deploy3)) + require.NoError(t, state.UpsertDeployment(1004, deploy4)) require.True(t, watchFired(watches[0])) require.True(t, watchFired(watches[1])) @@ -838,10 +830,8 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { state := testStateStore(t) deploy := mock.Deployment() - now := time.Now().UnixNano() - deploy.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" - err := state.UpsertDeployment(1000, now, deploy) + err := state.UpsertDeployment(1000, deploy) require.NoError(t, err) gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { @@ -880,7 +870,7 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { deploy = mock.Deployment() deploy.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" - err = state.UpsertDeployment(1001, now, deploy) + err = state.UpsertDeployment(1001, deploy) require.NoError(t, err) t.Run("more than one", func(t *testing.T) { @@ -937,10 +927,9 @@ func 
TestStateStore_DeploymentsByIDPrefix_Namespaces(t *testing.T) { deploy1.Namespace = ns1.Name deploy2.Namespace = ns2.Name - now := time.Now().UnixNano() require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertDeployment(1000, now, deploy1)) - require.NoError(t, state.UpsertDeployment(1001, now, deploy2)) + require.NoError(t, state.UpsertDeployment(1000, deploy1)) + require.NoError(t, state.UpsertDeployment(1001, deploy2)) gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { var deploys []*structs.Deployment @@ -4235,7 +4224,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { // from multiple nodes concurrently but not in a single // RPC call. But this guarantees we'll trigger any nested // transaction setup bugs - err = store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIndex(store), now, allocs) + err = store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIndex(store), allocs) } must.NoError(t, err) return allocs @@ -5843,7 +5832,7 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { JobID: alloc.JobID, TaskGroup: alloc.TaskGroup, } - err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{update}) + err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update}) must.NoError(t, err) must.True(t, watchFired(ws)) @@ -5924,7 +5913,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { TaskGroup: alloc2.TaskGroup, } - err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{update, update2}) + err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update, update2}) must.NoError(t, err) for _, ws := range watches { @@ -6004,7 +5993,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { TaskGroup: alloc.TaskGroup, } - err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{update, update2}) + err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update, update2}) must.NoError(t, err) ws := memdb.NewWatchSet() @@ -6053,7 +6042,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - must.NoError(t, state.UpsertDeployment(1000, now.UnixNano(), deployment)) + must.NoError(t, state.UpsertDeployment(1000, deployment)) must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc})) healthy := now.Add(time.Second) @@ -6068,7 +6057,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { Timestamp: healthy, }, } - must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{update})) + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update})) // Check that the deployment state was updated because the healthy // deployment @@ -6105,7 +6094,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - must.NoError(t, state.UpsertDeployment(1000, now.UnixNano(), deployment)) + must.NoError(t, state.UpsertDeployment(1000, deployment)) 
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc})) update := &structs.Allocation{ @@ -6119,7 +6108,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { Canary: false, }, } - must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{update})) + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update})) // Check that the merging of the deployment status was correct out, err := state.AllocByID(nil, alloc.ID) @@ -6194,7 +6183,7 @@ func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) { TaskGroup: "group", } - err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1005, now, []*structs.Allocation{ + err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{ updateAlloc1, updateAlloc2, updateAllocNonExisting, }) must.NoError(t, err) @@ -6315,7 +6304,7 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { alloc.DeploymentID = deployment.ID require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertDeployment(1000, now.UnixNano(), deployment)) + require.Nil(state.UpsertDeployment(1000, deployment)) // Create a watch set so we can test that update fires the watch ws := memdb.NewWatchSet() @@ -6660,7 +6649,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { // Update the client state of the allocation to complete allocCopy1 := allocCopy.Copy() allocCopy1.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{allocCopy1}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{allocCopy1}); err != nil { t.Fatalf("err: %v", err) } @@ -6772,12 +6761,12 @@ func TestStateStore_JobSummary(t *testing.T) { alloc1 := alloc.Copy() alloc1.ClientStatus = structs.AllocClientStatusPending alloc1.DesiredStatus = "" - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 920, now, []*structs.Allocation{alloc}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 920, []*structs.Allocation{alloc}) alloc3 := alloc.Copy() alloc3.ClientStatus = structs.AllocClientStatusRunning alloc3.DesiredStatus = "" - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 930, now, []*structs.Allocation{alloc3}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 930, []*structs.Allocation{alloc3}) // Upsert the alloc alloc4 := alloc.Copy() @@ -6820,7 +6809,7 @@ func TestStateStore_JobSummary(t *testing.T) { alloc6 := alloc.Copy() alloc6.ClientStatus = structs.AllocClientStatusRunning alloc6.DesiredStatus = "" - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 990, now, []*structs.Allocation{alloc6}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 990, []*structs.Allocation{alloc6}) // We shouldn't have any summary at this point summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) @@ -6847,7 +6836,7 @@ func TestStateStore_JobSummary(t *testing.T) { alloc7.Job = outJob alloc7.ClientStatus = structs.AllocClientStatusComplete alloc7.DesiredStatus = structs.AllocDesiredStatusRun - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1020, now, []*structs.Allocation{alloc7}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1020, []*structs.Allocation{alloc7}) expectedSummary = structs.JobSummary{ JobID: job.ID, @@ -6893,7 +6882,7 @@ func 
TestStateStore_ReconcileJobSummary(t *testing.T) { // Change the state of the first alloc to running alloc3 := alloc.Copy() alloc3.ClientStatus = structs.AllocClientStatusRunning - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 120, now, []*structs.Allocation{alloc3}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 120, []*structs.Allocation{alloc3}) //Add some more allocs to the second tg alloc4 := mock.Alloc() @@ -6932,7 +6921,7 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { state.UpsertAllocs(structs.MsgTypeTestSetup, 130, now, []*structs.Allocation{alloc4, alloc6, alloc8, alloc10, alloc12}) - state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 150, now, []*structs.Allocation{alloc5, alloc7, alloc9, alloc11}) + state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 150, []*structs.Allocation{alloc5, alloc7, alloc9, alloc11}) // DeleteJobSummary is a helper method and doesn't modify the indexes table state.DeleteJobSummary(130, alloc.Namespace, alloc.Job.ID) @@ -7073,7 +7062,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { alloc1.ClientStatus = structs.AllocClientStatusRunning // Updating allocation should not throw any error - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, now, []*structs.Allocation{alloc1}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil { t.Fatalf("expect err: %v", err) } @@ -7083,7 +7072,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { // Update the alloc again alloc2 := alloc.Copy() alloc2.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, now, []*structs.Allocation{alloc1}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil { t.Fatalf("expect err: %v", err) } @@ -8040,7 +8029,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { alloc5.JobID = alloc3.JobID alloc5.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1004, now, []*structs.Allocation{alloc4, alloc5}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc4, alloc5}); err != nil { t.Fatalf("err: %v", err) } @@ -8118,7 +8107,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc6.JobID = alloc.JobID alloc6.ClientStatus = structs.AllocClientStatusRunning - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil { + if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil { t.Fatalf("err: %v", err) } @@ -8157,7 +8146,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) { Status: structs.DeploymentStatusRunning, }, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), req) + err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req) if err == nil || !strings.Contains(err.Error(), "does not exist") { t.Fatalf("expected error updating the status because the deployment doesn't exist") } @@ -8168,13 +8157,12 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Insert a terminal deployment d := mock.Deployment() d.Status = 
structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, now, d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8185,7 +8173,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { Status: structs.DeploymentStatusRunning, }, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, now, req) + err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req) if err == nil || !strings.Contains(err.Error(), "has terminal status") { t.Fatalf("expected error updating the status because the deployment is terminal") } @@ -8197,11 +8185,10 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, now, d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8220,7 +8207,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { Job: j, Eval: e, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, now, req) + err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req) if err != nil { t.Fatalf("bad: %v", err) } @@ -8269,8 +8256,8 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { } // Insert a deployment - d := structs.NewDeployment(job, 50) - if err := state.UpsertDeployment(2, now, d); err != nil { + d := structs.NewDeployment(job, 50, now) + if err := state.UpsertDeployment(2, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8282,7 +8269,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { StatusDescription: structs.DeploymentStatusDescriptionSuccessful, }, } - err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 3, now, req) + err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 3, req) if err != nil { t.Fatalf("bad: %v", err) } @@ -8362,7 +8349,7 @@ func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, req) if err == nil || !strings.Contains(err.Error(), "does not exist") { t.Fatalf("expected error promoting because the deployment doesn't exist") } @@ -8373,13 +8360,12 @@ func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Insert a terminal deployment d := mock.Deployment() d.Status = structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, now, d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8390,7 +8376,7 @@ func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, now, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, req) if err == nil || !strings.Contains(err.Error(), "has terminal status") { t.Fatalf("expected error updating the status because the deployment is terminal: %v", err) } @@ -8412,7 +8398,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.TaskGroups["web"].DesiredCanaries = 2 - require.Nil(state.UpsertDeployment(2, now, d)) + require.Nil(state.UpsertDeployment(2, d)) // Create a set of 
allocations c1 := mock.Alloc() @@ -8441,7 +8427,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) require.NotNil(err) require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) } @@ -8450,7 +8436,6 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() state := testStateStore(t) require := require.New(t) @@ -8462,7 +8447,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { d := mock.Deployment() d.TaskGroups["web"].DesiredCanaries = 2 d.JobID = j.ID - require.Nil(state.UpsertDeployment(2, now, d)) + require.Nil(state.UpsertDeployment(2, d)) // Promote the canaries req := &structs.ApplyDeploymentPromoteRequest{ @@ -8471,7 +8456,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { All: true, }, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) require.NotNil(err) require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) } @@ -8507,7 +8492,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { DesiredCanaries: 1, }, } - if err := state.UpsertDeployment(2, now, d); err != nil { + if err := state.UpsertDeployment(2, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8543,7 +8528,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { }, Eval: e, } - err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req) + err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) if err != nil { t.Fatalf("bad: %v", err) } @@ -8605,7 +8590,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { DesiredCanaries: 1, }, } - require.Nil(state.UpsertDeployment(2, now, d)) + require.Nil(state.UpsertDeployment(2, d)) // Create a set of allocations for both groups, including an unhealthy one c1 := mock.Alloc() @@ -8650,7 +8635,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { }, Eval: e, } - require.Nil(state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, now, req)) + require.Nil(state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req)) // Check that the status per task group was updated properly ws := memdb.NewWatchSet() @@ -8693,7 +8678,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) { HealthyAllocationIDs: []string{uuid.Generate()}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req) if err == nil || !strings.Contains(err.Error(), "does not exist") { t.Fatalf("expected error because the deployment doesn't exist: %v", err) } @@ -8704,13 +8689,12 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Insert a terminal deployment d := mock.Deployment() d.Status = structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, now, d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8721,7 +8705,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t 
*testing.T) { HealthyAllocationIDs: []string{uuid.Generate()}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, now, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req) if err == nil || !strings.Contains(err.Error(), "has terminal status") { t.Fatalf("expected error because the deployment is terminal: %v", err) } @@ -8732,11 +8716,10 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing. ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, now, d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8747,7 +8730,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing. HealthyAllocationIDs: []string{uuid.Generate()}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, now, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req) if err == nil || !strings.Contains(err.Error(), "unknown alloc") { t.Fatalf("expected error because the alloc doesn't exist: %v", err) } @@ -8762,7 +8745,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { // Create a deployment d1 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(2, now, d1)) + require.NoError(t, state.UpsertDeployment(2, d1)) // Create a Job job := mock.Job() @@ -8806,7 +8789,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { // Create a second deployment d2 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(5, now, d2)) + require.NoError(t, state.UpsertDeployment(5, d2)) c := mock.Alloc() c.JobID = job.ID @@ -8833,7 +8816,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { // Create a deployment d1 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(2, now, d1)) + require.NoError(t, state.UpsertDeployment(2, d1)) // Create a Job job := mock.Job() @@ -8869,10 +8852,10 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t // Insert two deployment d1 := mock.Deployment() d2 := mock.Deployment() - if err := state.UpsertDeployment(1, now, d1); err != nil { + if err := state.UpsertDeployment(1, d1); err != nil { t.Fatalf("bad: %v", err) } - if err := state.UpsertDeployment(2, now, d2); err != nil { + if err := state.UpsertDeployment(2, d2); err != nil { t.Fatalf("bad: %v", err) } @@ -8890,7 +8873,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t HealthyAllocationIDs: []string{a.ID}, }, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 4, now, req) + err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 4, req) if err == nil || !strings.Contains(err.Error(), "not part of deployment") { t.Fatalf("expected error because the alloc isn't part of the deployment: %v", err) } @@ -8906,7 +8889,7 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { // Insert a deployment d := mock.Deployment() d.TaskGroups["web"].ProgressDeadline = 5 * time.Minute - if err := state.UpsertDeployment(1, now, d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -8948,7 +8931,7 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { DeploymentUpdate: u, Timestamp: ts, } - err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 3, now, req) + err := 
state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 3, req) if err != nil { t.Fatalf("bad: %v", err) } From 18166f46ad09c3850db202e4d3b2d7c7466f2d14 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:29:07 +0100 Subject: [PATCH 19/40] fsm_test fixes --- nomad/fsm_test.go | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 331e3929867..1d5375cbb1e 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -1387,7 +1387,7 @@ func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) { alloc2.NodeID = node.ID state.UpsertJobSummary(8, mock.JobSummary(alloc.JobID)) state.UpsertJobSummary(9, mock.JobSummary(alloc2.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc, alloc2}) clientAlloc := new(structs.Allocation) *clientAlloc = *alloc @@ -1455,7 +1455,7 @@ func TestFSM_UpdateAllocFromClient(t *testing.T) { alloc := mock.Alloc() state.UpsertJobSummary(9, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc}) clientAlloc := new(structs.Allocation) *clientAlloc = *alloc @@ -1506,7 +1506,7 @@ func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { alloc2.Job = alloc.Job alloc2.JobID = alloc.JobID state.UpsertJobSummary(9, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 10, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc, alloc2}) t1 := &structs.DesiredTransition{ Migrate: pointer.Of(true), @@ -1735,7 +1735,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) { alloc2.JobID = job2.ID alloc2.PreemptedByAllocation = alloc.ID - fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1, []*structs.Allocation{alloc1, alloc2}) // evals for preempted jobs eval1 := mock.Eval() @@ -1849,7 +1849,7 @@ func TestFSM_DeploymentStatusUpdate(t *testing.T) { // Upsert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, time.Now().UnixNano(), d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -1980,7 +1980,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { DesiredCanaries: 1, }, } - if err := state.UpsertDeployment(2, time.Now().UnixNano(), d); err != nil { + if err := state.UpsertDeployment(2, d); err != nil { t.Fatalf("bad: %v", err) } @@ -2001,7 +2001,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { Healthy: pointer.Of(true), } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, time.Now().UnixNano(), []*structs.Allocation{c1, c2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { t.Fatalf("err: %v", err) } @@ -2064,7 +2064,7 @@ func TestFSM_DeploymentAllocHealth(t *testing.T) { // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, time.Now().UnixNano(), d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -2073,7 +2073,7 @@ func TestFSM_DeploymentAllocHealth(t *testing.T) { 
a1.DeploymentID = d.ID a2 := mock.Alloc() a2.DeploymentID = d.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, time.Now().UnixNano(), []*structs.Allocation{a1, a2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{a1, a2}); err != nil { t.Fatalf("bad: %v", err) } @@ -2170,7 +2170,7 @@ func TestFSM_DeleteDeployment(t *testing.T) { // Upsert a deployments d := mock.Deployment() - if err := state.UpsertDeployment(1, time.Now().UnixNano(), d); err != nil { + if err := state.UpsertDeployment(1, d); err != nil { t.Fatalf("bad: %v", err) } @@ -2476,8 +2476,8 @@ func TestFSM_SnapshotRestore_Allocs(t *testing.T) { alloc2 := mock.Alloc() state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1}) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) @@ -2504,7 +2504,7 @@ func TestFSM_SnapshotRestore_Allocs_Canonicalize(t *testing.T) { alloc.AllocatedResources = nil state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) @@ -2668,8 +2668,8 @@ func TestFSM_SnapshotRestore_Deployments(t *testing.T) { d2.JobID = j.ID state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j) - state.UpsertDeployment(1000, time.Now().UnixNano(), d1) - state.UpsertDeployment(1001, time.Now().UnixNano(), d2) + state.UpsertDeployment(1000, d1) + state.UpsertDeployment(1001, d2) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) @@ -2959,7 +2959,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) // Delete the summaries require.NoError(t, state.DeleteJobSummary(1030, job1.Namespace, job1.ID)) @@ -3056,7 +3056,7 @@ func TestFSM_ReconcileParentJobSummary(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusRunning state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, childJob) - state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, time.Now().UnixNano(), []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc}) // Make the summary incorrect in the state store summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID) @@ -3098,7 +3098,7 @@ func TestFSM_LeakedDeployments(t *testing.T) { fsm := testFSM(t) state := fsm.State() d := mock.Deployment() - require.NoError(state.UpsertDeployment(1000, time.Now().UnixNano(), d)) + require.NoError(state.UpsertDeployment(1000, d)) // Verify the contents fsm2 := testSnapshotRestore(t, fsm) From 3a0f7460c92fcef7084fbf840fd3cafbda68cbe1 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak 
<470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 17:00:00 +0100 Subject: [PATCH 20/40] fixed some csi tests --- nomad/core_sched_test.go | 62 ++++----- nomad/csi_endpoint_test.go | 6 +- nomad/mock/csi.go | 3 + nomad/plan_apply.go | 2 +- nomad/state/state_store.go | 15 +- nomad/state/state_store_test.go | 145 ++++++++------------ nomad/volumewatcher/volumes_watcher_test.go | 14 +- scheduler/reconcile_test.go | 86 ++++++------ 8 files changed, 149 insertions(+), 184 deletions(-) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 31bc33f6c03..8629b01fa83 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -62,7 +62,7 @@ func TestCoreScheduler_EvalGC(t *testing.T) { alloc2.JobID = eval.JobID alloc2.TaskGroup = job.TaskGroups[0].Name must.NoError(t, store.UpsertAllocs( - structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) + structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2})) // Insert service for "dead" alloc service := &structs.ServiceRegistration{ @@ -172,7 +172,7 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { }, }, } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2}) require.Nil(t, err) // Create a core scheduler @@ -244,7 +244,7 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { }, }, } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) require.Nil(t, err) // Create a core scheduler @@ -321,10 +321,8 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun stoppedJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - now := time.Now().UnixNano() - err = store.UpsertAllocs( - structs.MsgTypeTestSetup, jobModifyIdx+3, now, + structs.MsgTypeTestSetup, jobModifyIdx+3, []*structs.Allocation{stoppedJobStoppedAlloc, stoppedJobLostAlloc}) must.NoError(t, err) @@ -359,7 +357,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { lostAlloc.DesiredStatus = structs.AllocDesiredStatusRun lostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, now, []*structs.Allocation{stoppedAlloc, lostAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Allocation{stoppedAlloc, lostAlloc}) must.NoError(t, err) // An "alive" job #2 containing two complete evals. 
The first with: @@ -397,7 +395,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun activeJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, now, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc}) must.NoError(t, err) activeJobCompleteEval := mock.Eval() @@ -414,7 +412,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompletedEvalCompletedAlloc.DesiredStatus = structs.AllocDesiredStatusStop activeJobCompletedEvalCompletedAlloc.ClientStatus = structs.AllocClientStatusComplete - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, now, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc}) must.NoError(t, err) // A job that ran once and was then purged. @@ -438,7 +436,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteAlloc.DesiredStatus = structs.AllocDesiredStatusRun purgedJobCompleteAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, now, []*structs.Allocation{purgedJobCompleteAlloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{purgedJobCompleteAlloc}) must.NoError(t, err) purgedJobCompleteEval := mock.Eval() @@ -706,9 +704,7 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { alloc2.DesiredStatus = structs.AllocDesiredStatusRun alloc2.ClientStatus = structs.AllocClientStatusLost - now := time.Now().UnixNano() - - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc, alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -718,7 +714,7 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { alloc3.EvalID = eval.ID alloc3.JobID = job.ID store.UpsertJobSummary(1003, mock.JobSummary(alloc3.JobID)) - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, now, []*structs.Allocation{alloc3}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc3}) if err != nil { t.Fatalf("err: %v", err) } @@ -821,7 +817,7 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.TaskGroup = job.TaskGroups[0].Name store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -931,7 +927,7 @@ func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { alloc := mock.Alloc() alloc.DesiredStatus = structs.AllocDesiredStatusStop store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) - if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -982,7 +978,7 @@ func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { alloc.DesiredStatus = 
structs.AllocDesiredStatusRun alloc.ClientStatus = structs.AllocClientStatusRunning store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) - if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -1199,8 +1195,6 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { t.Fatalf("err: %v", err) } - now := time.Now().UnixNano() - // Insert two allocs, one terminal and one not alloc := mock.Alloc() alloc.JobID = job.ID @@ -1216,7 +1210,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusRunning alloc2.TaskGroup = job.TaskGroups[0].Name - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc, alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1263,7 +1257,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { // Update the second alloc to be terminal alloc2.ClientStatus = structs.AllocClientStatusComplete - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1351,7 +1345,7 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) { alloc2.EvalID = eval2.ID alloc2.DesiredStatus = structs.AllocDesiredStatusRun - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -1456,7 +1450,7 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { alloc.EvalID = eval.ID alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.TaskGroup = job.TaskGroups[0].Name - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -1737,8 +1731,6 @@ func TestCoreScheduler_jobGC(t *testing.T) { testFn := func(inputJob *structs.Job) { - now := time.Now().UnixNano() - // Create and upsert a job which has a completed eval and 2 running // allocations associated. inputJob.Status = structs.JobStatusRunning @@ -1763,7 +1755,7 @@ func TestCoreScheduler_jobGC(t *testing.T) { must.NoError(t, testServer.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 10, []*structs.Evaluation{mockEval1})) must.NoError(t, - testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, now, []*structs.Allocation{ + testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{ mockJob1Alloc1, mockJob1Alloc2})) // Trigger a run of the job GC using the forced GC max index value to @@ -1825,7 +1817,7 @@ func TestCoreScheduler_jobGC(t *testing.T) { mockJob1Alloc2.ClientStatus = structs.AllocClientStatusComplete must.NoError(t, - testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 30, now, []*structs.Allocation{ + testServer.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 30, []*structs.Allocation{ mockJob1Alloc1, mockJob1Alloc2})) // Force another GC. 
This time all objects are in a terminal state, so @@ -1863,21 +1855,19 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { testutil.WaitForLeader(t, s1.RPC) assert := assert.New(t) - now := time.Now().UnixNano() - // Insert an active, terminal, and terminal with allocations deployment store := s1.fsm.State() d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed d3.Status = structs.DeploymentStatusSuccessful - assert.Nil(store.UpsertDeployment(1000, now, d1), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1001, now, d2), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1002, now, d3), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1002, d3), "UpsertDeployment") a := mock.Alloc() a.JobID = d3.JobID a.DeploymentID = d3.ID - assert.Nil(store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{a}), "UpsertAllocs") + assert.Nil(store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}), "UpsertAllocs") // Create a core scheduler snap, err := store.Snapshot() @@ -1916,14 +1906,12 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { testutil.WaitForLeader(t, server.RPC) assert := assert.New(t) - now := time.Now().UnixNano() - // Insert terminal and active deployment store := server.fsm.State() d1, d2 := mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed - assert.Nil(store.UpsertDeployment(1000, now, d1), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1001, now, d2), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment") + assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment") // Create a core scheduler snap, err := store.Snapshot() diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index ecc46bc13b6..77d97c35354 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -673,9 +673,11 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { Mode: structs.CSIVolumeClaimRead, } + now := time.Now().UnixNano() + index++ claim.State = structs.CSIVolumeClaimStateTaken - err = state.CSIVolumeClaim(index, ns, volID, claim) + err = state.CSIVolumeClaim(index, now, ns, volID, claim) must.NoError(t, err) // setup: claim the volume for our other alloc @@ -688,7 +690,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { index++ otherClaim.State = structs.CSIVolumeClaimStateTaken - err = state.CSIVolumeClaim(index, ns, volID, otherClaim) + err = state.CSIVolumeClaim(index, now, ns, volID, otherClaim) must.NoError(t, err) // test: unpublish and check the results diff --git a/nomad/mock/csi.go b/nomad/mock/csi.go index aa4176c59d4..350490dcdff 100644 --- a/nomad/mock/csi.go +++ b/nomad/mock/csi.go @@ -5,6 +5,7 @@ package mock import ( "fmt" + "time" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -50,6 +51,8 @@ func CSIVolume(plugin *structs.CSIPlugin) *structs.CSIVolume { ControllersExpected: len(plugin.Controllers), NodesHealthy: plugin.NodesHealthy, NodesExpected: len(plugin.Nodes), + CreateTime: time.Now().Add(-1 * time.Hour).UnixNano(), + ModifyTime: time.Now().UnixNano(), } } diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go index 13b56115fcb..e2e76894a76 100644 --- a/nomad/plan_apply.go +++ b/nomad/plan_apply.go @@ -356,7 +356,7 @@ func (p *planner) applyPlan(plan 
*structs.Plan, result *structs.PlanResult, snap // Optimistically apply to our state view if snap != nil { nextIdx := p.srv.raft.AppliedIndex() + 1 - if err := snap.UpsertPlanResults(structs.ApplyPlanResultsRequestType, nextIdx, time.Now().UnixNano(), &req); err != nil { + if err := snap.UpsertPlanResults(structs.ApplyPlanResultsRequestType, nextIdx, &req); err != nil { return future, err } } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index a4a5eab2f5f..4a84c3bebfd 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -457,7 +457,7 @@ func (s *StateStore) UpsertPlanResults(msgType structs.MessageType, index uint64 alloc.Canonicalize() } - if err := s.upsertAllocsImpl(index, results.UpdatedAt, allocsToUpsert, txn); err != nil { + if err := s.upsertAllocsImpl(index, allocsToUpsert, txn); err != nil { return err } @@ -2557,7 +2557,7 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) } // UpsertCSIVolume inserts a volume in the state store. -func (s *StateStore) UpsertCSIVolume(index uint64, now int64, volumes []*structs.CSIVolume) error { +func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) error { txn := s.db.WriteTxn(index) defer txn.Abort() @@ -2583,10 +2583,8 @@ func (s *StateStore) UpsertCSIVolume(index uint64, now int64, volumes []*structs } } else { v.CreateIndex = index - v.CreateTime = now } v.ModifyIndex = index - v.ModifyTime = now // Allocations are copy on write, so we want to keep the Allocation ID // but we need to clear the pointer so that we don't store it when we @@ -3162,7 +3160,7 @@ func (s *StateStore) CSIPluginDenormalizeTxn(txn Txn, ws memdb.WatchSet, plug *s // UpsertCSIPlugin writes the plugin to the state store. Note: there // is currently no raft message for this, as it's intended to support // testing use cases. -func (s *StateStore) UpsertCSIPlugin(index uint64, now int64, plug *structs.CSIPlugin) error { +func (s *StateStore) UpsertCSIPlugin(index uint64, plug *structs.CSIPlugin) error { txn := s.db.WriteTxn(index) defer txn.Abort() @@ -3172,7 +3170,6 @@ func (s *StateStore) UpsertCSIPlugin(index uint64, now int64, plug *structs.CSIP } plug.ModifyIndex = index - plug.ModifyTime = now if existing != nil { plug.CreateIndex = existing.(*structs.CSIPlugin).CreateIndex plug.CreateTime = existing.(*structs.CSIPlugin).CreateTime @@ -4091,10 +4088,10 @@ func (s *StateStore) updateClientAllocUpdateIndex(txn *txn, index uint64, nodeID // UpsertAllocs is used to evict a set of allocations and allocate new ones at // the same time. -func (s *StateStore) UpsertAllocs(msgType structs.MessageType, index uint64, now int64, allocs []*structs.Allocation) error { +func (s *StateStore) UpsertAllocs(msgType structs.MessageType, index uint64, allocs []*structs.Allocation) error { txn := s.db.WriteTxn(index) defer txn.Abort() - if err := s.upsertAllocsImpl(index, now, allocs, txn); err != nil { + if err := s.upsertAllocsImpl(index, allocs, txn); err != nil { return err } return txn.Commit() @@ -4102,7 +4099,7 @@ func (s *StateStore) UpsertAllocs(msgType structs.MessageType, index uint64, now // upsertAllocs is the actual implementation of UpsertAllocs so that it may be // used with an existing transaction. 
-func (s *StateStore) upsertAllocsImpl(index uint64, now int64, allocs []*structs.Allocation, txn *txn) error { +func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation, txn *txn) error { // Handle the allocations jobs := make(map[structs.NamespacedID]string, 1) for _, alloc := range allocs { diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 59e7aecd9d3..d7f35867026 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -193,7 +193,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { require := require.New(t) require.NoError(state.UpsertAllocs( - structs.MsgTypeTestSetup, 900, time.Now().UnixNano(), []*structs.Allocation{stoppedAlloc, preemptedAlloc})) + structs.MsgTypeTestSetup, 900, []*structs.Allocation{stoppedAlloc, preemptedAlloc})) require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) // modify job and ensure that stopped and preempted alloc point to original Job @@ -391,11 +391,9 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { err = state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}) require.NoError(err) - now := time.Now().UnixNano() - // Insert alloc that will be preempted in the plan preemptedAlloc := mock.Alloc() - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 2, now, []*structs.Allocation{preemptedAlloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{preemptedAlloc}) require.NoError(err) minimalPreemptedAlloc := &structs.Allocation{ @@ -3992,7 +3990,7 @@ func TestStateStore_CSIVolume(t *testing.T) { now := time.Now().UnixNano() index++ - err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) require.NoError(t, err) ns := structs.DefaultNamespace @@ -4072,7 +4070,7 @@ func TestStateStore_CSIVolume(t *testing.T) { a0 := mock.Alloc() a1 := mock.Alloc() index++ - err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, now, []*structs.Allocation{a0, a1}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{a0, a1}) require.NoError(t, err) // Claims @@ -4218,7 +4216,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { } switch kind { case SERVER: - err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), now, allocs) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), allocs) case CLIENT: // this is somewhat artificial b/c we get alloc updates // from multiple nodes concurrently but not in a single @@ -4308,7 +4306,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { allocIDs = append(allocIDs, nodeAlloc.ID) allocs = append(allocs, nodeAlloc) } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), now, allocs) + err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), allocs) must.NoError(t, err) // node plugin now has expected counts too @@ -5001,7 +4999,7 @@ func TestStateStore_DeleteEval_Eval(t *testing.T) { t.Fatalf("err: %v", err) } - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -5116,7 +5114,7 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { t.Fatalf("err: %v", err) } - err = 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1}) if err != nil { t.Fatalf("err: %v", err) } @@ -5788,7 +5786,6 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() node := mock.Node() must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 997, node)) @@ -5805,7 +5802,7 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { alloc.NodeID = node.ID alloc.JobID = child.ID alloc.Job = child - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) ws := memdb.NewWatchSet() summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) @@ -5854,7 +5851,6 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() node := mock.Node() @@ -5867,7 +5863,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc2.Job)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc1, alloc2})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) // Create watchsets so we can test that update fires the watch watches := make([]memdb.WatchSet, 8) @@ -5963,7 +5959,6 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() node := mock.Node() @@ -5972,7 +5967,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) // Create the delta updates ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}} @@ -6043,7 +6038,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) must.NoError(t, state.UpsertDeployment(1000, deployment)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) healthy := now.Add(time.Second) update := &structs.Allocation{ @@ -6095,7 +6090,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) must.NoError(t, state.UpsertDeployment(1000, deployment)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc})) + must.NoError(t, 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) update := &structs.Allocation{ ID: alloc.ID, @@ -6125,7 +6120,6 @@ func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() node1 := mock.Node() alloc1 := mock.Alloc() @@ -6144,7 +6138,7 @@ func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, alloc1.Job)) must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, alloc2.Job)) - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, now, []*structs.Allocation{alloc1, alloc2, alloc3})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc1, alloc2, alloc3})) // Create watches to make sure they fire when nodes are updated. ws1 := memdb.NewWatchSet() @@ -6242,7 +6236,7 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { t.Fatalf("bad: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6310,7 +6304,7 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { ws := memdb.NewWatchSet() require.Nil(state.AllocsByDeployment(ws, alloc.DeploymentID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now.UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) require.Nil(err) if !watchFired(ws) { @@ -6375,7 +6369,7 @@ func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) { _, err = state.AllocsByNamespace(watches[1], ns2.Name) require.NoError(t, err) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) require.True(t, watchFired(watches[0])) require.True(t, watchFired(watches[1])) @@ -6428,7 +6422,7 @@ func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { alloc := mock.Alloc() alloc.Job = nil - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 999, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 999, []*structs.Allocation{alloc}) if err == nil || !strings.Contains(err.Error(), "without a job") { t.Fatalf("expect err: %v", err) } @@ -6457,7 +6451,7 @@ func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) require.NoError(t, err) - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) require.NoError(t, err) require.True(t, watchFired(ws)) @@ -6482,13 +6476,12 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - now := time.Now().UnixNano() if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { t.Fatalf("err: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, 
[]*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6526,7 +6519,7 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { t.Fatalf("bad: %v", err) } - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc2}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -6581,7 +6574,6 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { // when set rather than preferring the existing status. func TestStateStore_UpdateAlloc_Lost(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() state := testStateStore(t) alloc := mock.Alloc() @@ -6591,7 +6583,7 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { t.Fatalf("err: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6599,7 +6591,7 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { alloc2 := new(structs.Allocation) *alloc2 = *alloc alloc2.ClientStatus = structs.AllocClientStatusLost - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}); err != nil { t.Fatalf("err: %v", err) } @@ -6622,7 +6614,6 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - now := time.Now().UnixNano() // Upsert a job state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) @@ -6630,7 +6621,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { t.Fatalf("err: %v", err) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -6642,7 +6633,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { // Update the desired state of the allocation to stop allocCopy := alloc.Copy() allocCopy.DesiredStatus = structs.AllocDesiredStatusStop - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{allocCopy}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{allocCopy}); err != nil { t.Fatalf("err: %v", err) } @@ -6670,7 +6661,7 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { alloc := mock.Alloc() require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) t1 := &structs.DesiredTransition{ Migrate: pointer.Of(true), @@ -6734,7 +6725,6 @@ func TestStateStore_JobSummary(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Add a job job := mock.Job() @@ -6755,7 +6745,7 @@ func TestStateStore_JobSummary(t *testing.T) { alloc := mock.Alloc() alloc.JobID = job.ID alloc.Job = job - state.UpsertAllocs(structs.MsgTypeTestSetup, 910, now, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 910, []*structs.Allocation{alloc}) // Update the alloc from client alloc1 := alloc.Copy() @@ -6772,13 +6762,13 @@ func TestStateStore_JobSummary(t *testing.T) { alloc4 := alloc.Copy() 
alloc4.ClientStatus = structs.AllocClientStatusPending alloc4.DesiredStatus = structs.AllocDesiredStatusRun - state.UpsertAllocs(structs.MsgTypeTestSetup, 950, now, []*structs.Allocation{alloc4}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 950, []*structs.Allocation{alloc4}) // Again upsert the alloc alloc5 := alloc.Copy() alloc5.ClientStatus = structs.AllocClientStatusPending alloc5.DesiredStatus = structs.AllocDesiredStatusRun - state.UpsertAllocs(structs.MsgTypeTestSetup, 970, now, []*structs.Allocation{alloc5}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 970, []*structs.Allocation{alloc5}) if !watchFired(ws) { t.Fatalf("bad") @@ -6859,7 +6849,6 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Create an alloc alloc := mock.Alloc() @@ -6877,7 +6866,7 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { alloc2.Job = alloc.Job // Upserts the alloc - state.UpsertAllocs(structs.MsgTypeTestSetup, 110, now, []*structs.Allocation{alloc, alloc2}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 110, []*structs.Allocation{alloc, alloc2}) // Change the state of the first alloc to running alloc3 := alloc.Copy() @@ -6919,7 +6908,7 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { alloc12.TaskGroup = "db" alloc12.ClientStatus = structs.AllocClientStatusUnknown - state.UpsertAllocs(structs.MsgTypeTestSetup, 130, now, []*structs.Allocation{alloc4, alloc6, alloc8, alloc10, alloc12}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 130, []*structs.Allocation{alloc4, alloc6, alloc8, alloc10, alloc12}) state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 150, []*structs.Allocation{alloc5, alloc7, alloc9, alloc11}) @@ -6994,7 +6983,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusFailed require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 110, nil, childJob)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 111, time.Now().UnixNano(), []*structs.Allocation{alloc, alloc2})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 111, []*structs.Allocation{alloc, alloc2})) // Make the summary incorrect in the state store summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID) @@ -7048,11 +7037,10 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() alloc := mock.Alloc() state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, alloc.Job) - state.UpsertAllocs(structs.MsgTypeTestSetup, 200, now, []*structs.Allocation{alloc}) + state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}) // Delete the job state.DeleteJob(300, alloc.Namespace, alloc.Job.ID) @@ -7101,10 +7089,9 @@ func TestStateStore_EvictAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - now := time.Now().UnixNano() state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, []*structs.Allocation{alloc}) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -7112,7 +7099,7 @@ func TestStateStore_EvictAlloc_Alloc(t *testing.T) { alloc2 := new(structs.Allocation) *alloc2 = *alloc alloc2.DesiredStatus = structs.AllocDesiredStatusEvict - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc2}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, 
[]*structs.Allocation{alloc2}) if err != nil { t.Fatalf("err: %v", err) } @@ -7152,7 +7139,7 @@ func TestStateStore_AllocsByNode(t *testing.T) { state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7197,7 +7184,7 @@ func TestStateStore_AllocsByNodeTerminal(t *testing.T) { state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7250,7 +7237,7 @@ func TestStateStore_AllocsByJob(t *testing.T) { state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7275,7 +7262,6 @@ func TestStateStore_AllocsByJob(t *testing.T) { func TestStateStore_AllocsForRegisteredJob(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() state := testStateStore(t) var allocs []*structs.Allocation @@ -7290,7 +7276,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { alloc.JobID = job.ID allocs = append(allocs, alloc) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, now, allocs); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, allocs); err != nil { t.Fatalf("err: %v", err) } @@ -7309,7 +7295,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { allocs1 = append(allocs1, alloc) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, allocs1); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs1); err != nil { t.Fatalf("err: %v", err) } @@ -7366,7 +7352,7 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) require.NoError(t, err) gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { @@ -7458,7 +7444,7 @@ func TestStateStore_AllocsByIDPrefix_Namespaces(t *testing.T) { require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) require.NoError(t, state.UpsertAllocs( - structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc1, alloc2})) + structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { var allocs []*structs.Allocation @@ -7506,7 +7492,7 @@ func TestStateStore_Allocs(t *testing.T) { state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) } - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) if err != nil { t.Fatalf("err: %v", err) } @@ -7556,9 +7542,7 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { allocs[1].PreviousAllocation = allocs[0].ID allocs[2].PreviousAllocation = allocs[1].ID - now := time.Now().UnixNano() - - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, now, allocs) + err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, 
allocs) require.Nil(err) ws := memdb.NewWatchSet() @@ -7587,7 +7571,7 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { // Insert another alloc, verify index of previous alloc also got updated alloc := mock.Alloc() alloc.PreviousAllocation = allocs[0].ID - err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc}) + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) require.Nil(err) alloc0, err := state.AllocByID(nil, allocs[0].ID) require.Nil(err) @@ -7755,7 +7739,7 @@ func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) { alloc.JobID = job.ID alloc.DesiredStatus = structs.AllocDesiredStatusStop state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -7789,7 +7773,7 @@ func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) { alloc.JobID = job.ID alloc.DesiredStatus = structs.AllocDesiredStatusRun state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, time.Now().UnixNano(), []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -7927,7 +7911,6 @@ func TestStateStore_SetJobStatus_SystemJob(t *testing.T) { func TestStateJobSummary_UpdateJobCount(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() state := testStateStore(t) alloc := mock.Alloc() @@ -7944,7 +7927,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { t.Fatalf("err: %v", err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}); err != nil { t.Fatalf("err: %v", err) } @@ -7984,7 +7967,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { alloc3.Job = job alloc3.JobID = job.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, now, []*structs.Allocation{alloc2, alloc3}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2, alloc3}); err != nil { t.Fatalf("err: %v", err) } @@ -8059,7 +8042,6 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { func TestJobSummary_UpdateClientStatus(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() state := testStateStore(t) alloc := mock.Alloc() @@ -8079,7 +8061,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { t.Fatalf("err: %v", err) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, now, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil { t.Fatalf("err: %v", err) } @@ -8124,7 +8106,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc7.Job = alloc.Job alloc7.JobID = alloc.JobID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, now, []*structs.Allocation{alloc7}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc7}); err != nil { t.Fatalf("err: %v", err) } summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) @@ -8385,7 +8367,6 @@ func 
TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { // Test promoting unhealthy canaries in a deployment. func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { ci.Parallel(t) - now := time.Now().UnixNano() state := testStateStore(t) require := require.New(t) @@ -8418,7 +8399,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { c3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{c1, c2, c3})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3})) // Promote the canaries req := &structs.ApplyDeploymentPromoteRequest{ @@ -8466,7 +8447,6 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Create a job with two task groups j := mock.Job() @@ -8513,7 +8493,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { Healthy: pointer.Of(true), } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{c1, c2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { t.Fatalf("err: %v", err) } @@ -8567,7 +8547,6 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { require := require.New(t) state := testStateStore(t) - now := time.Now().UnixNano() // Create a job with two task groups j := mock.Job() @@ -8622,7 +8601,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { Canary: true, } - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{c1, c2, c3})) + require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3})) // Create an eval e := mock.Eval() @@ -8741,7 +8720,6 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Create a deployment d1 := mock.Deployment() @@ -8759,7 +8737,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: true, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{a})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) // Pull the deployment from state ws := memdb.NewWatchSet() @@ -8777,7 +8755,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: false, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{b})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{b})) // Pull the deployment from state ws = memdb.NewWatchSet() @@ -8798,7 +8776,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: true, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, now, []*structs.Allocation{c})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, []*structs.Allocation{c})) ws = memdb.NewWatchSet() deploy2, err := state.DeploymentByID(ws, d2.ID) @@ -8812,7 +8790,6 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Create a 
deployment d1 := mock.Deployment() @@ -8830,7 +8807,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { Healthy: pointer.Of(true), Canary: false, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, now, []*structs.Allocation{a})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) // Pull the deployment from state ws := memdb.NewWatchSet() @@ -8847,7 +8824,6 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Insert two deployment d1 := mock.Deployment() @@ -8862,7 +8838,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t // Insert an alloc for a random deployment a := mock.Alloc() a.DeploymentID = d1.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, now, []*structs.Allocation{a}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{a}); err != nil { t.Fatalf("bad: %v", err) } @@ -8884,7 +8860,6 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - now := time.Now().UnixNano() // Insert a deployment d := mock.Deployment() @@ -8898,7 +8873,7 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { a1.DeploymentID = d.ID a2 := mock.Alloc() a2.DeploymentID = d.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, now, []*structs.Allocation{a1, a2}); err != nil { + if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{a1, a2}); err != nil { t.Fatalf("bad: %v", err) } diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go index 83dc2e1f449..0ecfa67d386 100644 --- a/nomad/volumewatcher/volumes_watcher_test.go +++ b/nomad/volumewatcher/volumes_watcher_test.go @@ -48,7 +48,7 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { State: structs.CSIVolumeClaimStateNodeDetached, } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, claim) require.NoError(t, err) require.Eventually(t, func() bool { watcher.wlock.RLock() @@ -127,7 +127,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { State: structs.CSIVolumeClaimStateUnpublishing, } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, claim) require.NoError(t, err) // create a new watcher and enable it to simulate the leadership @@ -197,11 +197,11 @@ func TestVolumeWatch_StartStop(t *testing.T) { } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, claim) require.NoError(t, err) claim.AllocationID = alloc2.ID index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, claim) require.NoError(t, err) // reap the volume and assert nothing has happened @@ -210,7 +210,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { NodeID: node.ID, } index++ - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, claim) require.NoError(t, err) ws := memdb.NewWatchSet() @@ -225,7 
+225,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { require.NoError(t, err) index++ claim.State = structs.CSIVolumeClaimStateReadyToFree - err = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim) + err = srv.State().CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, claim) require.NoError(t, err) // watcher stops and 1 claim has been released @@ -270,7 +270,7 @@ func TestVolumeWatch_Delete(t *testing.T) { // write a GC claim to the volume and then immediately delete, to // potentially hit the race condition between updates and deletes index++ - must.NoError(t, srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, + must.NoError(t, srv.State().CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, &structs.CSIVolumeClaim{ Mode: structs.CSIVolumeClaimGC, State: structs.CSIVolumeClaimStateReadyToFree, diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index 958f55ce4d5..e67894580b1 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -1625,7 +1625,7 @@ func TestReconciler_MultiTG_SingleUpdateBlock(t *testing.T) { } } - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } @@ -2384,7 +2384,7 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { job2 := job.Copy() job2.Version++ - d := structs.NewDeployment(job2, 50) + d := structs.NewDeployment(job2, 50, time.Now().UnixNano()) d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion s := &structs.DeploymentState{ DesiredCanaries: 2, @@ -2491,7 +2491,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { job2 := job.Copy() job2.Version++ - d := structs.NewDeployment(job2, 50) + d := structs.NewDeployment(job2, 50, time.Now().UnixNano()) d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion s := &structs.DeploymentState{ DesiredCanaries: 2, @@ -2619,7 +2619,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { job2 := job.Copy() job2.Version++ - d := structs.NewDeployment(job2, 50) + d := structs.NewDeployment(job2, 50, time.Now().UnixNano()) d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion s := &structs.DeploymentState{ DesiredCanaries: 2, @@ -2790,8 +2790,8 @@ func TestReconciler_CancelDeployment_JobStop(t *testing.T) { job := mock.Job() job.Stop = true - running := structs.NewDeployment(job, 50) - failed := structs.NewDeployment(job, 50) + running := structs.NewDeployment(job, 50, time.Now().UnixNano()) + failed := structs.NewDeployment(job, 50, time.Now().UnixNano()) failed.Status = structs.DeploymentStatusFailed cases := []struct { @@ -2891,8 +2891,8 @@ func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) { job := mock.Job() // Create two deployments - running := structs.NewDeployment(job, 50) - failed := structs.NewDeployment(job, 50) + running := structs.NewDeployment(job, 50, time.Now().UnixNano()) + failed := structs.NewDeployment(job, 50, time.Now().UnixNano()) failed.Status = structs.DeploymentStatusFailed // Make the job newer than the deployment @@ -2985,7 +2985,7 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ 
DesiredTotal: 10, } @@ -3031,7 +3031,7 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } @@ -3076,7 +3076,7 @@ func TestReconciler_CreateDeployment_NewerCreateIndex(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 5, } @@ -3167,7 +3167,7 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { // Create a deployment that is paused/failed and has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = c.deploymentStatus d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: false, @@ -3248,7 +3248,7 @@ func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { // Create a deployment that is paused and has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = c.deploymentStatus d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: false, @@ -3314,7 +3314,7 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing for _, c := range cases { t.Run(c.name, func(t *testing.T) { // Create a deployment that is paused and has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = c.deploymentStatus d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: false, @@ -3374,7 +3374,7 @@ func TestReconciler_DrainNode_Canary(t *testing.T) { job.TaskGroups[0].Update = canaryUpdate // Create a deployment that is paused and has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) s := &structs.DeploymentState{ Promoted: false, DesiredTotal: 10, @@ -3449,7 +3449,7 @@ func TestReconciler_LostNode_Canary(t *testing.T) { job.TaskGroups[0].Update = canaryUpdate // Create a deployment that is paused and has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) s := &structs.DeploymentState{ Promoted: false, DesiredTotal: 10, @@ -3525,7 +3525,7 @@ func TestReconciler_StopOldCanaries(t *testing.T) { job.TaskGroups[0].Update = canaryUpdate // Create an old deployment that has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) s := &structs.DeploymentState{ Promoted: false, DesiredTotal: 10, @@ -3567,7 +3567,7 @@ func TestReconciler_StopOldCanaries(t *testing.T) { allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50) + newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -3623,7 +3623,7 @@ func TestReconciler_NewCanaries(t *testing.T) { nil, allocs, nil, "", 50, true) r 
:= reconciler.Compute() - newD := structs.NewDeployment(job, 50) + newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -3674,7 +3674,7 @@ func TestReconciler_NewCanaries_CountGreater(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50) + newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion state := &structs.DeploymentState{ DesiredCanaries: 7, @@ -3728,7 +3728,7 @@ func TestReconciler_NewCanaries_MultiTG(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50) + newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion state := &structs.DeploymentState{ DesiredCanaries: 2, @@ -3784,7 +3784,7 @@ func TestReconciler_NewCanaries_ScaleUp(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50) + newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -3835,7 +3835,7 @@ func TestReconciler_NewCanaries_ScaleDown(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50) + newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -3876,7 +3876,7 @@ func TestReconciler_NewCanaries_FillNames(t *testing.T) { } // Create an existing deployment that has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) s := &structs.DeploymentState{ Promoted: false, DesiredTotal: 10, @@ -3942,7 +3942,7 @@ func TestReconciler_PromoteCanaries_Unblock(t *testing.T) { // Create an existing deployment that has placed some canaries and mark them // promoted - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) s := &structs.DeploymentState{ Promoted: true, DesiredTotal: 10, @@ -4018,7 +4018,7 @@ func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) { // Create an existing deployment that has placed some canaries and mark them // promoted - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) s := &structs.DeploymentState{ Promoted: true, DesiredTotal: 2, @@ -4123,7 +4123,7 @@ func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) { t.Run(fmt.Sprintf("%d healthy", c.healthy), func(t *testing.T) { // Create an existing deployment that has placed some canaries and mark them // promoted - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: true, DesiredTotal: 10, @@ -4195,7 +4195,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { job.TaskGroups[0].Update = noCanaryUpdate // Create an existing deployment that has some placed allocs - d := structs.NewDeployment(job, 50) + d := 
structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: true, DesiredTotal: 10, @@ -4282,7 +4282,7 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { job.TaskGroups[0].Update = noCanaryUpdate // Create an existing failed deployment that has some placed allocs - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = structs.DeploymentStatusFailed d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: true, @@ -4367,7 +4367,7 @@ func TestReconciler_CompleteDeployment(t *testing.T) { job := mock.Job() job.TaskGroups[0].Update = canaryUpdate - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = structs.DeploymentStatusSuccessful d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: true, @@ -4421,7 +4421,7 @@ func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) { job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, PlacedAllocs: 20, @@ -4489,7 +4489,7 @@ func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) { job.TaskGroups[1].Name = "two" // Create an existing failed deployment that has promoted one task group - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = structs.DeploymentStatusFailed s0 := &structs.DeploymentState{ Promoted: true, @@ -4582,7 +4582,7 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) { job.TaskGroups[0].Update = noCanaryUpdate // Create an existing failed deployment that has some placed allocs - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = structs.DeploymentStatusFailed d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: true, @@ -4625,7 +4625,7 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) { d, allocs, nil, "", 50, true) r := reconciler.Compute() - dnew := structs.NewDeployment(jobNew, 50) + dnew := structs.NewDeployment(jobNew, 50, time.Now().UnixNano()) dnew.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } @@ -4653,7 +4653,7 @@ func TestReconciler_MarkDeploymentComplete(t *testing.T) { job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: true, DesiredTotal: 10, @@ -4715,7 +4715,7 @@ func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) { job.TaskGroups[0].Count = 30 // Create a deployment that is paused and has placed some canaries - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: false, DesiredTotal: 30, @@ -4791,7 +4791,7 @@ func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } @@ -4874,7 +4874,7 @@ func TestReconciler_FailedDeployment_DontReschedule(t 
*testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() // Create an existing failed deployment that has some placed allocs - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = structs.DeploymentStatusFailed d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: true, @@ -4934,7 +4934,7 @@ func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) { now := time.Now() // Mock deployment with failed allocs, but deployment watcher hasn't marked it as failed yet - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = structs.DeploymentStatusRunning d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: false, @@ -5010,7 +5010,7 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { jobv2.Version = 2 jobv2.TaskGroups[0].Meta = map[string]string{"version": "2"} - d := structs.NewDeployment(jobv2, 50) + d := structs.NewDeployment(jobv2, 50, time.Now().UnixNano()) state := &structs.DeploymentState{ Promoted: true, DesiredTotal: 3, @@ -5092,7 +5092,7 @@ func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T now := time.Now() // Mock deployment with failed allocs, but deployment watcher hasn't marked it as failed yet - d := structs.NewDeployment(job, 50) + d := structs.NewDeployment(job, 50, time.Now().UnixNano()) d.Status = structs.DeploymentStatusSuccessful d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ Promoted: false, @@ -6181,7 +6181,7 @@ func TestReconciler_Client_Disconnect_Canaries(t *testing.T) { // Validate tc.canaryAllocs against tc.deploymentState must.Eq(t, tc.deploymentState.PlacedAllocs, canariesConfigured, must.Sprintf("invalid canary configuration: expect %d got %d", tc.deploymentState.PlacedAllocs, canariesConfigured)) - deployment := structs.NewDeployment(updatedJob, 50) + deployment := structs.NewDeployment(updatedJob, 50, time.Now().UnixNano()) deployment.TaskGroups[updatedJob.TaskGroups[0].Name] = tc.deploymentState // Build a map of tainted nodes that contains the last canary @@ -6331,7 +6331,7 @@ func TestReconciler_ComputeDeploymentPaused(t *testing.T) { // fetched by the scheduler before handing it to the // reconciler. 
if job.UsesDeployments() { - deployment = structs.NewDeployment(job, 100) + deployment = structs.NewDeployment(job, 100, time.Now().UnixNano()) deployment.Status = structs.DeploymentStatusInitializing deployment.StatusDescription = structs.DeploymentStatusDescriptionPendingForPeer } From 5e13ba8caa39f7a28dea510b40fded270fa69b74 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 17:51:36 +0100 Subject: [PATCH 21/40] fixes --- nomad/core_sched_test.go | 144 ++++++++++++++++---------------- nomad/fsm.go | 2 +- nomad/state/state_store_test.go | 10 +-- 3 files changed, 78 insertions(+), 78 deletions(-) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 8629b01fa83..df3b0f34290 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -2042,63 +2042,63 @@ func TestAllocation_GCEligible(t *testing.T) { PreventRescheduleOnLost *bool AllocJobModifyIndex uint64 JobModifyIndex uint64 - ModifyIndex uint64 + ModifyTime int64 NextAllocID string ReschedulePolicy *structs.ReschedulePolicy RescheduleTrackers []*structs.RescheduleEvent - ThresholdIndex uint64 + CutoffTime time.Time ShouldGC bool } - fail := time.Now() + now := time.Now() harness := []testCase{ { - Desc: "Don't GC when non terminal", - ClientStatus: structs.AllocClientStatusPending, - DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, - ModifyIndex: 90, - ThresholdIndex: 90, - ShouldGC: false, + Desc: "Don't GC when non terminal", + ClientStatus: structs.AllocClientStatusPending, + DesiredStatus: structs.AllocDesiredStatusRun, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now, + ShouldGC: false, }, { - Desc: "Don't GC when non terminal and job stopped", - ClientStatus: structs.AllocClientStatusPending, - DesiredStatus: structs.AllocDesiredStatusRun, - JobStop: true, - GCTime: fail, - ModifyIndex: 90, - ThresholdIndex: 90, - ShouldGC: false, + Desc: "Don't GC when non terminal and job stopped", + ClientStatus: structs.AllocClientStatusPending, + DesiredStatus: structs.AllocDesiredStatusRun, + JobStop: true, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now, + ShouldGC: false, }, { - Desc: "Don't GC when non terminal and job dead", - ClientStatus: structs.AllocClientStatusPending, - DesiredStatus: structs.AllocDesiredStatusRun, - JobStatus: structs.JobStatusDead, - GCTime: fail, - ModifyIndex: 90, - ThresholdIndex: 90, - ShouldGC: false, + Desc: "Don't GC when non terminal and job dead", + ClientStatus: structs.AllocClientStatusPending, + DesiredStatus: structs.AllocDesiredStatusRun, + JobStatus: structs.JobStatusDead, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now, + ShouldGC: false, }, { - Desc: "Don't GC when non terminal on client and job dead", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusStop, - JobStatus: structs.JobStatusDead, - GCTime: fail, - ModifyIndex: 90, - ThresholdIndex: 90, - ShouldGC: false, + Desc: "Don't GC when non terminal on client and job dead", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusStop, + JobStatus: structs.JobStatusDead, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now, + ShouldGC: false, }, { Desc: "GC when terminal but not failed ", ClientStatus: structs.AllocClientStatusComplete, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, - ModifyIndex: 90, - ThresholdIndex: 90, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now, 
ReschedulePolicy: nil, ShouldGC: true, }, @@ -2106,9 +2106,9 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "Don't GC when threshold not met", ClientStatus: structs.AllocClientStatusComplete, DesiredStatus: structs.AllocDesiredStatusStop, - GCTime: fail, - ModifyIndex: 100, - ThresholdIndex: 90, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now.Add(-1 * time.Hour), ReschedulePolicy: nil, ShouldGC: false, }, @@ -2116,29 +2116,29 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when no reschedule policy", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: now, ReschedulePolicy: nil, - ModifyIndex: 90, - ThresholdIndex: 90, + ModifyTime: now.UnixNano(), + CutoffTime: now, ShouldGC: true, }, { Desc: "GC when empty policy", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 0, Interval: 0 * time.Minute}, - ModifyIndex: 90, - ThresholdIndex: 90, + ModifyTime: now.UnixNano(), + CutoffTime: now, ShouldGC: true, }, { Desc: "Don't GC when no previous reschedule attempts", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, - ModifyIndex: 90, - ThresholdIndex: 90, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, ShouldGC: false, }, @@ -2147,12 +2147,12 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 2, Interval: 30 * time.Minute}, - GCTime: fail, - ModifyIndex: 90, - ThresholdIndex: 90, + GCTime: now, + ModifyTime: now.UnixNano(), + CutoffTime: now, RescheduleTrackers: []*structs.RescheduleEvent{ { - RescheduleTime: fail.Add(-5 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-5 * time.Minute).UTC().UnixNano(), }, }, ShouldGC: false, @@ -2161,14 +2161,14 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC with prev reschedule attempt outside interval", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { - RescheduleTime: fail.Add(-45 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-45 * time.Minute).UTC().UnixNano(), }, { - RescheduleTime: fail.Add(-60 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-60 * time.Minute).UTC().UnixNano(), }, }, ShouldGC: true, @@ -2177,11 +2177,11 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when next alloc id is set", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { - RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), }, }, NextAllocID: uuid.Generate(), @@ -2191,11 +2191,11 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "Don't GC when next alloc id is not set and unlimited restarts", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: 
now, ReschedulePolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "constant"}, RescheduleTrackers: []*structs.RescheduleEvent{ { - RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), }, }, ShouldGC: false, @@ -2204,11 +2204,11 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when job is stopped", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { - RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), }, }, JobStop: true, @@ -2218,7 +2218,7 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when alloc is lost and eligible for reschedule", ClientStatus: structs.AllocClientStatusLost, DesiredStatus: structs.AllocDesiredStatusStop, - GCTime: fail, + GCTime: now, JobStatus: structs.JobStatusDead, ShouldGC: true, }, @@ -2226,11 +2226,11 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when job status is dead", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { - RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), }, }, JobStatus: structs.JobStatusDead, @@ -2240,7 +2240,7 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when desired status is stop, unlimited reschedule policy, no previous reschedule events", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusStop, - GCTime: fail, + GCTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "constant"}, ShouldGC: true, }, @@ -2248,11 +2248,11 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when desired status is stop, limited reschedule policy, some previous reschedule events", ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusStop, - GCTime: fail, + GCTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { - RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), + RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), }, }, ShouldGC: true, @@ -2261,7 +2261,7 @@ func TestAllocation_GCEligible(t *testing.T) { Desc: "GC when alloc is unknown and but desired state is running", ClientStatus: structs.AllocClientStatusUnknown, DesiredStatus: structs.AllocDesiredStatusRun, - GCTime: fail, + GCTime: now, JobStatus: structs.JobStatusRunning, ShouldGC: false, }, @@ -2269,7 +2269,7 @@ func TestAllocation_GCEligible(t *testing.T) { for _, tc := range harness { alloc := &structs.Allocation{} - alloc.ModifyIndex = tc.ModifyIndex + alloc.ModifyTime = tc.ModifyTime alloc.DesiredStatus = tc.DesiredStatus alloc.ClientStatus = tc.ClientStatus alloc.RescheduleTracker = &structs.RescheduleTracker{Events: tc.RescheduleTrackers} @@ -2286,7 +2286,7 @@ func TestAllocation_GCEligible(t *testing.T) { job.Stop = tc.JobStop t.Run(tc.Desc, func(t *testing.T) { - if got := allocGCEligible(alloc, job, tc.GCTime, tc.ThresholdIndex); got != 
tc.ShouldGC { + if got := allocGCEligible(alloc, job, tc.GCTime, tc.CutoffTime); got != tc.ShouldGC { t.Fatalf("expected %v but got %v", tc.ShouldGC, got) } }) @@ -2296,7 +2296,7 @@ func TestAllocation_GCEligible(t *testing.T) { // Verify nil job alloc := mock.Alloc() alloc.ClientStatus = structs.AllocClientStatusComplete - require.True(t, allocGCEligible(alloc, nil, time.Now(), 1000)) + require.True(t, allocGCEligible(alloc, nil, time.Now(), time.Now())) } func TestCoreScheduler_CSIPluginGC(t *testing.T) { diff --git a/nomad/fsm.go b/nomad/fsm.go index 977e050ada3..39837895ed0 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1366,7 +1366,7 @@ func (n *nomadFSM) applyCSIVolumeRegister(buf []byte, index uint64) interface{} } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_register"}, time.Now()) - if err := n.state.UpsertCSIVolume(index, req.Timestamp, req.Volumes); err != nil { + if err := n.state.UpsertCSIVolume(index, req.Volumes); err != nil { n.logger.Error("CSIVolumeRegister failed", "error", err) return err } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index d7f35867026..5e56af1fed7 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -1082,7 +1082,7 @@ func TestStateStore_DeleteNamespaces_CSIVolumes(t *testing.T) { vol := mock.CSIVolume(plugin) vol.Namespace = ns.Name - require.NoError(t, state.UpsertCSIVolume(1001, time.Now().UnixNano(), []*structs.CSIVolume{vol})) + require.NoError(t, state.UpsertCSIVolume(1001, []*structs.CSIVolume{vol})) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() @@ -4021,18 +4021,18 @@ func TestStateStore_CSIVolume(t *testing.T) { }} index++ - err = state.UpsertCSIVolume(index, now, []*structs.CSIVolume{v0, v1}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) // volume registration is idempotent, unless identies are changed index++ - err = state.UpsertCSIVolume(index, now, []*structs.CSIVolume{v0, v1}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) index++ v2 := v0.Copy() v2.PluginID = "new-id" - err = state.UpsertCSIVolume(index, now, []*structs.CSIVolume{v2}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v2}) require.Error(t, err, fmt.Sprintf("volume exists: %s", v0.ID)) ws := memdb.NewWatchSet() @@ -4455,7 +4455,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { Namespace: structs.DefaultNamespace, PluginID: plugID, } - err = store.UpsertCSIVolume(nextIndex(store), now, []*structs.CSIVolume{vol}) + err = store.UpsertCSIVolume(nextIndex(store), []*structs.CSIVolume{vol}) must.NoError(t, err) err = store.DeleteJob(nextIndex(store), structs.DefaultNamespace, controllerJobID) From 53ecd9844fb63f2d8d364d7891930b04980e49c3 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 19:17:17 +0100 Subject: [PATCH 22/40] TestAllocation_GCEligible --- nomad/core_sched_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index df3b0f34290..67e3686f94c 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -2162,6 +2162,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, GCTime: now, + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, 
RescheduleTrackers: []*structs.RescheduleEvent{ { @@ -2178,6 +2179,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, GCTime: now, + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { @@ -2192,6 +2194,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, GCTime: now, + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "constant"}, RescheduleTrackers: []*structs.RescheduleEvent{ { @@ -2205,6 +2208,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, GCTime: now, + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { @@ -2219,6 +2223,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusLost, DesiredStatus: structs.AllocDesiredStatusStop, GCTime: now, + CutoffTime: now, JobStatus: structs.JobStatusDead, ShouldGC: true, }, @@ -2227,6 +2232,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusRun, GCTime: now, + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { @@ -2241,6 +2247,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusStop, GCTime: now, + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "constant"}, ShouldGC: true, }, @@ -2249,6 +2256,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusFailed, DesiredStatus: structs.AllocDesiredStatusStop, GCTime: now, + CutoffTime: now, ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute}, RescheduleTrackers: []*structs.RescheduleEvent{ { @@ -2262,6 +2270,7 @@ func TestAllocation_GCEligible(t *testing.T) { ClientStatus: structs.AllocClientStatusUnknown, DesiredStatus: structs.AllocDesiredStatusRun, GCTime: now, + CutoffTime: now, JobStatus: structs.JobStatusRunning, ShouldGC: false, }, From 46a2aca4be695ae20f3e4b74a5d3827f17a547b2 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 30 Oct 2024 19:53:40 +0100 Subject: [PATCH 23/40] =?UTF-8?q?wip=20on=20TestCoreScheduler=5FEvalGC,=20?= =?UTF-8?q?there's=20something=20=F0=9F=90=9F-y=20going=20on=20here?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nomad/core_sched_test.go | 6 ++++++ nomad/mock/alloc.go | 3 +++ 2 files changed, 9 insertions(+) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 67e3686f94c..811d0326540 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -34,6 +34,7 @@ func TestCoreScheduler_EvalGC(t *testing.T) { // Insert "dead" eval store := s1.fsm.State() eval := mock.Eval() + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds eval.Status = structs.EvalStatusFailed 
store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) @@ -45,6 +46,7 @@ func TestCoreScheduler_EvalGC(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } + job.SubmitTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) // Insert "dead" alloc @@ -53,6 +55,8 @@ func TestCoreScheduler_EvalGC(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.JobID = eval.JobID alloc.TaskGroup = job.TaskGroups[0].Name + alloc.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + alloc.ModifyTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // Insert "lost" alloc alloc2 := mock.Alloc() @@ -61,6 +65,8 @@ func TestCoreScheduler_EvalGC(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusLost alloc2.JobID = eval.JobID alloc2.TaskGroup = job.TaskGroups[0].Name + alloc2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + alloc2.ModifyTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() must.NoError(t, store.UpsertAllocs( structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2})) diff --git a/nomad/mock/alloc.go b/nomad/mock/alloc.go index 170c3b54b27..5a9dc0dc9c3 100644 --- a/nomad/mock/alloc.go +++ b/nomad/mock/alloc.go @@ -5,6 +5,7 @@ package mock import ( "math/rand" + "time" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -80,6 +81,8 @@ func Alloc() *structs.Allocation { Job: job, DesiredStatus: structs.AllocDesiredStatusRun, ClientStatus: structs.AllocClientStatusPending, + CreateTime: time.Now().UTC().UnixNano(), + ModifyTime: time.Now().UTC().UnixNano(), } alloc.JobID = alloc.Job.ID alloc.Canonicalize() From 021b8626a3a5af178f896bd72e929b6a5cc93c5e Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:02:19 +0100 Subject: [PATCH 24/40] api update --- api/csi.go | 6 ++++++ api/deployments.go | 7 +++++++ nomad/state/state_store.go | 2 +- nomad/state/state_store_test.go | 2 -- nomad/structs/structs.go | 2 +- 5 files changed, 15 insertions(+), 4 deletions(-) diff --git a/api/csi.go b/api/csi.go index 65e1ca569f3..f77b4d96cdd 100644 --- a/api/csi.go +++ b/api/csi.go @@ -350,6 +350,8 @@ type CSIVolume struct { CreateIndex uint64 ModifyIndex uint64 + CreateTime int64 + ModifyTime int64 // ExtraKeysHCL is used by the hcl parser to report unexpected keys ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` @@ -401,6 +403,8 @@ type CSIVolumeListStub struct { CreateIndex uint64 ModifyIndex uint64 + CreateTime int64 + ModifyTime int64 } type CSIVolumeListExternalResponse struct { @@ -543,6 +547,8 @@ type CSIPlugin struct { NodesExpected int CreateIndex uint64 ModifyIndex uint64 + CreateTime int64 + ModifyTime int64 } type CSIPluginListStub struct { diff --git a/api/deployments.go b/api/deployments.go index 6785b8f6d6a..94e2e97323f 100644 --- a/api/deployments.go +++ b/api/deployments.go @@ -193,6 +193,10 @@ type Deployment struct { CreateIndex uint64 ModifyIndex uint64 + + // Creation and modification times, stored as UnixNano + CreateTime int64 + ModifyTime int64 } // DeploymentState tracks the state of a deployment for a given task group. 
@@ -261,6 +265,9 @@ type DeploymentPromoteRequest struct { // Groups is used to set the promotion status per task group Groups []string + // PromotedAt is the timestamp stored as Unix nano + PromotedAt int64 + WriteRequest } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 4a84c3bebfd..545c3f3201b 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1,4 +1,4 @@ -// Copyright (c) HashupdateCorp, Inc. +// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package state diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 5e56af1fed7..c0efa31112f 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -4198,8 +4198,6 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { CLIENT ) - now := time.Now().UnixNano() - // helper function calling client-side update with with // UpsertAllocs and/or UpdateAllocsFromClient, depending on which // status(es) are set diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index de3ccf17321..b877c69bdf1 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -10843,7 +10843,7 @@ type DeploymentStatusUpdate struct { // StatusDescription is the new status description of the deployment. StatusDescription string - // Updated at is the time of the update + // UpdatedAt is the time of the update UpdatedAt int64 } From 409b9855cadb5f96b20be042a9f12b6bd951c418 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 13:27:35 +0100 Subject: [PATCH 25/40] TestPlanApply_applyPlan --- nomad/mock/alloc.go | 3 -- nomad/mock/mock.go | 2 + nomad/plan_apply_test.go | 106 +++++++++++++++------------------------ 3 files changed, 43 insertions(+), 68 deletions(-) diff --git a/nomad/mock/alloc.go b/nomad/mock/alloc.go index 5a9dc0dc9c3..170c3b54b27 100644 --- a/nomad/mock/alloc.go +++ b/nomad/mock/alloc.go @@ -5,7 +5,6 @@ package mock import ( "math/rand" - "time" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -81,8 +80,6 @@ func Alloc() *structs.Allocation { Job: job, DesiredStatus: structs.AllocDesiredStatusRun, ClientStatus: structs.AllocClientStatusPending, - CreateTime: time.Now().UTC().UnixNano(), - ModifyTime: time.Now().UTC().UnixNano(), } alloc.JobID = alloc.Job.ID alloc.Canonicalize() diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index 6636d210166..306928032a0 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -184,6 +184,8 @@ func Deployment() *structs.Deployment { StatusDescription: structs.DeploymentStatusDescriptionRunning, ModifyIndex: 23, CreateIndex: 21, + CreateTime: time.Now().UTC().UnixNano(), + ModifyTime: time.Now().UTC().UnixNano(), } } diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go index 687d5511832..4ca17430aa4 100644 --- a/nomad/plan_apply_test.go +++ b/nomad/plan_apply_test.go @@ -52,23 +52,6 @@ func testRegisterNode(t *testing.T, s *Server, n *structs.Node) { } } -func testRegisterJob(t *testing.T, s *Server, j *structs.Job) { - // Create the register request - req := &structs.JobRegisterRequest{ - Job: j, - WriteRequest: structs.WriteRequest{Region: "global"}, - } - - // Fetch the response - var resp structs.JobRegisterResponse - if err := s.RPC("Job.Register", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if resp.Index == 0 { - t.Fatalf("bad index: %d", resp.Index) - } -} - // COMPAT 0.11: Tests the older unoptimized code path for 
applyPlan func TestPlanApply_applyPlan(t *testing.T) { ci.Parallel(t) @@ -83,9 +66,7 @@ func TestPlanApply_applyPlan(t *testing.T) { // Register a fake deployment oldDeployment := mock.Deployment() - if err := s1.State().UpsertDeployment(900, oldDeployment); err != nil { - t.Fatalf("UpsertDeployment failed: %v", err) - } + must.NoError(t, s1.State().UpsertDeployment(900, oldDeployment)) // Create a deployment dnew := mock.Deployment() @@ -102,13 +83,11 @@ func TestPlanApply_applyPlan(t *testing.T) { // Register alloc, deployment and deployment update alloc := mock.Alloc() - s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)) + must.NoError(t, s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) // Create an eval eval := mock.Eval() eval.JobID = alloc.JobID - if err := s1.State().UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, s1.State().UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval})) planRes := &structs.PlanResult{ NodeAllocation: map[string][]*structs.Allocation{ @@ -120,9 +99,7 @@ func TestPlanApply_applyPlan(t *testing.T) { // Snapshot the state snap, err := s1.State().Snapshot() - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create the plan with a deployment plan := &structs.Plan{ @@ -134,50 +111,49 @@ func TestPlanApply_applyPlan(t *testing.T) { // Apply the plan future, err := s1.applyPlan(plan, planRes, snap) - assert := assert.New(t) - assert.Nil(err) + must.NoError(t, err) // Verify our optimistic snapshot is updated ws := memdb.NewWatchSet() allocOut, err := snap.AllocByID(ws, alloc.ID) - assert.Nil(err) - assert.NotNil(allocOut) + must.NoError(t, err) + must.NotNil(t, allocOut) deploymentOut, err := snap.DeploymentByID(ws, plan.Deployment.ID) - assert.Nil(err) - assert.NotNil(deploymentOut) + must.NoError(t, err) + must.NotNil(t, deploymentOut) // Check plan does apply cleanly index, err := planWaitFuture(future) - assert.Nil(err) - assert.NotEqual(0, index) + must.NoError(t, err) + must.NotNil(t, index) // Lookup the allocation fsmState := s1.fsm.State() allocOut, err = fsmState.AllocByID(ws, alloc.ID) - assert.Nil(err) - assert.NotNil(allocOut) - assert.True(allocOut.CreateTime > 0) - assert.True(allocOut.ModifyTime > 0) - assert.Equal(allocOut.CreateTime, allocOut.ModifyTime) + must.NoError(t, err) + must.NotNil(t, allocOut) + must.True(t, allocOut.CreateTime > 0) + must.True(t, allocOut.ModifyTime > 0) + must.Eq(t, allocOut.CreateTime, allocOut.ModifyTime) // Lookup the new deployment dout, err := fsmState.DeploymentByID(ws, plan.Deployment.ID) - assert.Nil(err) - assert.NotNil(dout) + must.NoError(t, err) + must.NotNil(t, dout) // Lookup the updated deployment dout2, err := fsmState.DeploymentByID(ws, oldDeployment.ID) - assert.Nil(err) - assert.NotNil(dout2) - assert.Equal(desiredStatus, dout2.Status) - assert.Equal(desiredStatusDescription, dout2.StatusDescription) + must.NoError(t, err) + must.NotNil(t, dout2) + must.Eq(t, desiredStatus, dout2.Status) + must.Eq(t, desiredStatusDescription, dout2.StatusDescription) // Lookup updated eval evalOut, err := fsmState.EvalByID(ws, eval.ID) - assert.Nil(err) - assert.NotNil(evalOut) - assert.Equal(index, evalOut.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, evalOut) + must.Eq(t, index, evalOut.ModifyIndex) // Evict alloc, Register alloc2 allocEvict := new(structs.Allocation) @@ -186,7 +162,7 @@ func TestPlanApply_applyPlan(t *testing.T) { job := allocEvict.Job 
allocEvict.Job = nil alloc2 := mock.Alloc() - s1.State().UpsertJobSummary(1500, mock.JobSummary(alloc2.JobID)) + must.NoError(t, s1.State().UpsertJobSummary(1500, mock.JobSummary(alloc2.JobID))) planRes = &structs.PlanResult{ NodeUpdate: map[string][]*structs.Allocation{ node.ID: {allocEvict}, @@ -198,7 +174,7 @@ func TestPlanApply_applyPlan(t *testing.T) { // Snapshot the state snap, err = s1.State().Snapshot() - assert.Nil(err) + must.NoError(t, err) // Apply the plan plan = &structs.Plan{ @@ -206,40 +182,40 @@ func TestPlanApply_applyPlan(t *testing.T) { EvalID: eval.ID, } future, err = s1.applyPlan(plan, planRes, snap) - assert.Nil(err) + must.NoError(t, err) // Check that our optimistic view is updated out, _ := snap.AllocByID(ws, allocEvict.ID) if out.DesiredStatus != structs.AllocDesiredStatusEvict && out.DesiredStatus != structs.AllocDesiredStatusStop { - assert.Equal(structs.AllocDesiredStatusEvict, out.DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusEvict, out.DesiredStatus) } // Verify plan applies cleanly index, err = planWaitFuture(future) - assert.Nil(err) - assert.NotEqual(0, index) + must.NoError(t, err) + must.NotEq(t, 0, index) // Lookup the allocation allocOut, err = s1.fsm.State().AllocByID(ws, alloc.ID) - assert.Nil(err) + must.NoError(t, err) if allocOut.DesiredStatus != structs.AllocDesiredStatusEvict && allocOut.DesiredStatus != structs.AllocDesiredStatusStop { - assert.Equal(structs.AllocDesiredStatusEvict, allocOut.DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusEvict, allocOut.DesiredStatus) } - assert.NotNil(allocOut.Job) - assert.True(allocOut.ModifyTime > 0) + must.NotNil(t, allocOut.Job) + must.True(t, allocOut.ModifyTime > 0) // Lookup the allocation allocOut, err = s1.fsm.State().AllocByID(ws, alloc2.ID) - assert.Nil(err) - assert.NotNil(allocOut) - assert.NotNil(allocOut.Job) + must.NoError(t, err) + must.NotNil(t, allocOut) + must.NotNil(t, allocOut.Job) // Lookup updated eval evalOut, err = fsmState.EvalByID(ws, eval.ID) - assert.Nil(err) - assert.NotNil(evalOut) - assert.Equal(index, evalOut.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, evalOut) + must.Eq(t, index, evalOut.ModifyIndex) } // Verifies that applyPlan properly updates the constituent objects in MemDB, From 6cbb9ee49812430b66aa7c219d431335a7bec6db Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 13:38:56 +0100 Subject: [PATCH 26/40] TestPlanApply_applyPlanWithNormalizedAllocs --- nomad/plan_apply_test.go | 79 +++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 42 deletions(-) diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go index 4ca17430aa4..ed6deefc298 100644 --- a/nomad/plan_apply_test.go +++ b/nomad/plan_apply_test.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/nomad/testutil" "github.com/hashicorp/raft" "github.com/shoenig/test/must" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -265,8 +264,9 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { ID: preemptedAlloc.ID, PreemptedByAllocation: alloc.ID, } - s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)) - s1.State().UpsertAllocs(structs.MsgTypeTestSetup, 1100, []*structs.Allocation{stoppedAlloc, preemptedAlloc}) + must.NoError(t, s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) + must.NoError(t, s1.State().UpsertAllocs(structs.MsgTypeTestSetup, 1100, []*structs.Allocation{stoppedAlloc, 
preemptedAlloc})) + // Create an eval eval := mock.Eval() eval.JobID = alloc.JobID @@ -274,7 +274,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { t.Fatalf("err: %v", err) } - timestampBeforeCommit := time.Now().UTC().UnixNano() + timestampBeforeCommit := time.Now().UnixNano() planRes := &structs.PlanResult{ NodeAllocation: map[string][]*structs.Allocation{ node.ID: {alloc}, @@ -291,9 +291,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { // Snapshot the state snap, err := s1.State().Snapshot() - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create the plan with a deployment plan := &structs.Plan{ @@ -303,72 +301,69 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { EvalID: eval.ID, } - require := require.New(t) - assert := assert.New(t) - // Apply the plan future, err := s1.applyPlan(plan, planRes, snap) - require.NoError(err) + must.NoError(t, err) // Verify our optimistic snapshot is updated ws := memdb.NewWatchSet() allocOut, err := snap.AllocByID(ws, alloc.ID) - require.NoError(err) - require.NotNil(allocOut) + must.NoError(t, err) + must.NotNil(t, allocOut) deploymentOut, err := snap.DeploymentByID(ws, plan.Deployment.ID) - require.NoError(err) - require.NotNil(deploymentOut) + must.NoError(t, err) + must.NotNil(t, deploymentOut) // Check plan does apply cleanly index, err := planWaitFuture(future) - require.NoError(err) - assert.NotEqual(0, index) + must.NoError(t, err) + must.NotEq(t, 0, index) // Lookup the allocation fsmState := s1.fsm.State() allocOut, err = fsmState.AllocByID(ws, alloc.ID) - require.NoError(err) - require.NotNil(allocOut) - assert.True(allocOut.CreateTime > 0) - assert.True(allocOut.ModifyTime > 0) - assert.Equal(allocOut.CreateTime, allocOut.ModifyTime) + must.NoError(t, err) + must.NotNil(t, allocOut) + must.True(t, allocOut.CreateTime > 0) + must.True(t, allocOut.ModifyTime > 0) + must.Eq(t, allocOut.CreateTime, allocOut.ModifyTime) // Verify stopped alloc diff applied cleanly updatedStoppedAlloc, err := fsmState.AllocByID(ws, stoppedAlloc.ID) - require.NoError(err) - require.NotNil(updatedStoppedAlloc) - assert.True(updatedStoppedAlloc.ModifyTime > timestampBeforeCommit) - assert.Equal(updatedStoppedAlloc.DesiredDescription, stoppedAllocDiff.DesiredDescription) - assert.Equal(updatedStoppedAlloc.ClientStatus, stoppedAllocDiff.ClientStatus) - assert.Equal(updatedStoppedAlloc.DesiredStatus, structs.AllocDesiredStatusStop) + must.NoError(t, err) + must.NotNil(t, updatedStoppedAlloc) + must.True(t, updatedStoppedAlloc.ModifyTime > timestampBeforeCommit) + must.Eq(t, updatedStoppedAlloc.DesiredDescription, stoppedAllocDiff.DesiredDescription) + must.Eq(t, updatedStoppedAlloc.ClientStatus, stoppedAllocDiff.ClientStatus) + must.Eq(t, updatedStoppedAlloc.DesiredStatus, structs.AllocDesiredStatusStop) // Verify preempted alloc diff applied cleanly updatedPreemptedAlloc, err := fsmState.AllocByID(ws, preemptedAlloc.ID) - require.NoError(err) - require.NotNil(updatedPreemptedAlloc) - assert.True(updatedPreemptedAlloc.ModifyTime > timestampBeforeCommit) - assert.Equal(updatedPreemptedAlloc.DesiredDescription, + must.NoError(t, err) + must.NotNil(t, updatedPreemptedAlloc) + must.True(t, updatedPreemptedAlloc.ModifyTime > timestampBeforeCommit) + must.Eq(t, updatedPreemptedAlloc.DesiredDescription, "Preempted by alloc ID "+preemptedAllocDiff.PreemptedByAllocation) - assert.Equal(updatedPreemptedAlloc.DesiredStatus, structs.AllocDesiredStatusEvict) + must.Eq(t, 
updatedPreemptedAlloc.DesiredStatus, structs.AllocDesiredStatusEvict) // Lookup the new deployment dout, err := fsmState.DeploymentByID(ws, plan.Deployment.ID) - require.NoError(err) - require.NotNil(dout) + must.NoError(t, err) + must.NotNil(t, dout) // Lookup the updated deployment dout2, err := fsmState.DeploymentByID(ws, oldDeployment.ID) - require.NoError(err) - require.NotNil(dout2) - assert.Equal(desiredStatus, dout2.Status) - assert.Equal(desiredStatusDescription, dout2.StatusDescription) + must.NoError(t, err) + must.NotNil(t, dout2) + must.Eq(t, desiredStatus, dout2.Status) + must.Eq(t, desiredStatusDescription, dout2.StatusDescription) // Lookup updated eval evalOut, err := fsmState.EvalByID(ws, eval.ID) - require.NoError(err) - require.NotNil(evalOut) - assert.Equal(index, evalOut.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, evalOut) + must.Eq(t, index, evalOut.ModifyIndex) } func TestPlanApply_signAllocIdentities(t *testing.T) { From d357903f599ca5ef02a4a464956de779e24d15a2 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:00:48 +0100 Subject: [PATCH 27/40] TestSystemEndpoint_GarbageCollect --- nomad/system_endpoint_test.go | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/nomad/system_endpoint_test.go b/nomad/system_endpoint_test.go index c1467adefbc..a2fff45e64e 100644 --- a/nomad/system_endpoint_test.go +++ b/nomad/system_endpoint_test.go @@ -7,6 +7,7 @@ import ( "fmt" "reflect" "testing" + "time" memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" @@ -15,6 +16,8 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/shoenig/test/must" + "github.com/shoenig/test/wait" "github.com/stretchr/testify/assert" ) @@ -31,16 +34,16 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.Stop = true - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { - t.Fatalf("UpsertJob() failed: %v", err) - } + // submit time must be older than default job GC + job.SubmitTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) eval := mock.Eval() eval.Status = structs.EvalStatusComplete eval.JobID = job.ID - if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval}); err != nil { - t.Fatalf("UpsertEvals() failed: %v", err) - } + // modify time must be older than default eval GC + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval})) // Make the GC request req := &structs.GenericRequest{ @@ -49,11 +52,9 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) { }, } var resp structs.GenericResponse - if err := msgpackrpc.CallWithCodec(codec, "System.GarbageCollect", req, &resp); err != nil { - t.Fatalf("expect err") - } + must.NoError(t, msgpackrpc.CallWithCodec(codec, "System.GarbageCollect", req, &resp)) - testutil.WaitForResult(func() (bool, error) { + must.Wait(t, wait.InitialSuccess(wait.TestFunc(func() (bool, error) { // Check if the job has been GC'd ws := memdb.NewWatchSet() exist, err := state.JobByID(ws, job.Namespace, job.ID) @@ -64,9 +65,7 @@ func 
TestSystemEndpoint_GarbageCollect(t *testing.T) { return false, fmt.Errorf("job %+v wasn't garbage collected", job) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }), wait.Timeout(3*time.Second))) } func TestSystemEndpoint_GarbageCollect_ACL(t *testing.T) { From 589a8ae326c9e227eaa787e6e26226be2373384e Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:06:51 +0100 Subject: [PATCH 28/40] first chunk of core_sched_test fixes --- nomad/core_sched_test.go | 164 +++++++++++++++++++-------------------- nomad/mock/alloc.go | 6 ++ nomad/mock/job.go | 1 + 3 files changed, 87 insertions(+), 84 deletions(-) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 811d0326540..51386a5e0d3 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -35,8 +35,9 @@ func TestCoreScheduler_EvalGC(t *testing.T) { store := s1.fsm.State() eval := mock.Eval() eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() eval.Status = structs.EvalStatusFailed - store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) + must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))) must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) // Insert mock job with rescheduling disabled @@ -46,7 +47,6 @@ func TestCoreScheduler_EvalGC(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - job.SubmitTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) // Insert "dead" alloc @@ -55,8 +55,6 @@ func TestCoreScheduler_EvalGC(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.JobID = eval.JobID alloc.TaskGroup = job.TaskGroups[0].Name - alloc.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - alloc.ModifyTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // Insert "lost" alloc alloc2 := mock.Alloc() @@ -65,8 +63,6 @@ func TestCoreScheduler_EvalGC(t *testing.T) { alloc2.ClientStatus = structs.AllocClientStatusLost alloc2.JobID = eval.JobID alloc2.TaskGroup = job.TaskGroups[0].Name - alloc2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - alloc2.ModifyTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() must.NoError(t, store.UpsertAllocs( structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2})) @@ -124,24 +120,25 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { // Insert "dead" eval store := s1.fsm.State() eval := mock.Eval() + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() eval.Status = structs.EvalStatusFailed - store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) - err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) - require.Nil(t, err) + must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))) + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) // Insert "pending" eval for same job eval2 := mock.Eval() eval2.JobID = eval.JobID - 
store.UpsertJobSummary(999, mock.JobSummary(eval2.JobID)) - err = store.UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2}) - require.Nil(t, err) + eval2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval2.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval2.JobID))) + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2})) // Insert mock job with default reschedule policy of 2 in 10 minutes job := mock.Job() job.ID = eval.JobID - err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) - require.Nil(t, err) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) // Insert failed alloc with an old reschedule attempt, can be GCed alloc := mock.Alloc() @@ -178,35 +175,31 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { }, }, } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2}) - require.Nil(t, err) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2})) // Create a core scheduler snap, err := store.Snapshot() - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) core := NewCoreScheduler(s1, snap) // Attempt the GC, job has all terminal allocs and one pending eval gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000) - err = core.Process(gc) - require.Nil(t, err) + must.NoError(t, core.Process(gc)) // Eval should still exist ws := memdb.NewWatchSet() out, err := store.EvalByID(ws, eval.ID) - require.Nil(t, err) - require.NotNil(t, out) - require.Equal(t, eval.ID, out.ID) + must.Nil(t, err) + must.NotNil(t, out) + must.Eq(t, eval.ID, out.ID) outA, err := store.AllocByID(ws, alloc.ID) - require.Nil(t, err) - require.Nil(t, outA) + must.Nil(t, err) + must.Nil(t, outA) outA2, err := store.AllocByID(ws, alloc2.ID) - require.Nil(t, err) - require.Equal(t, alloc2.ID, outA2.ID) + must.Nil(t, err) + must.Eq(t, alloc2.ID, outA2.ID) } @@ -222,17 +215,17 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { store := s1.fsm.State() eval := mock.Eval() eval.Status = structs.EvalStatusFailed - store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) - err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) - require.Nil(t, err) + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))) + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) // Insert mock stopped job with default reschedule policy of 2 in 10 minutes job := mock.Job() job.ID = eval.JobID job.Stop = true - err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) - require.Nil(t, err) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) // Insert failed alloc with a recent reschedule attempt alloc := mock.Alloc() @@ -250,8 +243,7 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { }, }, } - err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) - require.Nil(t, err) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Create a core scheduler snap, err 
:= store.Snapshot() @@ -262,20 +254,18 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { // Attempt the GC gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000) - err = core.Process(gc) - require.Nil(t, err) + must.NoError(t, core.Process(gc)) // Eval should not exist ws := memdb.NewWatchSet() out, err := store.EvalByID(ws, eval.ID) - require.Nil(t, err) - require.Nil(t, out) + must.Nil(t, err) + must.Nil(t, out) // Alloc should not exist outA, err := store.AllocByID(ws, alloc.ID) - require.Nil(t, err) - require.Nil(t, outA) - + must.Nil(t, err) + must.Nil(t, outA) } // An EvalGC should never reap a batch job that has not been stopped @@ -303,15 +293,15 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - err := store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx+1, nil, stoppedJob) - must.NoError(t, err) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx+1, nil, stoppedJob)) stoppedJobEval := mock.Eval() stoppedJobEval.Status = structs.EvalStatusComplete stoppedJobEval.Type = structs.JobTypeBatch stoppedJobEval.JobID = stoppedJob.ID - err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Evaluation{stoppedJobEval}) - must.NoError(t, err) + stoppedJobEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + stoppedJobEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Evaluation{stoppedJobEval})) stoppedJobStoppedAlloc := mock.Alloc() stoppedJobStoppedAlloc.Job = stoppedJob @@ -327,10 +317,9 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun stoppedJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs( + must.NoError(t, store.UpsertAllocs( structs.MsgTypeTestSetup, jobModifyIdx+3, - []*structs.Allocation{stoppedJobStoppedAlloc, stoppedJobLostAlloc}) - must.NoError(t, err) + []*structs.Allocation{stoppedJobStoppedAlloc, stoppedJobLostAlloc})) // A "dead" job containing one "complete" eval with: // 1. 
A "stopped" alloc @@ -339,15 +328,15 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { deadJob := mock.Job() deadJob.Type = structs.JobTypeBatch deadJob.Status = structs.JobStatusDead - err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, deadJob) - must.NoError(t, err) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, deadJob)) deadJobEval := mock.Eval() deadJobEval.Status = structs.EvalStatusComplete deadJobEval.Type = structs.JobTypeBatch deadJobEval.JobID = deadJob.ID - err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{deadJobEval}) - must.NoError(t, err) + deadJobEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + deadJobEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{deadJobEval})) stoppedAlloc := mock.Alloc() stoppedAlloc.Job = deadJob @@ -363,8 +352,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { lostAlloc.DesiredStatus = structs.AllocDesiredStatusRun lostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Allocation{stoppedAlloc, lostAlloc}) - must.NoError(t, err) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Allocation{stoppedAlloc, lostAlloc})) // An "alive" job #2 containing two complete evals. The first with: // 1. A "lost" alloc @@ -377,15 +365,15 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJob := mock.Job() activeJob.Type = structs.JobTypeBatch activeJob.Status = structs.JobStatusDead - err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, activeJob) - must.NoError(t, err) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, activeJob)) activeJobEval := mock.Eval() activeJobEval.Status = structs.EvalStatusComplete activeJobEval.Type = structs.JobTypeBatch activeJobEval.JobID = activeJob.ID - err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{activeJobEval}) - must.NoError(t, err) + activeJobEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + activeJobEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{activeJobEval})) activeJobRunningAlloc := mock.Alloc() activeJobRunningAlloc.Job = activeJob @@ -401,15 +389,16 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun activeJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc}) - must.NoError(t, err) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc})) activeJobCompleteEval := mock.Eval() activeJobCompleteEval.Status = structs.EvalStatusComplete activeJobCompleteEval.Type = structs.JobTypeBatch activeJobCompleteEval.JobID = activeJob.ID - err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{activeJobCompleteEval}) - must.NoError(t, err) + activeJobCompleteEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make 
sure objects we insert are older than GC thresholds + activeJobCompleteEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{activeJobCompleteEval})) activeJobCompletedEvalCompletedAlloc := mock.Alloc() activeJobCompletedEvalCompletedAlloc.Job = activeJob @@ -418,22 +407,19 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompletedEvalCompletedAlloc.DesiredStatus = structs.AllocDesiredStatusStop activeJobCompletedEvalCompletedAlloc.ClientStatus = structs.AllocClientStatusComplete - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc}) - must.NoError(t, err) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc})) // A job that ran once and was then purged. purgedJob := mock.Job() purgedJob.Type = structs.JobTypeBatch purgedJob.Status = structs.JobStatusDead - err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, purgedJob) - must.NoError(t, err) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, purgedJob)) purgedJobEval := mock.Eval() purgedJobEval.Status = structs.EvalStatusComplete purgedJobEval.Type = structs.JobTypeBatch purgedJobEval.JobID = purgedJob.ID - err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{purgedJobEval}) - must.NoError(t, err) + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{purgedJobEval})) purgedJobCompleteAlloc := mock.Alloc() purgedJobCompleteAlloc.Job = purgedJob @@ -442,19 +428,19 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteAlloc.DesiredStatus = structs.AllocDesiredStatusRun purgedJobCompleteAlloc.ClientStatus = structs.AllocClientStatusLost - err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{purgedJobCompleteAlloc}) - must.NoError(t, err) + must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{purgedJobCompleteAlloc})) purgedJobCompleteEval := mock.Eval() purgedJobCompleteEval.Status = structs.EvalStatusComplete purgedJobCompleteEval.Type = structs.JobTypeBatch purgedJobCompleteEval.JobID = purgedJob.ID - err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{purgedJobCompleteEval}) - must.NoError(t, err) + purgedJobCompleteEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + purgedJobCompleteEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{purgedJobCompleteEval})) // Purge job. 
- err = store.DeleteJob(jobModifyIdx, purgedJob.Namespace, purgedJob.ID) - must.NoError(t, err) + must.NoError(t, store.DeleteJob(jobModifyIdx, purgedJob.Namespace, purgedJob.ID)) // A little helper for assertions assertCorrectJobEvalAlloc := func( @@ -604,6 +590,9 @@ func TestCoreScheduler_EvalGC_JobVersionTag(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 999, []*structs.Evaluation{eval})) // upsert a couple versions of the job, so the "jobs" table has one // and the "job_version" table has two. @@ -684,6 +673,9 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { store := s1.fsm.State() eval := mock.Eval() eval.Status = structs.EvalStatusComplete + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) if err != nil { @@ -801,6 +793,9 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) { store := server.fsm.State() eval := mock.Eval() eval.Status = structs.EvalStatusFailed + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) if err != nil { @@ -1075,6 +1070,8 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() eval2 := mock.Eval() eval2.JobID = job.ID @@ -3044,8 +3041,7 @@ func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { allTokens = append(allTokens, nonExpiredLocalTokens...) // Upsert them all. - err := testServer.State().UpsertACLTokens(structs.MsgTypeTestSetup, 10, allTokens) - require.NoError(t, err) + must.NoError(t, testServer.State().UpsertACLTokens(structs.MsgTypeTestSetup, 10, allTokens)) // This function provides an easy way to get all tokens out of the // iterator. @@ -3059,28 +3055,28 @@ func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { // Check all the tokens are correctly stored within state. iter, err := testServer.State().ACLTokens(nil, state.SortDefault) - require.NoError(t, err) + must.NoError(t, err) tokens := fromIteratorFunc(iter) - require.ElementsMatch(t, allTokens, tokens) + must.SliceContainsAll(t, allTokens, tokens) // Generate the core scheduler and trigger a forced garbage collection // which should delete all expired tokens. 
snap, err := testServer.State().Snapshot() - require.NoError(t, err) + must.NoError(t, err) coreScheduler := NewCoreScheduler(testServer, snap) index, err := testServer.State().LatestIndex() - require.NoError(t, err) + must.NoError(t, err) index++ forceGCEval := testServer.coreJobEval(structs.CoreJobForceGC, index) - require.NoError(t, coreScheduler.Process(forceGCEval)) + must.NoError(t, coreScheduler.Process(forceGCEval)) // List all the remaining ACL tokens to be sure they are as expected. iter, err = testServer.State().ACLTokens(nil, state.SortDefault) - require.NoError(t, err) + must.NoError(t, err) tokens = fromIteratorFunc(iter) - require.ElementsMatch(t, append(nonExpiredGlobalTokens, nonExpiredLocalTokens...), tokens) + must.Eq(t, append(nonExpiredGlobalTokens, nonExpiredLocalTokens...), tokens) } diff --git a/nomad/mock/alloc.go b/nomad/mock/alloc.go index 170c3b54b27..05887887921 100644 --- a/nomad/mock/alloc.go +++ b/nomad/mock/alloc.go @@ -5,6 +5,7 @@ package mock import ( "math/rand" + "time" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -80,6 +81,11 @@ func Alloc() *structs.Allocation { Job: job, DesiredStatus: structs.AllocDesiredStatusRun, ClientStatus: structs.AllocClientStatusPending, + + // it's convenient for GC tests to have the allocs be a bit older than default GC + // thresholds + CreateTime: time.Now().Add(-1 * 7 * time.Hour).UnixNano(), + ModifyTime: time.Now().Add(-1 * 6 * time.Hour).UnixNano(), } alloc.JobID = alloc.Job.ID alloc.Canonicalize() diff --git a/nomad/mock/job.go b/nomad/mock/job.go index d3a70904286..044235273d5 100644 --- a/nomad/mock/job.go +++ b/nomad/mock/job.go @@ -144,6 +144,7 @@ func Job() *structs.Job { CreateIndex: 42, ModifyIndex: 99, JobModifyIndex: 99, + SubmitTime: time.Now().Add(-1 * 6 * time.Hour).UnixNano(), } job.Canonicalize() return job From 5e8a31ec3b7810adc0c2e65a6bf4734db289a30e Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:59:21 +0100 Subject: [PATCH 29/40] setting create/modify times in the mock causes issues --- nomad/core_sched.go | 9 ++-- nomad/core_sched_test.go | 101 +++++++++++++++------------------------ nomad/mock/alloc.go | 6 --- 3 files changed, 43 insertions(+), 73 deletions(-) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 81c9ded458a..e45f2f5cdce 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -285,11 +285,10 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error { return c.evalReap(gcEval, gcAlloc) } -// gcEval returns whether the eval should be garbage collected given a raft -// threshold index. The eval disqualifies for garbage collection if it or its -// allocs are not older than the threshold. If the eval should be garbage -// collected, the associated alloc ids that should also be removed are also -// returned +// gcEval returns whether the eval should be garbage collected given the cutoff +// time. The eval disqualifies for garbage collection if it or its allocs are not +// older than the cutoff. 
If the eval should be garbage collected, the associated +// alloc ids that should also be removed are also returned func (c *CoreScheduler) gcEval(eval *structs.Evaluation, cutoffTime time.Time, allowBatch bool) ( bool, []string, error) { diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 51386a5e0d3..bcc857917dc 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -1061,10 +1061,8 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.Status = structs.JobStatusDead - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + job.SubmitTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) // Insert two evals, one terminal and one not eval := mock.Eval() @@ -1076,96 +1074,58 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { eval2 := mock.Eval() eval2.JobID = job.ID eval2.Status = structs.EvalStatusPending - err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2}) - if err != nil { - t.Fatalf("err: %v", err) - } + eval2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval2.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2})) // Create a core scheduler snap, err := store.Snapshot() - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) core := NewCoreScheduler(s1, snap) // Attempt the GC gc := s1.coreJobEval(structs.CoreJobJobGC, 2000) - err = core.Process(gc) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, core.Process(gc)) // Should still exist ws := memdb.NewWatchSet() out, err := store.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out == nil { - t.Fatalf("bad: %v", out) - } + must.NoError(t, err) + must.NotNil(t, out) outE, err := store.EvalByID(ws, eval.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if outE == nil { - t.Fatalf("bad: %v", outE) - } + must.NoError(t, err) + must.NotNil(t, outE) outE2, err := store.EvalByID(ws, eval2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if outE2 == nil { - t.Fatalf("bad: %v", outE2) - } + must.NoError(t, err) + must.NotNil(t, outE2) // Update the second eval to be terminal eval2.Status = structs.EvalStatusComplete - err = store.UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2})) // Create a core scheduler snap, err = store.Snapshot() - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) core = NewCoreScheduler(s1, snap) // Attempt the GC gc = s1.coreJobEval(structs.CoreJobJobGC, 2000) - err = core.Process(gc) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, core.Process(gc)) // Should not still exist out, err = store.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out != nil { - t.Fatalf("bad: %v", out) - } + must.NoError(t, err) + must.Nil(t, out) outE, err = store.EvalByID(ws, eval.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if outE != nil { - t.Fatalf("bad: %v", outE) - } + must.NoError(t, err) + must.Nil(t, outE) outE2, err = 
store.EvalByID(ws, eval2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if outE2 != nil { - t.Fatalf("bad: %v", outE2) - } + must.NoError(t, err) + must.Nil(t, outE2) } func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { @@ -1193,6 +1153,8 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval}) if err != nil { t.Fatalf("err: %v", err) @@ -1437,10 +1399,14 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() eval2 := mock.Eval() eval2.JobID = job.ID eval2.Status = structs.EvalStatusComplete + eval2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval2.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2}) if err != nil { @@ -1453,6 +1419,8 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { alloc.EvalID = eval.ID alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.TaskGroup = job.TaskGroups[0].Name + alloc.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + alloc.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) @@ -1535,6 +1503,9 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete + eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval}) if err != nil { t.Fatalf("err: %v", err) @@ -1742,16 +1713,22 @@ func TestCoreScheduler_jobGC(t *testing.T) { mockEval1.JobID = inputJob.ID mockEval1.Namespace = inputJob.Namespace mockEval1.Status = structs.EvalStatusComplete + mockEval1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + mockEval1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() mockJob1Alloc1 := mock.Alloc() mockJob1Alloc1.EvalID = mockEval1.ID mockJob1Alloc1.JobID = inputJob.ID mockJob1Alloc1.ClientStatus = structs.AllocClientStatusRunning + mockJob1Alloc1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + mockJob1Alloc1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() mockJob1Alloc2 := mock.Alloc() mockJob1Alloc2.EvalID = mockEval1.ID mockJob1Alloc2.JobID = inputJob.ID mockJob1Alloc2.ClientStatus = structs.AllocClientStatusRunning + mockJob1Alloc2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + mockJob1Alloc2.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() must.NoError(t, testServer.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, inputJob)) diff --git a/nomad/mock/alloc.go 
b/nomad/mock/alloc.go index 05887887921..170c3b54b27 100644 --- a/nomad/mock/alloc.go +++ b/nomad/mock/alloc.go @@ -5,7 +5,6 @@ package mock import ( "math/rand" - "time" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -81,11 +80,6 @@ func Alloc() *structs.Allocation { Job: job, DesiredStatus: structs.AllocDesiredStatusRun, ClientStatus: structs.AllocClientStatusPending, - - // it's convenient for GC tests to have the allocs be a bit older than default GC - // thresholds - CreateTime: time.Now().Add(-1 * 7 * time.Hour).UnixNano(), - ModifyTime: time.Now().Add(-1 * 6 * time.Hour).UnixNano(), } alloc.JobID = alloc.Job.ID alloc.Canonicalize() From d92418fb45dd1dedbe7619e03995e74c29618cb7 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 16:22:44 +0100 Subject: [PATCH 30/40] more core_sched tests fixed --- nomad/core_sched_test.go | 55 +++++++++++++++++++++++++--------------- nomad/state/testing.go | 1 + 2 files changed, 35 insertions(+), 21 deletions(-) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index bcc857917dc..9d91c48bb90 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -1833,42 +1833,47 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) - assert := assert.New(t) // Insert an active, terminal, and terminal with allocations deployment store := s1.fsm.State() d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed + d1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + d1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() d3.Status = structs.DeploymentStatusSuccessful - assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment") - assert.Nil(store.UpsertDeployment(1002, d3), "UpsertDeployment") + + must.Nil(t, store.UpsertDeployment(1000, d1), must.Sprint("UpsertDeployment")) + must.Nil(t, store.UpsertDeployment(1001, d2), must.Sprint("UpsertDeployment")) + must.Nil(t, store.UpsertDeployment(1002, d3), must.Sprint("UpsertDeployment")) a := mock.Alloc() a.JobID = d3.JobID a.DeploymentID = d3.ID - assert.Nil(store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}), "UpsertAllocs") + a.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + a.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + must.Nil(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Create a core scheduler snap, err := store.Snapshot() - assert.Nil(err, "Snapshot") + must.NoError(t, err) core := NewCoreScheduler(s1, snap) // Attempt the GC gc := s1.coreJobEval(structs.CoreJobDeploymentGC, 2000) - assert.Nil(core.Process(gc), "Process GC") + must.NoError(t, core.Process(gc)) // Should be gone ws := memdb.NewWatchSet() out, err := store.DeploymentByID(ws, d1.ID) - assert.Nil(err, "DeploymentByID") - assert.Nil(out, "Terminal Deployment") + must.NoError(t, err) + must.Nil(t, out) + out2, err := store.DeploymentByID(ws, d2.ID) - assert.Nil(err, "DeploymentByID") - assert.NotNil(out2, "Active Deployment") + must.NoError(t, err) + must.NotNil(t, out2) out3, err := store.DeploymentByID(ws, d3.ID) - assert.Nil(err, "DeploymentByID") - assert.NotNil(out3, "Terminal Deployment With Allocs") + must.NoError(t, err) + must.NotNil(t, out3) } func 
TestCoreScheduler_DeploymentGC_Force(t *testing.T) { @@ -1890,6 +1895,8 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { store := server.fsm.State() d1, d2 := mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed + d1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + d1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment") assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment") @@ -2299,7 +2306,6 @@ func TestCoreScheduler_CSIPluginGC(t *testing.T) { defer deleteNodes() store := srv.fsm.State() - // Update the time tables to make this work index := uint64(2000) // Create a core scheduler @@ -2931,37 +2937,42 @@ func TestCoreScheduler_ExpiredACLTokenGC(t *testing.T) { unexpiredLocal := mock.ACLToken() unexpiredLocal.ExpirationTime = pointer.Of(now.Add(2 * time.Hour)) + // Set creation time in the past for all the tokens, otherwise GC won't trigger + for _, token := range []*structs.ACLToken{expiredGlobal, unexpiredGlobal, expiredLocal, unexpiredLocal} { + token.CreateTime = time.Now().Add(-1 * 10 * time.Hour) + } + // Upsert these into state. err := testServer.State().UpsertACLTokens(structs.MsgTypeTestSetup, 10, []*structs.ACLToken{ expiredGlobal, unexpiredGlobal, expiredLocal, unexpiredLocal, }) - require.NoError(t, err) + must.NoError(t, err) // Generate the core scheduler. snap, err := testServer.State().Snapshot() - require.NoError(t, err) + must.NoError(t, err) coreScheduler := NewCoreScheduler(testServer, snap) // Trigger global and local periodic garbage collection runs. index, err := testServer.State().LatestIndex() - require.NoError(t, err) + must.NoError(t, err) index++ globalGCEval := testServer.coreJobEval(structs.CoreJobGlobalTokenExpiredGC, index) - require.NoError(t, coreScheduler.Process(globalGCEval)) + must.NoError(t, coreScheduler.Process(globalGCEval)) localGCEval := testServer.coreJobEval(structs.CoreJobLocalTokenExpiredGC, index) - require.NoError(t, coreScheduler.Process(localGCEval)) + must.NoError(t, coreScheduler.Process(localGCEval)) // Ensure the ACL tokens stored within state are as expected. 
iter, err := testServer.State().ACLTokens(nil, state.SortDefault) - require.NoError(t, err) + must.NoError(t, err) var tokens []*structs.ACLToken for raw := iter.Next(); raw != nil; raw = iter.Next() { tokens = append(tokens, raw.(*structs.ACLToken)) } - require.ElementsMatch(t, []*structs.ACLToken{rootACLToken, unexpiredGlobal, unexpiredLocal}, tokens) + must.SliceContainsAll(t, []*structs.ACLToken{rootACLToken, unexpiredGlobal, unexpiredLocal}, tokens) } func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { @@ -2990,6 +3001,7 @@ func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { for i := 0; i < 20; i++ { mockedToken := mock.ACLToken() mockedToken.Global = true + mockedToken.CreateTime = time.Now().Add(-1 * 10 * time.Hour) if i%2 == 0 { expiredGlobalTokens = append(expiredGlobalTokens, mockedToken) mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-24 * time.Hour)) @@ -3004,6 +3016,7 @@ func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { for i := 0; i < 20; i++ { mockedToken := mock.ACLToken() mockedToken.Global = false + mockedToken.CreateTime = time.Now().Add(-1 * 10 * time.Hour) if i%2 == 0 { expiredLocalTokens = append(expiredLocalTokens, mockedToken) mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-24 * time.Hour)) diff --git a/nomad/state/testing.go b/nomad/state/testing.go index cb955ffa46c..596ac4421ae 100644 --- a/nomad/state/testing.go +++ b/nomad/state/testing.go @@ -92,6 +92,7 @@ func createTestCSIPlugin(s *StateStore, id string, requiresController bool) func SupportsCreateDeleteSnapshot: true, SupportsListSnapshots: true, }, + UpdateTime: time.Now().Add(-1 * 6 * time.Hour), }, } From 25769abdd8682d26bdcef69a3d0bf1a88a3cc891 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 16:53:46 +0100 Subject: [PATCH 31/40] reconciler test fixes --- scheduler/reconcile_test.go | 42 +++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index e67894580b1..653ea1d9a0d 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -2985,7 +2985,9 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + d := structs.NewDeployment(job, 50, r.deployment.CreateTime) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } @@ -3031,7 +3033,9 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + d := structs.NewDeployment(job, 50, r.deployment.CreateTime) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } @@ -3076,7 +3080,9 @@ func TestReconciler_CreateDeployment_NewerCreateIndex(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + d := structs.NewDeployment(job, 50, 
r.deployment.CreateTime) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 5, } @@ -3567,7 +3573,9 @@ func TestReconciler_StopOldCanaries(t *testing.T) { allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + newD := structs.NewDeployment(job, 50, r.deployment.CreateTime) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -3623,7 +3631,9 @@ func TestReconciler_NewCanaries(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + newD := structs.NewDeployment(job, 50, r.deployment.CreateTime) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -3674,7 +3684,9 @@ func TestReconciler_NewCanaries_CountGreater(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + newD := structs.NewDeployment(job, 50, r.deployment.CreateTime) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion state := &structs.DeploymentState{ DesiredCanaries: 7, @@ -3728,7 +3740,9 @@ func TestReconciler_NewCanaries_MultiTG(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + newD := structs.NewDeployment(job, 50, r.deployment.CreateTime) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion state := &structs.DeploymentState{ DesiredCanaries: 2, @@ -3784,7 +3798,9 @@ func TestReconciler_NewCanaries_ScaleUp(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + newD := structs.NewDeployment(job, 50, r.deployment.CreateTime) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -3835,7 +3851,9 @@ func TestReconciler_NewCanaries_ScaleDown(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - newD := structs.NewDeployment(job, 50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + newD := structs.NewDeployment(job, 50, r.deployment.CreateTime) newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredCanaries: 2, @@ -4625,7 +4643,9 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) { d, allocs, nil, "", 50, true) r := reconciler.Compute() - dnew := structs.NewDeployment(jobNew, 
50, time.Now().UnixNano()) + // reconciler sets the creation time automatically so we have to copy here, + // otherwise there will be a discrepancy + dnew := structs.NewDeployment(jobNew, 50, r.deployment.CreateTime) dnew.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } @@ -4791,7 +4811,7 @@ func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) { nil, allocs, nil, "", 50, true) r := reconciler.Compute() - d := structs.NewDeployment(job, 50, time.Now().UnixNano()) + d := structs.NewDeployment(job, 50, r.deployment.CreateTime) d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{ DesiredTotal: 10, } From 6a2b46468790aff45e5320af33e80188a43ad947 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 17:20:43 +0100 Subject: [PATCH 32/40] oof, must.SliceContainsAll --- nomad/core_sched_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 9d91c48bb90..9feb9ac4398 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -3068,5 +3068,5 @@ func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { must.NoError(t, err) tokens = fromIteratorFunc(iter) - must.Eq(t, append(nonExpiredGlobalTokens, nonExpiredLocalTokens...), tokens) + must.SliceContainsAll(t, append(nonExpiredGlobalTokens, nonExpiredLocalTokens...), tokens) } From 3e1feee01e21f0520ac143baa13206a4f6c4749d Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 17:56:47 +0100 Subject: [PATCH 33/40] TestCoreScheduler_CSIPluginGC fix --- nomad/core_sched_test.go | 123 ++++++++++++++++++---------------- nomad/mock/csi.go | 4 +- nomad/mock/job.go | 2 +- nomad/state/testing.go | 2 +- nomad/system_endpoint_test.go | 4 +- 5 files changed, 72 insertions(+), 63 deletions(-) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 9feb9ac4398..ef9d872917f 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -34,8 +34,8 @@ func TestCoreScheduler_EvalGC(t *testing.T) { // Insert "dead" eval store := s1.fsm.State() eval := mock.Eval() - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() eval.Status = structs.EvalStatusFailed must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))) must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) @@ -120,8 +120,8 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { // Insert "dead" eval store := s1.fsm.State() eval := mock.Eval() - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() eval.Status = structs.EvalStatusFailed must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))) must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) @@ -129,8 
+129,8 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { // Insert "pending" eval for same job eval2 := mock.Eval() eval2.JobID = eval.JobID - eval2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval2.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval2.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval2.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval2.JobID))) must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2})) @@ -152,7 +152,7 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { alloc.RescheduleTracker = &structs.RescheduleTracker{ Events: []*structs.RescheduleEvent{ { - RescheduleTime: time.Now().Add(-1 * time.Hour).UTC().UnixNano(), + RescheduleTime: time.Now().Add(-time.Hour).UTC().UnixNano(), PrevNodeID: uuid.Generate(), PrevAllocID: uuid.Generate(), }, @@ -215,8 +215,8 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { store := s1.fsm.State() eval := mock.Eval() eval.Status = structs.EvalStatusFailed - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))) must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) @@ -299,8 +299,8 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobEval.Status = structs.EvalStatusComplete stoppedJobEval.Type = structs.JobTypeBatch stoppedJobEval.JobID = stoppedJob.ID - stoppedJobEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - stoppedJobEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + stoppedJobEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + stoppedJobEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Evaluation{stoppedJobEval})) stoppedJobStoppedAlloc := mock.Alloc() @@ -334,8 +334,8 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { deadJobEval.Status = structs.EvalStatusComplete deadJobEval.Type = structs.JobTypeBatch deadJobEval.JobID = deadJob.ID - deadJobEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - deadJobEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + deadJobEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + deadJobEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{deadJobEval})) stoppedAlloc := mock.Alloc() @@ -371,8 +371,8 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobEval.Status = structs.EvalStatusComplete activeJobEval.Type = structs.JobTypeBatch activeJobEval.JobID = activeJob.ID - activeJobEval.CreateTime = 
time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - activeJobEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + activeJobEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + activeJobEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{activeJobEval})) activeJobRunningAlloc := mock.Alloc() @@ -395,8 +395,8 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompleteEval.Status = structs.EvalStatusComplete activeJobCompleteEval.Type = structs.JobTypeBatch activeJobCompleteEval.JobID = activeJob.ID - activeJobCompleteEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - activeJobCompleteEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + activeJobCompleteEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + activeJobCompleteEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{activeJobCompleteEval})) @@ -434,8 +434,8 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteEval.Status = structs.EvalStatusComplete purgedJobCompleteEval.Type = structs.JobTypeBatch purgedJobCompleteEval.JobID = purgedJob.ID - purgedJobCompleteEval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - purgedJobCompleteEval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + purgedJobCompleteEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + purgedJobCompleteEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{purgedJobCompleteEval})) @@ -590,8 +590,8 @@ func TestCoreScheduler_EvalGC_JobVersionTag(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 999, []*structs.Evaluation{eval})) // upsert a couple versions of the job, so the "jobs" table has one @@ -673,8 +673,8 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { store := s1.fsm.State() eval := mock.Eval() eval.Status = structs.EvalStatusComplete - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) @@ -793,8 +793,8 @@ func TestCoreScheduler_EvalGC_Force(t 
*testing.T) { store := server.fsm.State() eval := mock.Eval() eval.Status = structs.EvalStatusFailed - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() store.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) @@ -1061,21 +1061,21 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.Status = structs.JobStatusDead - job.SubmitTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + job.SubmitTime = time.Now().Add(-6 * time.Hour).UnixNano() must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) // Insert two evals, one terminal and one not eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() eval2 := mock.Eval() eval2.JobID = job.ID eval2.Status = structs.EvalStatusPending - eval2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval2.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval2.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval2.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2})) // Create a core scheduler @@ -1153,8 +1153,8 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval}) if err != nil { t.Fatalf("err: %v", err) @@ -1399,14 +1399,14 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() eval2 := mock.Eval() eval2.JobID = job.ID eval2.Status = structs.EvalStatusComplete - eval2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval2.ModifyTime = time.Now().Add(-1 * 5 * 
time.Hour).UnixNano() + eval2.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval2.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2}) if err != nil { @@ -1419,8 +1419,8 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { alloc.EvalID = eval.ID alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.TaskGroup = job.TaskGroups[0].Name - alloc.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() - alloc.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + alloc.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() + alloc.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) @@ -1503,8 +1503,8 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) { eval := mock.Eval() eval.JobID = job.ID eval.Status = structs.EvalStatusComplete - eval.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval}) if err != nil { @@ -1713,22 +1713,22 @@ func TestCoreScheduler_jobGC(t *testing.T) { mockEval1.JobID = inputJob.ID mockEval1.Namespace = inputJob.Namespace mockEval1.Status = structs.EvalStatusComplete - mockEval1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - mockEval1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + mockEval1.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds + mockEval1.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() mockJob1Alloc1 := mock.Alloc() mockJob1Alloc1.EvalID = mockEval1.ID mockJob1Alloc1.JobID = inputJob.ID mockJob1Alloc1.ClientStatus = structs.AllocClientStatusRunning - mockJob1Alloc1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() - mockJob1Alloc1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + mockJob1Alloc1.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() + mockJob1Alloc1.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() mockJob1Alloc2 := mock.Alloc() mockJob1Alloc2.EvalID = mockEval1.ID mockJob1Alloc2.JobID = inputJob.ID mockJob1Alloc2.ClientStatus = structs.AllocClientStatusRunning - mockJob1Alloc2.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() - mockJob1Alloc2.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + mockJob1Alloc2.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() + mockJob1Alloc2.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, testServer.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, inputJob)) @@ -1838,8 +1838,8 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { store := s1.fsm.State() d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed - d1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() - d1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + d1.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() + d1.ModifyTime = 
time.Now().Add(-5 * time.Hour).UnixNano() d3.Status = structs.DeploymentStatusSuccessful must.Nil(t, store.UpsertDeployment(1000, d1), must.Sprint("UpsertDeployment")) @@ -1849,8 +1849,8 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { a := mock.Alloc() a.JobID = d3.JobID a.DeploymentID = d3.ID - a.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() - a.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + a.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() + a.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.Nil(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Create a core scheduler @@ -1895,8 +1895,8 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { store := server.fsm.State() d1, d2 := mock.Deployment(), mock.Deployment() d1.Status = structs.DeploymentStatusFailed - d1.CreateTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() - d1.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + d1.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() + d1.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment") assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment") @@ -2308,6 +2308,15 @@ func TestCoreScheduler_CSIPluginGC(t *testing.T) { index := uint64(2000) + ws := memdb.NewWatchSet() + plug, err := store.CSIPluginByID(ws, "foo") + must.NotNil(t, plug) + must.NoError(t, err) + // set the creation and modification times on the plugin in the past, otherwise + // they won't meet the GC threshold + plug.CreateTime = time.Now().Add(-10 * time.Hour).UnixNano() + plug.ModifyTime = time.Now().Add(-9 * time.Hour).UnixNano() + // Create a core scheduler snap, err := store.Snapshot() must.NoError(t, err) @@ -2319,8 +2328,7 @@ func TestCoreScheduler_CSIPluginGC(t *testing.T) { must.NoError(t, core.Process(gc)) // Should not be gone (plugin in use) - ws := memdb.NewWatchSet() - plug, err := store.CSIPluginByID(ws, "foo") + plug, err = store.CSIPluginByID(ws, "foo") must.NotNil(t, plug) must.NoError(t, err) @@ -2328,6 +2336,7 @@ func TestCoreScheduler_CSIPluginGC(t *testing.T) { plug = plug.Copy() plug.Controllers = map[string]*structs.CSIInfo{} plug.Nodes = map[string]*structs.CSIInfo{} + plug.ModifyTime = time.Now().Add(-6 * time.Hour).UnixNano() job := mock.CSIPluginJob(structs.CSIPluginTypeController, plug.ID) index++ @@ -2939,7 +2948,7 @@ func TestCoreScheduler_ExpiredACLTokenGC(t *testing.T) { // Set creation time in the past for all the tokens, otherwise GC won't trigger for _, token := range []*structs.ACLToken{expiredGlobal, unexpiredGlobal, expiredLocal, unexpiredLocal} { - token.CreateTime = time.Now().Add(-1 * 10 * time.Hour) + token.CreateTime = time.Now().Add(-10 * time.Hour) } // Upsert these into state. 
@@ -3001,7 +3010,7 @@ func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { for i := 0; i < 20; i++ { mockedToken := mock.ACLToken() mockedToken.Global = true - mockedToken.CreateTime = time.Now().Add(-1 * 10 * time.Hour) + mockedToken.CreateTime = time.Now().Add(-10 * time.Hour) if i%2 == 0 { expiredGlobalTokens = append(expiredGlobalTokens, mockedToken) mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-24 * time.Hour)) @@ -3016,7 +3025,7 @@ func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) { for i := 0; i < 20; i++ { mockedToken := mock.ACLToken() mockedToken.Global = false - mockedToken.CreateTime = time.Now().Add(-1 * 10 * time.Hour) + mockedToken.CreateTime = time.Now().Add(-10 * time.Hour) if i%2 == 0 { expiredLocalTokens = append(expiredLocalTokens, mockedToken) mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-24 * time.Hour)) diff --git a/nomad/mock/csi.go b/nomad/mock/csi.go index 350490dcdff..01807dd4e3f 100644 --- a/nomad/mock/csi.go +++ b/nomad/mock/csi.go @@ -51,8 +51,8 @@ func CSIVolume(plugin *structs.CSIPlugin) *structs.CSIVolume { ControllersExpected: len(plugin.Controllers), NodesHealthy: plugin.NodesHealthy, NodesExpected: len(plugin.Nodes), - CreateTime: time.Now().Add(-1 * time.Hour).UnixNano(), - ModifyTime: time.Now().UnixNano(), + CreateTime: time.Now().Add(-6 * time.Hour).UnixNano(), + ModifyTime: time.Now().Add(-5 * time.Hour).UnixNano(), } } diff --git a/nomad/mock/job.go b/nomad/mock/job.go index 044235273d5..9e37f524209 100644 --- a/nomad/mock/job.go +++ b/nomad/mock/job.go @@ -144,7 +144,7 @@ func Job() *structs.Job { CreateIndex: 42, ModifyIndex: 99, JobModifyIndex: 99, - SubmitTime: time.Now().Add(-1 * 6 * time.Hour).UnixNano(), + SubmitTime: time.Now().Add(-6 * time.Hour).UnixNano(), } job.Canonicalize() return job diff --git a/nomad/state/testing.go b/nomad/state/testing.go index 596ac4421ae..e238dbedfed 100644 --- a/nomad/state/testing.go +++ b/nomad/state/testing.go @@ -92,7 +92,7 @@ func createTestCSIPlugin(s *StateStore, id string, requiresController bool) func SupportsCreateDeleteSnapshot: true, SupportsListSnapshots: true, }, - UpdateTime: time.Now().Add(-1 * 6 * time.Hour), + UpdateTime: time.Now().Add(-6 * time.Hour), }, } diff --git a/nomad/system_endpoint_test.go b/nomad/system_endpoint_test.go index a2fff45e64e..5d879b83481 100644 --- a/nomad/system_endpoint_test.go +++ b/nomad/system_endpoint_test.go @@ -35,14 +35,14 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) { job.Type = structs.JobTypeBatch job.Stop = true // submit time must be older than default job GC - job.SubmitTime = time.Now().Add(-1 * 6 * time.Hour).UnixNano() + job.SubmitTime = time.Now().Add(-6 * time.Hour).UnixNano() must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) eval := mock.Eval() eval.Status = structs.EvalStatusComplete eval.JobID = job.ID // modify time must be older than default eval GC - eval.ModifyTime = time.Now().Add(-1 * 5 * time.Hour).UnixNano() + eval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval})) // Make the GC request From d88e6b954c5f2770e02a30cc70a7ace542a56615 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Thu, 31 Oct 2024 21:17:17 +0100 Subject: [PATCH 34/40] pruneUnblockIndexes --- nomad/blocked_evals.go | 66 ++++++++++++++++++++++------------------ nomad/core_sched_test.go | 23 ++++++++------ 2 files changed, 49 
insertions(+), 40 deletions(-) diff --git a/nomad/blocked_evals.go b/nomad/blocked_evals.go index 95917a5ab78..3889294bd7e 100644 --- a/nomad/blocked_evals.go +++ b/nomad/blocked_evals.go @@ -60,11 +60,11 @@ type BlockedEvals struct { // blocked eval exists for each job. The value is the blocked evaluation ID. jobs map[structs.NamespacedID]string - // unblockIndexes maps computed node classes or quota name to the index in - // which they were unblocked. This is used to check if an evaluation could - // have been unblocked between the time they were in the scheduler and the - // time they are being blocked. - unblockIndexes map[string]uint64 + // unblockIndexes maps computed node classes or quota name to the index and + // time at which they were unblocked. This is used to check if an + // evaluation could have been unblocked between the time they were in the + // scheduler and the time they are being blocked. + unblockIndexes map[string]unblockEvent // duplicates is the set of evaluations for jobs that had pre-existing // blocked evaluations. These should be marked as cancelled since only one @@ -80,6 +80,12 @@ type BlockedEvals struct { stopCh chan struct{} } +// unblockEvent keeps a record of the index and time of the unblock +type unblockEvent struct { + index uint64 + timestamp time.Time +} + // capacityUpdate stores unblock data. type capacityUpdate struct { computedClass string @@ -103,7 +109,7 @@ func NewBlockedEvals(evalBroker *EvalBroker, logger hclog.Logger) *BlockedEvals escaped: make(map[string]wrappedEval), system: newSystemEvals(), jobs: make(map[structs.NamespacedID]string), - unblockIndexes: make(map[string]uint64), + unblockIndexes: make(map[string]unblockEvent), capacityChangeCh: make(chan *capacityUpdate, unblockBuffer), duplicateCh: make(chan struct{}, 1), stopCh: make(chan struct{}), @@ -293,10 +299,10 @@ func latestEvalIndex(eval *structs.Evaluation) uint64 { // the lock held. func (b *BlockedEvals) missedUnblock(eval *structs.Evaluation) bool { var max uint64 = 0 - for id, index := range b.unblockIndexes { + for id, u := range b.unblockIndexes { // Calculate the max unblock index - if max < index { - max = index + if max < u.index { + max = u.index } // The evaluation is blocked because it has hit a quota limit not class @@ -305,7 +311,7 @@ func (b *BlockedEvals) missedUnblock(eval *structs.Evaluation) bool { if eval.QuotaLimitReached != id { // Not a match continue - } else if eval.SnapshotIndex < index { + } else if eval.SnapshotIndex < u.index { // The evaluation was processed before the quota specification was // updated, so unblock the evaluation. return true @@ -316,7 +322,7 @@ func (b *BlockedEvals) missedUnblock(eval *structs.Evaluation) bool { } elig, ok := eval.ClassEligibility[id] - if !ok && eval.SnapshotIndex < index { + if !ok && eval.SnapshotIndex < u.index { // The evaluation was processed and did not encounter this class // because it was added after it was processed. Thus for correctness // we need to unblock it. @@ -325,7 +331,7 @@ func (b *BlockedEvals) missedUnblock(eval *structs.Evaluation) bool { // The evaluation could use the computed node class and the eval was // processed before the last unblock. - if elig && eval.SnapshotIndex < index { + if elig && eval.SnapshotIndex < u.index { return true } } @@ -405,7 +411,7 @@ func (b *BlockedEvals) Unblock(computedClass string, index uint64) { // Store the index in which the unblock happened. 
We use this on subsequent // block calls in case the evaluation was in the scheduler when a trigger // occurred. - b.unblockIndexes[computedClass] = index + b.unblockIndexes[computedClass] = unblockEvent{index, time.Now()} // Capture chan in lock as Flush overwrites it ch := b.capacityChangeCh @@ -440,7 +446,7 @@ func (b *BlockedEvals) UnblockQuota(quota string, index uint64) { // Store the index in which the unblock happened. We use this on subsequent // block calls in case the evaluation was in the scheduler when a trigger // occurred. - b.unblockIndexes[quota] = index + b.unblockIndexes[quota] = unblockEvent{index, time.Now()} ch := b.capacityChangeCh done := b.stopCh b.l.Unlock() @@ -469,10 +475,11 @@ func (b *BlockedEvals) UnblockClassAndQuota(class, quota string, index uint64) { // Store the index in which the unblock happened. We use this on subsequent // block calls in case the evaluation was in the scheduler when a trigger // occurred. + now := time.Now() if quota != "" { - b.unblockIndexes[quota] = index + b.unblockIndexes[quota] = unblockEvent{index, now} } - b.unblockIndexes[class] = index + b.unblockIndexes[class] = unblockEvent{index, now} // Capture chan inside the lock to prevent a race with it getting reset // in Flush. @@ -689,7 +696,7 @@ func (b *BlockedEvals) Flush() { b.captured = make(map[string]wrappedEval) b.escaped = make(map[string]wrappedEval) b.jobs = make(map[structs.NamespacedID]string) - b.unblockIndexes = make(map[string]uint64) + b.unblockIndexes = make(map[string]unblockEvent) b.duplicates = nil b.capacityChangeCh = make(chan *capacityUpdate, unblockBuffer) b.stopCh = make(chan struct{}) @@ -763,25 +770,24 @@ func (b *BlockedEvals) prune(stopCh <-chan struct{}) { return case t := <-ticker.C: cutoff := t.UTC().Add(-1 * pruneThreshold) - // b.pruneUnblockIndexes(cutoff) + b.pruneUnblockIndexes(cutoff) b.pruneStats(cutoff) } } } // pruneUnblockIndexes is used to prune any tracked entry that is excessively -// old. This protects againsts unbounded growth of the map. -// func (b *BlockedEvals) pruneUnblockIndexes(cutoff time.Time) { -// b.l.Lock() -// defer b.l.Unlock() - -// oldThreshold := b.timetable.NearestIndex(cutoff) -// for key, index := range b.unblockIndexes { -// if index < oldThreshold { -// delete(b.unblockIndexes, key) -// } -// } -// } +// old. This protects against unbounded growth of the map. +func (b *BlockedEvals) pruneUnblockIndexes(cutoff time.Time) { + b.l.Lock() + defer b.l.Unlock() + + for key, u := range b.unblockIndexes { + if u.timestamp.Before(cutoff) { + delete(b.unblockIndexes, key) + } + } +} // pruneStats is used to prune any zero value stats that are excessively old. 
func (b *BlockedEvals) pruneStats(cutoff time.Time) { diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index ef9d872917f..c31df536463 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -299,8 +299,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobEval.Status = structs.EvalStatusComplete stoppedJobEval.Type = structs.JobTypeBatch stoppedJobEval.JobID = stoppedJob.ID - stoppedJobEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - stoppedJobEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() + stoppedJobEval.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Evaluation{stoppedJobEval})) stoppedJobStoppedAlloc := mock.Alloc() @@ -309,6 +308,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobStoppedAlloc.EvalID = stoppedJobEval.ID stoppedJobStoppedAlloc.DesiredStatus = structs.AllocDesiredStatusStop stoppedJobStoppedAlloc.ClientStatus = structs.AllocClientStatusFailed + stoppedJobStoppedAlloc.ModifyTime = time.Now().UnixNano() stoppedJobLostAlloc := mock.Alloc() stoppedJobLostAlloc.Job = stoppedJob @@ -316,6 +316,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobLostAlloc.EvalID = stoppedJobEval.ID stoppedJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun stoppedJobLostAlloc.ClientStatus = structs.AllocClientStatusLost + stoppedJobLostAlloc.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertAllocs( structs.MsgTypeTestSetup, jobModifyIdx+3, @@ -334,8 +335,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { deadJobEval.Status = structs.EvalStatusComplete deadJobEval.Type = structs.JobTypeBatch deadJobEval.JobID = deadJob.ID - deadJobEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - deadJobEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() + deadJobEval.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{deadJobEval})) stoppedAlloc := mock.Alloc() @@ -344,6 +344,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedAlloc.EvalID = deadJobEval.ID stoppedAlloc.DesiredStatus = structs.AllocDesiredStatusStop stoppedAlloc.ClientStatus = structs.AllocClientStatusFailed + stoppedAlloc.ModifyTime = time.Now().UnixNano() lostAlloc := mock.Alloc() lostAlloc.Job = deadJob @@ -351,6 +352,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { lostAlloc.EvalID = deadJobEval.ID lostAlloc.DesiredStatus = structs.AllocDesiredStatusRun lostAlloc.ClientStatus = structs.AllocClientStatusLost + lostAlloc.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Allocation{stoppedAlloc, lostAlloc})) @@ -371,8 +373,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobEval.Status = structs.EvalStatusComplete activeJobEval.Type = structs.JobTypeBatch activeJobEval.JobID = activeJob.ID - activeJobEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - activeJobEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() + activeJobEval.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{activeJobEval})) activeJobRunningAlloc := mock.Alloc() @@ -381,6 +382,7 @@ func 
TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobRunningAlloc.EvalID = activeJobEval.ID activeJobRunningAlloc.DesiredStatus = structs.AllocDesiredStatusRun activeJobRunningAlloc.ClientStatus = structs.AllocClientStatusRunning + activeJobRunningAlloc.ModifyTime = time.Now().UnixNano() activeJobLostAlloc := mock.Alloc() activeJobLostAlloc.Job = activeJob @@ -388,6 +390,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobLostAlloc.EvalID = activeJobEval.ID activeJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun activeJobLostAlloc.ClientStatus = structs.AllocClientStatusLost + activeJobLostAlloc.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc})) @@ -395,8 +398,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompleteEval.Status = structs.EvalStatusComplete activeJobCompleteEval.Type = structs.JobTypeBatch activeJobCompleteEval.JobID = activeJob.ID - activeJobCompleteEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - activeJobCompleteEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() + activeJobCompleteEval.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{activeJobCompleteEval})) @@ -406,6 +408,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompletedEvalCompletedAlloc.EvalID = activeJobCompleteEval.ID activeJobCompletedEvalCompletedAlloc.DesiredStatus = structs.AllocDesiredStatusStop activeJobCompletedEvalCompletedAlloc.ClientStatus = structs.AllocClientStatusComplete + activeJobCompletedEvalCompletedAlloc.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc})) @@ -419,6 +422,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobEval.Status = structs.EvalStatusComplete purgedJobEval.Type = structs.JobTypeBatch purgedJobEval.JobID = purgedJob.ID + purgedJobEval.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{purgedJobEval})) purgedJobCompleteAlloc := mock.Alloc() @@ -427,6 +431,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteAlloc.EvalID = purgedJobEval.ID purgedJobCompleteAlloc.DesiredStatus = structs.AllocDesiredStatusRun purgedJobCompleteAlloc.ClientStatus = structs.AllocClientStatusLost + purgedJobCompleteAlloc.ModifyTime = time.Now().UnixNano() must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{purgedJobCompleteAlloc})) @@ -434,8 +439,6 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteEval.Status = structs.EvalStatusComplete purgedJobCompleteEval.Type = structs.JobTypeBatch purgedJobCompleteEval.JobID = purgedJob.ID - purgedJobCompleteEval.CreateTime = time.Now().Add(-6 * time.Hour).UnixNano() // make sure objects we insert are older than GC thresholds - purgedJobCompleteEval.ModifyTime = time.Now().Add(-5 * time.Hour).UnixNano() must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{purgedJobCompleteEval})) From 6f60a453a08f35dd6563250cca5757f6f2c6fd27 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Fri, 1 Nov 2024 13:05:14 +0100 
Subject: [PATCH 35/40] review comments --- nomad/fsm.go | 4 ++++ nomad/structs/csi.go | 13 ++++++++----- nomad/structs/structs.go | 2 +- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/nomad/fsm.go b/nomad/fsm.go index 39837895ed0..292e8808b85 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1563,6 +1563,10 @@ func (n *nomadFSM) restoreImpl(old io.ReadCloser, filter *FSMFilter) error { // Decode snapType := SnapshotType(msgType[0]) switch snapType { + case TimeTableSnapshot: + // COMPAT: Nomad 1.9.2 removed the timetable, this case kept to gracefully handle + // tt snapshot requests + return nil case NodeSnapshot: node := new(structs.Node) if err := dec.Decode(node); err != nil { diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 858d3c385db..201e69dd4d4 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -368,8 +368,10 @@ type CSIVolListStub struct { CreateIndex uint64 ModifyIndex uint64 - CreateTime int64 - ModifyTime int64 + + // Create and modify times stored as UnixNano + CreateTime int64 + ModifyTime int64 } // NewCSIVolume creates the volume struct. No side-effects @@ -852,7 +854,7 @@ func (v *CSIVolume) Merge(other *CSIVolume) error { // Request and response wrappers type CSIVolumeRegisterRequest struct { Volumes []*CSIVolume - Timestamp int64 + Timestamp int64 // UnixNano WriteRequest } @@ -872,7 +874,7 @@ type CSIVolumeDeregisterResponse struct { type CSIVolumeCreateRequest struct { Volumes []*CSIVolume - Timestamp int64 + Timestamp int64 // UnixNano WriteRequest } @@ -929,7 +931,7 @@ type CSIVolumeClaimRequest struct { AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode State CSIVolumeClaimState - Timestamp int64 + Timestamp int64 // UnixNano WriteRequest } @@ -1111,6 +1113,7 @@ type CSIPlugin struct { CreateIndex uint64 ModifyIndex uint64 + // Create and modify times stored as UnixNano CreateTime int64 ModifyTime int64 } diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index b877c69bdf1..2ce8d93f8fc 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -10843,7 +10843,7 @@ type DeploymentStatusUpdate struct { // StatusDescription is the new status description of the deployment. 
StatusDescription string - // UpdatedAt is the time of the update + // UpdatedAt is the time of the update, stored as UnixNano UpdatedAt int64 } From c31a0971ecc33fda24a503f6a887d420ce1ee0a5 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:02:18 +0100 Subject: [PATCH 36/40] Tim's comment about API package UnixNano explanations --- api/csi.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/api/csi.go b/api/csi.go index f77b4d96cdd..492b011aa27 100644 --- a/api/csi.go +++ b/api/csi.go @@ -350,8 +350,11 @@ type CSIVolume struct { CreateIndex uint64 ModifyIndex uint64 - CreateTime int64 - ModifyTime int64 + + // CreateTime stored as UnixNano + CreateTime int64 + // ModifyTime stored as UnixNano + ModifyTime int64 // ExtraKeysHCL is used by the hcl parser to report unexpected keys ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` @@ -403,8 +406,11 @@ type CSIVolumeListStub struct { CreateIndex uint64 ModifyIndex uint64 - CreateTime int64 - ModifyTime int64 + + // CreateTime stored as UnixNano + CreateTime int64 + // ModifyTime stored as UnixNano + ModifyTime int64 } type CSIVolumeListExternalResponse struct { @@ -547,8 +553,11 @@ type CSIPlugin struct { NodesExpected int CreateIndex uint64 ModifyIndex uint64 - CreateTime int64 - ModifyTime int64 + + // CreateTime stored as UnixNano + CreateTime int64 + // ModifyTime stored as UnixNano + ModifyTime int64 } type CSIPluginListStub struct { From a3ff3a27a60a056ef75ab8c8ff1d1f75ce387bba Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:50:22 +0100 Subject: [PATCH 37/40] cl --- .changelog/24112.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/24112.txt diff --git a/.changelog/24112.txt b/.changelog/24112.txt new file mode 100644 index 00000000000..6383f63ed23 --- /dev/null +++ b/.changelog/24112.txt @@ -0,0 +1,3 @@ +```release-note:bug +state: Fixed setting GC threshold to more than 72hrs being ignored +``` From 3e43b568f5517cc92fc252d7205275d1daab12a3 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:18:00 +0100 Subject: [PATCH 38/40] review comment --- nomad/structs/csi.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 201e69dd4d4..8dd85db766e 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -376,12 +376,13 @@ type CSIVolListStub struct { // NewCSIVolume creates the volume struct. No side-effects func NewCSIVolume(volumeID string, index uint64) *CSIVolume { + now := time.Now().UnixNano() out := &CSIVolume{ ID: volumeID, CreateIndex: index, ModifyIndex: index, - CreateTime: time.Now().UnixNano(), - ModifyTime: time.Now().UnixNano(), + CreateTime: now, + ModifyTime: now, } out.newStructs() @@ -1120,12 +1121,13 @@ type CSIPlugin struct { // NewCSIPlugin creates the plugin struct. 
No side-effects func NewCSIPlugin(id string, index uint64) *CSIPlugin { + now := time.Now().UnixNano() out := &CSIPlugin{ ID: id, CreateIndex: index, ModifyIndex: index, - CreateTime: time.Now().UnixNano(), - ModifyTime: time.Now().UnixNano(), + CreateTime: now, + ModifyTime: now, } out.newStructs() From da2d7413e090fef9403e5ac52e5c4c2d6589a255 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:18:19 +0100 Subject: [PATCH 39/40] TestCoreScheduler_EvalGC_Batch fix --- nomad/core_sched.go | 111 ++++++++++++++++++++++++++++++++------- nomad/core_sched_test.go | 61 ++++++++------------- 2 files changed, 114 insertions(+), 58 deletions(-) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index e45f2f5cdce..73431b6e81d 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -27,6 +27,18 @@ type CoreScheduler struct { srv *Server snap *state.StateSnapshot logger log.Logger + + // custom GC Threshold values can be used by unit tests to simulate time + // manipulation + customJobGCThreshold time.Duration + customEvalGCThreshold time.Duration + customBatchEvalGCThreshold time.Duration + customNodeGCThreshold time.Duration + customDeploymentGCThreshold time.Duration + customCSIVolumeClaimGCThreshold time.Duration + customCSIPluginGCThreshold time.Duration + customACLTokenExpirationGCThreshold time.Duration + customRootKeyGCThreshold time.Duration } // NewCoreScheduler is used to return a new system scheduler instance @@ -44,13 +56,13 @@ func (c *CoreScheduler) Process(eval *structs.Evaluation) error { job := strings.Split(eval.JobID, ":") // extra data can be smuggled in w/ JobID switch job[0] { case structs.CoreJobEvalGC: - return c.evalGC(eval) + return c.evalGC() case structs.CoreJobNodeGC: return c.nodeGC(eval) case structs.CoreJobJobGC: return c.jobGC(eval) case structs.CoreJobDeploymentGC: - return c.deploymentGC(eval) + return c.deploymentGC() case structs.CoreJobCSIVolumeClaimGC: return c.csiVolumeClaimGC(eval) case structs.CoreJobCSIPluginGC: @@ -77,10 +89,10 @@ func (c *CoreScheduler) forceGC(eval *structs.Evaluation) error { if err := c.jobGC(eval); err != nil { return err } - if err := c.evalGC(eval); err != nil { + if err := c.evalGC(); err != nil { return err } - if err := c.deploymentGC(eval); err != nil { + if err := c.deploymentGC(); err != nil { return err } if err := c.csiPluginGC(eval); err != nil { @@ -115,7 +127,15 @@ func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error { return err } - cutoffTime := c.getCutoffTime(c.srv.config.JobGCThreshold) + var threshold time.Duration + threshold = c.srv.config.JobGCThreshold + + // custom threshold override + if c.customJobGCThreshold != 0 { + threshold = c.customJobGCThreshold + } + + cutoffTime := c.getCutoffTime(threshold) // Collect the allocations, evaluations and jobs to GC var gcAlloc, gcEval []string @@ -243,7 +263,7 @@ func (c *CoreScheduler) partitionJobReap(jobs []*structs.Job, leaderACL string, } // evalGC is used to garbage collect old evaluations -func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error { +func (c *CoreScheduler) evalGC() error { // Iterate over the evaluations ws := memdb.NewWatchSet() iter, err := c.snap.Evals(ws, false) @@ -251,8 +271,20 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error { return err } - cutoffTime := c.getCutoffTime(c.srv.config.EvalGCThreshold) - batchCutoffTime := c.getCutoffTime(c.srv.config.BatchEvalGCThreshold) + var threshold, batchThreshold time.Duration + threshold = 
c.srv.config.EvalGCThreshold + batchThreshold = c.srv.config.BatchEvalGCThreshold + + // custom threshold override + if c.customEvalGCThreshold != 0 { + threshold = c.customEvalGCThreshold + } + if c.customBatchEvalGCThreshold != 0 { + batchThreshold = c.customBatchEvalGCThreshold + } + + cutoffTime := c.getCutoffTime(threshold) + batchCutoffTime := c.getCutoffTime(batchThreshold) // Collect the allocations and evaluations to GC var gcAlloc, gcEval []string @@ -293,7 +325,7 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, cutoffTime time.Time, a bool, []string, error) { // Ignore non-terminal and new evaluations - mt := time.Unix(0, eval.ModifyTime) + mt := time.Unix(0, eval.ModifyTime).UTC() if !eval.TerminalStatus() || mt.After(cutoffTime) { return false, nil, nil } @@ -356,8 +388,8 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, cutoffTime time.Time, a return gcEval, gcAllocIDs, nil } -// olderVersionTerminalAllocs returns a list of terminal allocations that belong to the evaluation and may be -// GCed. +// olderVersionTerminalAllocs returns a list of terminal allocations that belong +// to the evaluation and may be GCed. func olderVersionTerminalAllocs(allocs []*structs.Allocation, job *structs.Job, cutoffTime time.Time) []string { var ret []string for _, alloc := range allocs { @@ -438,7 +470,14 @@ func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error { return err } - cutoffTime := c.getCutoffTime(c.srv.config.NodeGCThreshold) + var threshold time.Duration + threshold = c.srv.config.NodeGCThreshold + + // custom threshold override + if c.customNodeGCThreshold != 0 { + threshold = c.customNodeGCThreshold + } + cutoffTime := c.getCutoffTime(threshold) // Collect the nodes to GC var gcNode []string @@ -527,7 +566,7 @@ func (c *CoreScheduler) nodeReap(eval *structs.Evaluation, nodeIDs []string) err } // deploymentGC is used to garbage collect old deployments -func (c *CoreScheduler) deploymentGC(eval *structs.Evaluation) error { +func (c *CoreScheduler) deploymentGC() error { // Iterate over the deployments ws := memdb.NewWatchSet() iter, err := c.snap.Deployments(ws, state.SortDefault) @@ -535,7 +574,14 @@ func (c *CoreScheduler) deploymentGC(eval *structs.Evaluation) error { return err } - cutoffTime := c.getCutoffTime(c.srv.config.DeploymentGCThreshold) + var threshold time.Duration + threshold = c.srv.config.DeploymentGCThreshold + + // custom threshold override + if c.customDeploymentGCThreshold != 0 { + threshold = c.customDeploymentGCThreshold + } + cutoffTime := c.getCutoffTime(threshold) // Collect the deployments to GC var gcDeployment []string @@ -728,7 +774,14 @@ func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error { return err } - cutoffTime := c.getCutoffTime(c.srv.config.CSIVolumeClaimGCThreshold) + var threshold time.Duration + threshold = c.srv.config.CSIVolumeClaimGCThreshold + + // custom threshold override + if c.customCSIVolumeClaimGCThreshold != 0 { + threshold = c.customCSIVolumeClaimGCThreshold + } + cutoffTime := c.getCutoffTime(threshold) for i := iter.Next(); i != nil; i = iter.Next() { vol := i.(*structs.CSIVolume) @@ -768,7 +821,14 @@ func (c *CoreScheduler) csiPluginGC(eval *structs.Evaluation) error { return err } - cutoffTime := c.getCutoffTime(c.srv.config.CSIPluginGCThreshold) + var threshold time.Duration + threshold = c.srv.config.CSIPluginGCThreshold + + // custom threshold override + if c.customCSIPluginGCThreshold != 0 { + threshold = c.customCSIPluginGCThreshold + } + cutoffTime := 
c.getCutoffTime(threshold) for i := iter.Next(); i != nil; i = iter.Next() { plugin := i.(*structs.CSIPlugin) @@ -829,7 +889,14 @@ func (c *CoreScheduler) expiredACLTokenGC(eval *structs.Evaluation, global bool) return nil } - cutoffTime := c.getCutoffTime(c.srv.config.ACLTokenExpirationGCThreshold) + var threshold time.Duration + threshold = c.srv.config.ACLTokenExpirationGCThreshold + + // custom threshold override + if c.customACLTokenExpirationGCThreshold != 0 { + threshold = c.customACLTokenExpirationGCThreshold + } + cutoffTime := c.getCutoffTime(threshold) expiredIter, err := c.snap.ACLTokensByExpired(global) if err != nil { @@ -936,11 +1003,19 @@ func (c *CoreScheduler) rootKeyGC(eval *structs.Evaluation, now time.Time) error return err } + var threshold time.Duration + threshold = c.srv.config.RootKeyGCThreshold + + // custom threshold override + if c.customRootKeyGCThreshold != 0 { + threshold = c.customRootKeyGCThreshold + } + // the threshold is longer than we can support with the time table, and we // never want to force-GC keys because that will orphan signed Workload // Identities rotationThreshold := now.Add(-1 * - (c.srv.config.RootKeyRotationThreshold + c.srv.config.RootKeyGCThreshold)) + (c.srv.config.RootKeyRotationThreshold + threshold)) for { raw := iter.Next() diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index c31df536463..edf646305b4 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -275,8 +275,9 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { s1, cleanupS1 := TestServer(t, func(c *Config) { // Set EvalGCThreshold past BatchEvalThreshold to make sure that only // BatchEvalThreshold affects the results. - c.BatchEvalGCThreshold = time.Hour - c.EvalGCThreshold = 2 * time.Hour + c.BatchEvalGCThreshold = 2 * time.Hour + c.EvalGCThreshold = 4 * time.Hour + c.JobGCThreshold = 2 * time.Hour }) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -299,7 +300,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobEval.Status = structs.EvalStatusComplete stoppedJobEval.Type = structs.JobTypeBatch stoppedJobEval.JobID = stoppedJob.ID - stoppedJobEval.ModifyTime = time.Now().UnixNano() + stoppedJobEval.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() // set to less than initial BatchEvalGCThreshold must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Evaluation{stoppedJobEval})) stoppedJobStoppedAlloc := mock.Alloc() @@ -308,7 +309,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobStoppedAlloc.EvalID = stoppedJobEval.ID stoppedJobStoppedAlloc.DesiredStatus = structs.AllocDesiredStatusStop stoppedJobStoppedAlloc.ClientStatus = structs.AllocClientStatusFailed - stoppedJobStoppedAlloc.ModifyTime = time.Now().UnixNano() + stoppedJobStoppedAlloc.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() stoppedJobLostAlloc := mock.Alloc() stoppedJobLostAlloc.Job = stoppedJob @@ -316,7 +317,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { stoppedJobLostAlloc.EvalID = stoppedJobEval.ID stoppedJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun stoppedJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - stoppedJobLostAlloc.ModifyTime = time.Now().UnixNano() + stoppedJobLostAlloc.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() must.NoError(t, store.UpsertAllocs( structs.MsgTypeTestSetup, jobModifyIdx+3, @@ -335,7 +336,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { deadJobEval.Status = structs.EvalStatusComplete 
deadJobEval.Type = structs.JobTypeBatch deadJobEval.JobID = deadJob.ID - deadJobEval.ModifyTime = time.Now().UnixNano() + deadJobEval.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() // set to less than initial BatchEvalGCThreshold must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{deadJobEval})) stoppedAlloc := mock.Alloc() @@ -373,7 +374,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobEval.Status = structs.EvalStatusComplete activeJobEval.Type = structs.JobTypeBatch activeJobEval.JobID = activeJob.ID - activeJobEval.ModifyTime = time.Now().UnixNano() + activeJobEval.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() // set to less than initial BatchEvalGCThreshold must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{activeJobEval})) activeJobRunningAlloc := mock.Alloc() @@ -390,7 +391,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobLostAlloc.EvalID = activeJobEval.ID activeJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun activeJobLostAlloc.ClientStatus = structs.AllocClientStatusLost - activeJobLostAlloc.ModifyTime = time.Now().UnixNano() + activeJobLostAlloc.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc})) @@ -398,7 +399,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompleteEval.Status = structs.EvalStatusComplete activeJobCompleteEval.Type = structs.JobTypeBatch activeJobCompleteEval.JobID = activeJob.ID - activeJobCompleteEval.ModifyTime = time.Now().UnixNano() + activeJobCompleteEval.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() // set to less than initial BatchEvalGCThreshold must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{activeJobCompleteEval})) @@ -408,7 +409,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJobCompletedEvalCompletedAlloc.EvalID = activeJobCompleteEval.ID activeJobCompletedEvalCompletedAlloc.DesiredStatus = structs.AllocDesiredStatusStop activeJobCompletedEvalCompletedAlloc.ClientStatus = structs.AllocClientStatusComplete - activeJobCompletedEvalCompletedAlloc.ModifyTime = time.Now().UnixNano() + activeJobCompletedEvalCompletedAlloc.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc})) @@ -422,7 +423,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobEval.Status = structs.EvalStatusComplete purgedJobEval.Type = structs.JobTypeBatch purgedJobEval.JobID = purgedJob.ID - purgedJobEval.ModifyTime = time.Now().UnixNano() + purgedJobEval.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() // set to less than initial BatchEvalGCThreshold must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{purgedJobEval})) purgedJobCompleteAlloc := mock.Alloc() @@ -431,7 +432,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteAlloc.EvalID = purgedJobEval.ID purgedJobCompleteAlloc.DesiredStatus = structs.AllocDesiredStatusRun purgedJobCompleteAlloc.ClientStatus = structs.AllocClientStatusLost - purgedJobCompleteAlloc.ModifyTime = time.Now().UnixNano() + purgedJobCompleteAlloc.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() 
must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{purgedJobCompleteAlloc})) @@ -439,6 +440,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJobCompleteEval.Status = structs.EvalStatusComplete purgedJobCompleteEval.Type = structs.JobTypeBatch purgedJobCompleteEval.JobID = purgedJob.ID + purgedJobCompleteEval.ModifyTime = time.Now().UTC().Add(-1 * time.Hour).UnixNano() // set to less than initial BatchEvalGCThreshold must.NoError(t, store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{purgedJobCompleteEval})) @@ -493,12 +495,12 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { } } - // Create a core scheduler + // Create a core scheduler, no time modifications snap, err := store.Snapshot() must.NoError(t, err) core := NewCoreScheduler(s1, snap) - // Attempt the GC without moving the time at all + // Attempt the GC gc := s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx) err = core.Process(gc) must.NoError(t, err) @@ -523,33 +525,12 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { []*structs.Allocation{}, ) + // set a shorter GC threshold this time gc = s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx*2) - err = core.Process(gc) - must.NoError(t, err) - - // Nothing is gone. - assertCorrectJobEvalAlloc( - memdb.NewWatchSet(), - []*structs.Job{deadJob, activeJob, stoppedJob}, - []*structs.Job{}, - []*structs.Evaluation{ - deadJobEval, - activeJobEval, activeJobCompleteEval, - stoppedJobEval, - purgedJobEval, - }, - []*structs.Evaluation{}, - []*structs.Allocation{ - stoppedAlloc, lostAlloc, - activeJobRunningAlloc, activeJobLostAlloc, activeJobCompletedEvalCompletedAlloc, - stoppedJobStoppedAlloc, stoppedJobLostAlloc, - }, - []*structs.Allocation{}, - ) - - gc = s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx*2) - err = core.Process(gc) - must.NoError(t, err) + core.(*CoreScheduler).customBatchEvalGCThreshold = time.Minute + //core.(*CoreScheduler).customEvalGCThreshold = time.Minute + //core.(*CoreScheduler).customJobGCThreshold = time.Minute + must.NoError(t, core.Process(gc)) // We expect the following: // From 752d92761b8c2f0d3e213093ea85cbb70e6349b6 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:28:39 +0100 Subject: [PATCH 40/40] upgrade guide entry --- website/content/docs/upgrade/upgrade-specific.mdx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/website/content/docs/upgrade/upgrade-specific.mdx b/website/content/docs/upgrade/upgrade-specific.mdx index c1fac53a6cd..8e008ee6687 100644 --- a/website/content/docs/upgrade/upgrade-specific.mdx +++ b/website/content/docs/upgrade/upgrade-specific.mdx @@ -13,6 +13,14 @@ upgrade. However, specific versions of Nomad may have more details provided for their upgrades as a result of new features or changed behavior. This page is used to document those details separately from the standard upgrade flow. +## Nomad 1.9.2 + +In Nomad 1.9.2, the mechanism used for calculating when objects are eligible +for garbage collection changes to a clock-based one. This has two consequences. +First, it allows operators to set arbitrarily long GC thresholds. Second, it requires that +Nomad server clocks are kept roughly in sync, because GC can originate on a +follower. + ## Nomad 1.9.0 #### Dropped support for older clients
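Taken together, patches 35–39 replace the index-to-time lookup with a plain wall-clock comparison: GC-relevant objects carry a UnixNano ModifyTime, and each GC pass compares it against a cutoff derived from the configured threshold. The sketch below illustrates that check in isolation. It is a minimal, hedged example: gcCutoffTime and eligibleForGC are illustrative names, and the body of getCutoffTime is not shown in these hunks, so the "now minus threshold" form is an assumption based on how the cutoff is used.

```go
package main

import (
	"fmt"
	"time"
)

// gcCutoffTime stands in for CoreScheduler.getCutoffTime. Its body is not part
// of the hunks above, so this is an assumption: with the timetable gone, the
// cutoff is presumably just "now minus the configured threshold".
func gcCutoffTime(threshold time.Duration) time.Time {
	return time.Now().UTC().Add(-1 * threshold)
}

// eligibleForGC mirrors the check used in gcEval and the other GC paths: an
// object may be collected only if it is terminal and its ModifyTime (stored as
// UnixNano) falls on or before the cutoff.
func eligibleForGC(modifyTimeUnixNano int64, terminal bool, cutoff time.Time) bool {
	mt := time.Unix(0, modifyTimeUnixNano).UTC()
	if !terminal || mt.After(cutoff) {
		return false
	}
	return true
}

func main() {
	// e.g. BatchEvalGCThreshold = 2h, as in the rewritten test above
	cutoff := gcCutoffTime(2 * time.Hour)

	oldEval := time.Now().UTC().Add(-3 * time.Hour).UnixNano()      // backdated past the threshold
	freshEval := time.Now().UTC().Add(-30 * time.Minute).UnixNano() // modified recently

	fmt.Println(eligibleForGC(oldEval, true, cutoff))   // true: terminal and older than the cutoff
	fmt.Println(eligibleForGC(freshEval, true, cutoff)) // false: modified after the cutoff
	fmt.Println(eligibleForGC(oldEval, false, cutoff))  // false: not terminal
}
```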
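One design consequence worth noting: with no timetable left to rewind, the rewritten TestCoreScheduler_EvalGC_Batch backdates ModifyTime values and then shrinks the threshold through the custom*GCThreshold fields rather than witnessing synthetic indexes. Reduced to its essentials, the override pattern each GC method now repeats looks roughly like the sketch below; the types and field names are illustrative, not the exact Nomad ones.

```go
package main

import (
	"fmt"
	"time"
)

// gcThresholds captures the pattern repeated for jobs, evals, nodes,
// deployments, CSI objects, ACL tokens and root keys: take the threshold from
// the server config unless a test has injected a custom value.
type gcThresholds struct {
	configured time.Duration // e.g. c.srv.config.BatchEvalGCThreshold
	custom     time.Duration // e.g. c.customBatchEvalGCThreshold; zero unless a test sets it
}

func (g gcThresholds) effective() time.Duration {
	threshold := g.configured
	if g.custom != 0 {
		threshold = g.custom // custom threshold override
	}
	return threshold
}

func main() {
	prod := gcThresholds{configured: 2 * time.Hour}
	test := gcThresholds{configured: 2 * time.Hour, custom: time.Minute}

	fmt.Println(prod.effective()) // 2h0m0s: normal server behaviour
	fmt.Println(test.effective()) // 1m0s: a test forcing near-immediate GC
}
```

This keeps production behaviour driven purely by the server configuration while letting tests simulate the passage of time without mutating any shared global state.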