diff --git a/block_test.go b/block_test.go index 93f145e6..5a27a535 100644 --- a/block_test.go +++ b/block_test.go @@ -45,14 +45,14 @@ func TestSetCompactionFailed(t *testing.T) { b := createEmptyBlock(t, tmpdir, &BlockMeta{Version: 2}) - testutil.Equals(t, false, b.meta.Compaction.Failed) + testutil.Equals(t, false, b.meta.Compaction.Failed, "") testutil.Ok(t, b.setCompactionFailed()) - testutil.Equals(t, true, b.meta.Compaction.Failed) + testutil.Equals(t, true, b.meta.Compaction.Failed, "") testutil.Ok(t, b.Close()) b, err = OpenBlock(tmpdir, nil) testutil.Ok(t, err) - testutil.Equals(t, true, b.meta.Compaction.Failed) + testutil.Equals(t, true, b.meta.Compaction.Failed, "") } // createEmpty block creates a block with the given meta but without any data. diff --git a/checkpoint_test.go b/checkpoint_test.go index daa54df1..05a463af 100644 --- a/checkpoint_test.go +++ b/checkpoint_test.go @@ -32,31 +32,31 @@ func TestLastCheckpoint(t *testing.T) { defer os.RemoveAll(dir) s, k, err := LastCheckpoint(dir) - testutil.Equals(t, ErrNotFound, err) + testutil.Equals(t, ErrNotFound, err, "") testutil.Ok(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777)) s, k, err = LastCheckpoint(dir) testutil.Ok(t, err) - testutil.Equals(t, "checkpoint.0000", s) - testutil.Equals(t, 0, k) + testutil.Equals(t, "checkpoint.0000", s, "") + testutil.Equals(t, 0, k, "") testutil.Ok(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0777)) s, k, err = LastCheckpoint(dir) testutil.Ok(t, err) - testutil.Equals(t, "checkpoint.0000", s) - testutil.Equals(t, 0, k) + testutil.Equals(t, "checkpoint.0000", s, "") + testutil.Equals(t, 0, k, "") testutil.Ok(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0777)) s, k, err = LastCheckpoint(dir) testutil.Ok(t, err) - testutil.Equals(t, "checkpoint.1", s) - testutil.Equals(t, 1, k) + testutil.Equals(t, "checkpoint.1", s, "") + testutil.Equals(t, 1, k, "") testutil.Ok(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0777)) s, k, err = 
LastCheckpoint(dir) testutil.Ok(t, err) - testutil.Equals(t, "checkpoint.1000", s) - testutil.Equals(t, 1000, k) + testutil.Equals(t, "checkpoint.1000", s, "") + testutil.Equals(t, 1000, k, "") } func TestDeleteCheckpoints(t *testing.T) { @@ -75,7 +75,7 @@ func TestDeleteCheckpoints(t *testing.T) { files, err := fileutil.ReadDir(dir) testutil.Ok(t, err) - testutil.Equals(t, []string{"checkpoint.02", "checkpoint.03"}, files) + testutil.Equals(t, []string{"checkpoint.02", "checkpoint.03"}, files, "") } func TestCheckpoint(t *testing.T) { @@ -145,8 +145,8 @@ func TestCheckpoint(t *testing.T) { // Only the new checkpoint should be left. files, err := fileutil.ReadDir(dir) testutil.Ok(t, err) - testutil.Equals(t, 1, len(files)) - testutil.Equals(t, "checkpoint.000106", files[0]) + testutil.Equals(t, 1, len(files), "") + testutil.Equals(t, "checkpoint.000106", files[0], "") sr, err := wal.NewSegmentsReader(filepath.Join(dir, "checkpoint.000106")) testutil.Ok(t, err) @@ -176,5 +176,5 @@ func TestCheckpoint(t *testing.T) { {Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")}, {Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")}, {Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")}, - }, series) + }, series, "") } diff --git a/compact_test.go b/compact_test.go index 42e38b5c..7ad6262f 100644 --- a/compact_test.go +++ b/compact_test.go @@ -123,7 +123,7 @@ func TestSplitByRange(t *testing.T) { } } - testutil.Equals(t, exp, splitByRange(blocks, c.trange)) + testutil.Equals(t, exp, splitByRange(blocks, c.trange), "") } } @@ -309,7 +309,7 @@ func TestLeveledCompactor_plan(t *testing.T) { res, err := compactor.plan(c.metas) testutil.Ok(t, err) - testutil.Equals(t, c.expected, res) + testutil.Equals(t, c.expected, res, "") }) { return } @@ -362,7 +362,7 @@ func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) { res, err := compactor.plan(c.metas) testutil.Ok(t, err) - testutil.Equals(t, []string(nil), res) + testutil.Equals(t, []string(nil), res, "") } } diff 
--git a/db_test.go b/db_test.go index 073b727b..34224ab2 100644 --- a/db_test.go +++ b/db_test.go @@ -90,10 +90,10 @@ func TestDB_reloadOrder(t *testing.T) { testutil.Ok(t, db.reload()) blocks := db.Blocks() - testutil.Equals(t, 3, len(blocks)) - testutil.Equals(t, *metas[1], blocks[0].Meta()) - testutil.Equals(t, *metas[0], blocks[1].Meta()) - testutil.Equals(t, *metas[2], blocks[2].Meta()) + testutil.Equals(t, 3, len(blocks), "") + testutil.Equals(t, *metas[1], blocks[0].Meta(), "") + testutil.Equals(t, *metas[0], blocks[1].Meta(), "") + testutil.Equals(t, *metas[2], blocks[2].Meta(), "") } func TestDataAvailableOnlyAfterCommit(t *testing.T) { @@ -110,7 +110,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) { testutil.Ok(t, err) seriesSet := query(t, querier, labels.NewEqualMatcher("foo", "bar")) - testutil.Equals(t, map[string][]sample{}, seriesSet) + testutil.Equals(t, seriesSet, map[string][]sample{}, "") testutil.Ok(t, querier.Close()) err = app.Commit() @@ -122,7 +122,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) { seriesSet = query(t, querier, labels.NewEqualMatcher("foo", "bar")) - testutil.Equals(t, map[string][]sample{`{foo="bar"}`: {{t: 0, v: 0}}}, seriesSet) + testutil.Equals(t, seriesSet, map[string][]sample{`{foo="bar"}`: {{t: 0, v: 0}}}, "") } func TestDataNotAvailableAfterRollback(t *testing.T) { @@ -143,7 +143,7 @@ func TestDataNotAvailableAfterRollback(t *testing.T) { seriesSet := query(t, querier, labels.NewEqualMatcher("foo", "bar")) - testutil.Equals(t, map[string][]sample{}, seriesSet) + testutil.Equals(t, seriesSet, map[string][]sample{}, "") } func TestDBAppenderAddRef(t *testing.T) { @@ -179,7 +179,7 @@ func TestDBAppenderAddRef(t *testing.T) { testutil.Ok(t, err) err = app2.AddFast(9999999, 1, 1) - testutil.Equals(t, ErrNotFound, errors.Cause(err)) + testutil.Equals(t, errors.Cause(err), ErrNotFound, "") testutil.Ok(t, app2.Commit()) @@ -196,7 +196,7 @@ func TestDBAppenderAddRef(t *testing.T) { {t: 133, v: 1}, {t: 143, 
v: 2}, }, - }, res) + }, res, "") testutil.Ok(t, q.Close()) } @@ -258,7 +258,7 @@ Outer: for { eok, rok := expss.Next(), res.Next() - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { continue Outer @@ -266,13 +266,13 @@ Outer: sexp := expss.At() sres := res.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } } } @@ -289,7 +289,7 @@ func TestAmendDatapointCausesError(t *testing.T) { app = db.Appender() _, err = app.Add(labels.Labels{}, 0, 1) - testutil.Equals(t, ErrAmendSample, err) + testutil.Equals(t, ErrAmendSample, err, "") testutil.Ok(t, app.Rollback()) } @@ -320,7 +320,7 @@ func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) { app = db.Appender() _, err = app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000002)) - testutil.Equals(t, ErrAmendSample, err) + testutil.Equals(t, ErrAmendSample, err, "") } func TestSkippingInvalidValuesInSameTxn(t *testing.T) { @@ -344,7 +344,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) { testutil.Equals(t, map[string][]sample{ labels.New(labels.Label{"a", "b"}).String(): {{0, 1}}, - }, ssMap) + }, ssMap, "") testutil.Ok(t, q.Close()) @@ -363,7 +363,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) { testutil.Equals(t, map[string][]sample{ labels.New(labels.Label{"a", "b"}).String(): {{0, 1}, {10, 3}}, - }, ssMap) + }, ssMap, "") testutil.Ok(t, q.Close()) } @@ -412,7 +412,7 @@ func TestDB_Snapshot(t *testing.T) { testutil.Ok(t, series.Err()) } testutil.Ok(t, seriesSet.Err()) - testutil.Equals(t, 1000.0, sum) + testutil.Equals(t, sum, 1000.0, "") } func TestDB_SnapshotWithDelete(t *testing.T) { @@ -485,7 +485,7 @@ Outer: for { eok, 
rok := expss.Next(), res.Next() - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { continue Outer @@ -493,13 +493,13 @@ Outer: sexp := expss.At() sres := res.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } } } @@ -656,7 +656,7 @@ func TestDB_e2e(t *testing.T) { } testutil.Ok(t, ss.Err()) - testutil.Equals(t, expected, result) + testutil.Equals(t, expected, result, "") q.Close() } @@ -689,7 +689,7 @@ func TestWALFlushedOnDBClose(t *testing.T) { values, err := q.LabelValues("labelname") testutil.Ok(t, err) - testutil.Equals(t, []string{"labelvalue"}, values) + testutil.Equals(t, values, []string{"labelvalue"}, "") } func TestTombstoneClean(t *testing.T) { @@ -764,7 +764,7 @@ func TestTombstoneClean(t *testing.T) { for { eok, rok := expss.Next(), res.Next() - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { break @@ -772,17 +772,17 @@ func TestTombstoneClean(t *testing.T) { sexp := expss.At() sres := res.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } for _, b := range db.blocks { - testutil.Equals(t, NewMemTombstones(), b.tombstones) + testutil.Equals(t, NewMemTombstones(), b.tombstones, "") } } } @@ -833,7 +833,7 @@ func TestTombstoneCleanFail(t *testing.T) { // Now check that the CleanTombstones didn't leave any blocks behind after a failure. 
actualBlockDirs, err := blockDirs(db.dir) testutil.Ok(t, err) - testutil.Equals(t, expectedBlockDirs, actualBlockDirs) + testutil.Equals(t, expectedBlockDirs, actualBlockDirs, "") } // mockCompactorFailing creates a new empty block on every write and fails when reached the max allowed total. @@ -871,7 +871,7 @@ func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int6 actualBlockDirs, err := blockDirs(dest) testutil.Ok(c.t, err) - testutil.Equals(c.t, expectedBlocks, actualBlockDirs) + testutil.Equals(c.t, expectedBlocks, actualBlockDirs, "") return block.Meta().ULID, nil } @@ -905,7 +905,7 @@ func TestDB_Retention(t *testing.T) { db, err = Open(snap, nil, nil, nil) testutil.Ok(t, err) - testutil.Equals(t, 1, len(db.blocks)) + testutil.Equals(t, 1, len(db.blocks), "") app = db.Appender() _, err = app.Add(lbls, 100, 1) @@ -928,12 +928,12 @@ func TestDB_Retention(t *testing.T) { testutil.Ok(t, err) defer db.Close() - testutil.Equals(t, 2, len(db.blocks)) + testutil.Equals(t, 2, len(db.blocks), "") // Reload blocks, which should drop blocks beyond the retention boundary. testutil.Ok(t, db.reload()) - testutil.Equals(t, 1, len(db.blocks)) - testutil.Equals(t, int64(100), db.blocks[0].meta.MaxTime) // To verify its the right block. + testutil.Equals(t, 1, len(db.blocks), "") + testutil.Equals(t, int64(100), db.blocks[0].meta.MaxTime, "") // To verify its the right block. } func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) { @@ -1005,7 +1005,7 @@ func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) { lres, err := expandSeriesSet(ss) testutil.Ok(t, err) - testutil.Equals(t, c.series, lres) + testutil.Equals(t, c.series, lres, "") } } @@ -1042,27 +1042,27 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { o1 := BlockMeta{MinTime: 15, MaxTime: 17} testutil.Equals(t, Overlaps{ {Min: 15, Max: 17}: {metas[1], o1}, - }, OverlappingBlocks(add(o1))) + }, OverlappingBlocks(add(o1)), "") // o2 overlaps with 20-30 and 30-40. 
o2 := BlockMeta{MinTime: 21, MaxTime: 31} testutil.Equals(t, Overlaps{ {Min: 21, Max: 30}: {metas[2], o2}, {Min: 30, Max: 31}: {o2, metas[3]}, - }, OverlappingBlocks(add(o2))) + }, OverlappingBlocks(add(o2)), "") // o3a and o3b overlaps with 30-40 and each other. o3a := BlockMeta{MinTime: 33, MaxTime: 39} o3b := BlockMeta{MinTime: 34, MaxTime: 36} testutil.Equals(t, Overlaps{ {Min: 34, Max: 36}: {metas[3], o3a, o3b}, - }, OverlappingBlocks(add(o3a, o3b))) + }, OverlappingBlocks(add(o3a, o3b)), "") // o4 is 1:1 overlap with 50-60. o4 := BlockMeta{MinTime: 50, MaxTime: 60} testutil.Equals(t, Overlaps{ {Min: 50, Max: 60}: {metas[5], o4}, - }, OverlappingBlocks(add(o4))) + }, OverlappingBlocks(add(o4)), "") // o5 overlaps with 60-70, 70-80 and 80-90. o5 := BlockMeta{MinTime: 61, MaxTime: 85} @@ -1070,7 +1070,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { {Min: 61, Max: 70}: {metas[6], o5}, {Min: 70, Max: 80}: {o5, metas[7]}, {Min: 80, Max: 85}: {o5, metas[8]}, - }, OverlappingBlocks(add(o5))) + }, OverlappingBlocks(add(o5)), "") // o6a overlaps with 90-100, 100-110 and o6b, o6b overlaps with 90-100 and o6a. o6a := BlockMeta{MinTime: 92, MaxTime: 105} @@ -1078,7 +1078,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { testutil.Equals(t, Overlaps{ {Min: 94, Max: 99}: {metas[9], o6a, o6b}, {Min: 100, Max: 105}: {o6a, metas[10]}, - }, OverlappingBlocks(add(o6a, o6b))) + }, OverlappingBlocks(add(o6a, o6b)), "") // All together. testutil.Equals(t, Overlaps{ @@ -1088,7 +1088,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { {Min: 50, Max: 60}: {metas[5], o4}, {Min: 61, Max: 70}: {metas[6], o5}, {Min: 70, Max: 80}: {o5, metas[7]}, {Min: 80, Max: 85}: {o5, metas[8]}, {Min: 94, Max: 99}: {metas[9], o6a, o6b}, {Min: 100, Max: 105}: {o6a, metas[10]}, - }, OverlappingBlocks(add(o1, o2, o3a, o3b, o4, o5, o6a, o6b))) + }, OverlappingBlocks(add(o1, o2, o3a, o3b, o4, o5, o6a, o6b)), "") // Additional case. 
var nc1 []BlockMeta @@ -1107,7 +1107,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { {Min: 3, Max: 5}: {nc1[0], nc1[5], nc1[6]}, // 1-5, 2-6, 3-5 {Min: 5, Max: 6}: {nc1[5], nc1[7]}, // 2-6, 5-7 {Min: 8, Max: 9}: {nc1[8], nc1[9]}, // 7-10, 8-9 - }, OverlappingBlocks(nc1)) + }, OverlappingBlocks(nc1), "") } // Regression test for https://github.com/prometheus/tsdb/issues/347 @@ -1207,16 +1207,16 @@ func TestInitializeHeadTimestamp(t *testing.T) { testutil.Ok(t, err) // Should be set to init values if no WAL or blocks exist so far. - testutil.Equals(t, int64(math.MaxInt64), db.head.MinTime()) - testutil.Equals(t, int64(math.MinInt64), db.head.MaxTime()) + testutil.Equals(t, int64(math.MaxInt64), db.head.MinTime(), "") + testutil.Equals(t, int64(math.MinInt64), db.head.MaxTime(), "") // First added sample initializes the writable range. app := db.Appender() _, err = app.Add(labels.FromStrings("a", "b"), 1000, 1) testutil.Ok(t, err) - testutil.Equals(t, int64(1000), db.head.MinTime()) - testutil.Equals(t, int64(1000), db.head.MaxTime()) + testutil.Equals(t, int64(1000), db.head.MinTime(), "") + testutil.Equals(t, int64(1000), db.head.MaxTime(), "") }) t.Run("wal-only", func(t *testing.T) { dir, err := ioutil.TempDir("", "test_head_init") @@ -1244,8 +1244,8 @@ func TestInitializeHeadTimestamp(t *testing.T) { db, err := Open(dir, nil, nil, nil) testutil.Ok(t, err) - testutil.Equals(t, int64(5000), db.head.MinTime()) - testutil.Equals(t, int64(15000), db.head.MaxTime()) + testutil.Equals(t, int64(5000), db.head.MinTime(), "") + testutil.Equals(t, int64(15000), db.head.MaxTime(), "") }) t.Run("existing-block", func(t *testing.T) { dir, err := ioutil.TempDir("", "test_head_init") @@ -1262,8 +1262,8 @@ func TestInitializeHeadTimestamp(t *testing.T) { db, err := Open(dir, nil, nil, nil) testutil.Ok(t, err) - testutil.Equals(t, int64(2000), db.head.MinTime()) - testutil.Equals(t, int64(2000), db.head.MaxTime()) + testutil.Equals(t, int64(2000), 
db.head.MinTime(), "") + testutil.Equals(t, int64(2000), db.head.MaxTime(), "") }) t.Run("existing-block-and-wal", func(t *testing.T) { dir, err := ioutil.TempDir("", "test_head_init") @@ -1298,7 +1298,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { db, err := Open(dir, nil, nil, nil) testutil.Ok(t, err) - testutil.Equals(t, int64(6000), db.head.MinTime()) - testutil.Equals(t, int64(15000), db.head.MaxTime()) + testutil.Equals(t, int64(6000), db.head.MinTime(), "") + testutil.Equals(t, int64(15000), db.head.MaxTime(), "") }) } diff --git a/head_test.go b/head_test.go index 0392912c..67cfef2e 100644 --- a/head_test.go +++ b/head_test.go @@ -123,17 +123,17 @@ func TestHead_ReadWAL(t *testing.T) { defer head.Close() testutil.Ok(t, head.Init()) - testutil.Equals(t, uint64(100), head.lastSeriesID) + testutil.Equals(t, uint64(100), head.lastSeriesID, "") s10 := head.series.getByID(10) s11 := head.series.getByID(11) s50 := head.series.getByID(50) s100 := head.series.getByID(100) - testutil.Equals(t, labels.FromStrings("a", "1"), s10.lset) - testutil.Equals(t, labels.FromStrings("a", "2"), s11.lset) - testutil.Equals(t, labels.FromStrings("a", "4"), s50.lset) - testutil.Equals(t, labels.FromStrings("a", "3"), s100.lset) + testutil.Equals(t, labels.FromStrings("a", "1"), s10.lset, "") + testutil.Equals(t, labels.FromStrings("a", "2"), s11.lset, "") + testutil.Equals(t, labels.FromStrings("a", "4"), s50.lset, "") + testutil.Equals(t, labels.FromStrings("a", "3"), s100.lset, "") expandChunk := func(c chunkenc.Iterator) (x []sample) { for c.Next() { @@ -144,10 +144,10 @@ func TestHead_ReadWAL(t *testing.T) { return x } - testutil.Equals(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0))) - testutil.Equals(t, 0, len(s11.chunks)) - testutil.Equals(t, []sample{{101, 6}}, expandChunk(s50.iterator(0))) - testutil.Equals(t, []sample{{100, 3}}, expandChunk(s100.iterator(0))) + testutil.Equals(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0)), "") + 
testutil.Equals(t, 0, len(s11.chunks), "") + testutil.Equals(t, []sample{{101, 6}}, expandChunk(s50.iterator(0)), "") + testutil.Equals(t, []sample{{100, 3}}, expandChunk(s100.iterator(0)), "") } func TestHead_Truncate(t *testing.T) { @@ -185,12 +185,12 @@ func TestHead_Truncate(t *testing.T) { testutil.Equals(t, []*memChunk{ {minTime: 2000, maxTime: 2999}, - }, h.series.getByID(s1.ref).chunks) + }, h.series.getByID(s1.ref).chunks, "") testutil.Equals(t, []*memChunk{ {minTime: 2000, maxTime: 2999}, {minTime: 3000, maxTime: 3999}, - }, h.series.getByID(s2.ref).chunks) + }, h.series.getByID(s2.ref).chunks, "") testutil.Assert(t, h.series.getByID(s3.ref) == nil, "") testutil.Assert(t, h.series.getByID(s4.ref) == nil, "") @@ -202,10 +202,10 @@ func TestHead_Truncate(t *testing.T) { postingsC1, _ := index.ExpandPostings(h.postings.Get("c", "1")) postingsAll, _ := index.ExpandPostings(h.postings.Get("", "")) - testutil.Equals(t, []uint64{s1.ref}, postingsA1) - testutil.Equals(t, []uint64{s2.ref}, postingsA2) - testutil.Equals(t, []uint64{s1.ref, s2.ref}, postingsB1) - testutil.Equals(t, []uint64{s1.ref, s2.ref}, postingsAll) + testutil.Equals(t, []uint64{s1.ref}, postingsA1, "") + testutil.Equals(t, []uint64{s2.ref}, postingsA2, "") + testutil.Equals(t, []uint64{s1.ref, s2.ref}, postingsB1, "") + testutil.Equals(t, []uint64{s1.ref, s2.ref}, postingsAll, "") testutil.Assert(t, postingsB2 == nil, "") testutil.Assert(t, postingsC1 == nil, "") @@ -215,13 +215,13 @@ func TestHead_Truncate(t *testing.T) { "b": {}, "1": {}, "2": {}, - }, h.symbols) + }, h.symbols, "") testutil.Equals(t, map[string]stringset{ "a": {"1": struct{}{}, "2": struct{}{}}, "b": {"1": struct{}{}}, "": {"": struct{}{}}, - }, h.values) + }, h.values, "") } // Validate various behaviors brought on by firstChunkID accounting for @@ -245,10 +245,10 @@ func TestMemSeries_truncateChunks(t *testing.T) { s.truncateChunksBefore(2000) - testutil.Equals(t, int64(2000), s.chunks[0].minTime) + testutil.Equals(t, 
int64(2000), s.chunks[0].minTime, "") testutil.Assert(t, s.chunk(0) == nil, "first chunks not gone") - testutil.Equals(t, countBefore/2, len(s.chunks)) - testutil.Equals(t, lastChunk, s.chunk(lastID)) + testutil.Equals(t, countBefore/2, len(s.chunks), "") + testutil.Equals(t, lastChunk, s.chunk(lastID), "") // Validate that the series' sample buffer is applied correctly to the last chunk // after truncation. @@ -366,7 +366,7 @@ Outer: for { eok, rok := expss.Next(), res.Next() - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { continue Outer @@ -374,13 +374,13 @@ Outer: sexp := expss.At() sres := res.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } } } @@ -420,7 +420,7 @@ func TestDeleteUntilCurMax(t *testing.T) { it := exps.Iterator() ressmpls, err := expandSeriesIterator(it) testutil.Ok(t, err) - testutil.Equals(t, []sample{{11, 1}}, ressmpls) + testutil.Equals(t, []sample{{11, 1}}, ressmpls, "") } func TestDelete_e2e(t *testing.T) { numDatapoints := 1000 @@ -566,17 +566,17 @@ func TestDelete_e2e(t *testing.T) { } } } - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { break } sexp := expSs.At() sres := ss.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } } } @@ -720,8 +720,8 @@ func TestGCChunkAccess(t *testing.T) { testutil.Equals(t, labels.Labels{{ 
Name: "a", Value: "1", - }}, lset) - testutil.Equals(t, 2, len(chunks)) + }}, lset, "") + testutil.Equals(t, 2, len(chunks), "") cr := h.chunksRange(0, 1500) _, err = cr.Chunk(chunks[0].Ref) @@ -732,7 +732,7 @@ func TestGCChunkAccess(t *testing.T) { h.Truncate(1500) // Remove a chunk. _, err = cr.Chunk(chunks[0].Ref) - testutil.Equals(t, ErrNotFound, err) + testutil.Equals(t, ErrNotFound, err, "") _, err = cr.Chunk(chunks[1].Ref) testutil.Ok(t, err) } @@ -760,8 +760,8 @@ func TestGCSeriesAccess(t *testing.T) { testutil.Equals(t, labels.Labels{{ Name: "a", Value: "1", - }}, lset) - testutil.Equals(t, 2, len(chunks)) + }}, lset, "") + testutil.Equals(t, 2, len(chunks), "") cr := h.chunksRange(0, 2000) _, err = cr.Chunk(chunks[0].Ref) @@ -771,12 +771,12 @@ func TestGCSeriesAccess(t *testing.T) { h.Truncate(2000) // Remove the series. - testutil.Equals(t, (*memSeries)(nil), h.series.getByID(1)) + testutil.Equals(t, (*memSeries)(nil), h.series.getByID(1), "") _, err = cr.Chunk(chunks[0].Ref) - testutil.Equals(t, ErrNotFound, err) + testutil.Equals(t, ErrNotFound, err, "") _, err = cr.Chunk(chunks[1].Ref) - testutil.Equals(t, ErrNotFound, err) + testutil.Equals(t, ErrNotFound, err, "") } func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { @@ -803,7 +803,7 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { ss, err := q.Select(labels.NewEqualMatcher("a", "1")) testutil.Ok(t, err) - testutil.Equals(t, true, ss.Next()) + testutil.Equals(t, true, ss.Next(), "") } func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { @@ -830,11 +830,11 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { ss, err := q.Select(labels.NewEqualMatcher("a", "1")) testutil.Ok(t, err) - testutil.Equals(t, false, ss.Next()) + testutil.Equals(t, false, ss.Next(), "") // Truncate again, this time the series should be deleted testutil.Ok(t, h.Truncate(2050)) - testutil.Equals(t, (*memSeries)(nil), h.series.getByHash(lset.Hash(), lset)) + testutil.Equals(t, 
(*memSeries)(nil), h.series.getByHash(lset.Hash(), lset), "") } func TestHead_LogRollback(t *testing.T) { @@ -854,9 +854,9 @@ func TestHead_LogRollback(t *testing.T) { testutil.Ok(t, app.Rollback()) recs := readTestWAL(t, w.Dir()) - testutil.Equals(t, 1, len(recs)) + testutil.Equals(t, 1, len(recs), "") series, ok := recs[0].([]RefSeries) testutil.Assert(t, ok, "expected series record but got %+v", recs[0]) - testutil.Equals(t, []RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series) + testutil.Equals(t, series, []RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, "") } diff --git a/index/index_test.go b/index/index_test.go index 8d719813..1ef5f308 100644 --- a/index/index_test.go +++ b/index/index_test.go @@ -226,8 +226,8 @@ func TestIndexRW_Postings(t *testing.T) { err := ir.Series(p.At(), &l, &c) testutil.Ok(t, err) - testutil.Equals(t, 0, len(c)) - testutil.Equals(t, series[i], l) + testutil.Equals(t, 0, len(c), "") + testutil.Equals(t, series[i], l, "") } testutil.Ok(t, p.Err()) @@ -352,8 +352,8 @@ func TestPersistence_index_e2e(t *testing.T) { testutil.Ok(t, err) err = mi.Series(expp.At(), &explset, &expchks) - testutil.Equals(t, explset, lset) - testutil.Equals(t, expchks, chks) + testutil.Equals(t, explset, lset, "") + testutil.Equals(t, expchks, chks, "") } testutil.Assert(t, expp.Next() == false, "") testutil.Ok(t, gotp.Err()) @@ -366,7 +366,7 @@ func TestPersistence_index_e2e(t *testing.T) { tplsRes, err := ir.LabelValues(k) testutil.Ok(t, err) - testutil.Equals(t, tplsExp.Len(), tplsRes.Len()) + testutil.Equals(t, tplsExp.Len(), tplsRes.Len(), "") for i := 0; i < tplsExp.Len(); i++ { strsExp, err := tplsExp.At(i) testutil.Ok(t, err) @@ -374,7 +374,7 @@ func TestPersistence_index_e2e(t *testing.T) { strsRes, err := tplsRes.At(i) testutil.Ok(t, err) - testutil.Equals(t, strsExp, strsRes) + testutil.Equals(t, strsExp, strsRes, "") } } diff --git a/index/postings_test.go b/index/postings_test.go index e7e2e61c..92c57a37 100644 --- 
a/index/postings_test.go +++ b/index/postings_test.go @@ -30,7 +30,7 @@ func TestMemPostings_addFor(t *testing.T) { p.addFor(5, allPostingsKey) - testutil.Equals(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, p.m[allPostingsKey]) + testutil.Equals(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, p.m[allPostingsKey], "") } func TestMemPostings_ensureOrder(t *testing.T) { @@ -102,7 +102,7 @@ func TestIntersect(t *testing.T) { res, err := ExpandPostings(Intersect(a, b)) testutil.Ok(t, err) - testutil.Equals(t, c.res, res) + testutil.Equals(t, c.res, res, "") } } @@ -143,7 +143,7 @@ func TestMultiIntersect(t *testing.T) { res, err := ExpandPostings(Intersect(ps...)) testutil.Ok(t, err) - testutil.Equals(t, c.res, res) + testutil.Equals(t, c.res, res, "") } } @@ -200,7 +200,7 @@ func TestMultiMerge(t *testing.T) { res, err := ExpandPostings(Merge(i1, i2, i3)) testutil.Ok(t, err) - testutil.Equals(t, c.res, res) + testutil.Equals(t, c.res, res, "") } } @@ -232,7 +232,7 @@ func TestMergedPostings(t *testing.T) { res, err := ExpandPostings(newMergedPostings(a, b)) testutil.Ok(t, err) - testutil.Equals(t, c.res, res) + testutil.Equals(t, c.res, res, "") } } @@ -285,7 +285,7 @@ func TestMergedPostingsSeek(t *testing.T) { p := newMergedPostings(a, b) - testutil.Equals(t, c.success, p.Seek(c.seek)) + testutil.Equals(t, c.success, p.Seek(c.seek), "") // After Seek(), At() should be called. if c.success { @@ -294,7 +294,7 @@ func TestMergedPostingsSeek(t *testing.T) { testutil.Ok(t, err) lst = append([]uint64{start}, lst...) 
- testutil.Equals(t, c.res, lst) + testutil.Equals(t, c.res, lst, "") } } @@ -349,7 +349,7 @@ func TestRemovedPostings(t *testing.T) { res, err := ExpandPostings(newRemovedPostings(a, b)) testutil.Ok(t, err) - testutil.Equals(t, c.res, res) + testutil.Equals(t, c.res, res, "") } } @@ -448,7 +448,7 @@ func TestRemovedPostingsSeek(t *testing.T) { p := newRemovedPostings(a, b) - testutil.Equals(t, c.success, p.Seek(c.seek)) + testutil.Equals(t, c.success, p.Seek(c.seek), "") // After Seek(), At() should be called. if c.success { @@ -457,7 +457,7 @@ func TestRemovedPostingsSeek(t *testing.T) { testutil.Ok(t, err) lst = append([]uint64{start}, lst...) - testutil.Equals(t, c.res, lst) + testutil.Equals(t, c.res, lst, "") } } @@ -483,7 +483,7 @@ func TestBigEndian(t *testing.T) { bep := newBigEndianPostings(beLst) for i := 0; i < num; i++ { testutil.Assert(t, bep.Next() == true, "") - testutil.Equals(t, uint64(ls[i]), bep.At()) + testutil.Equals(t, uint64(ls[i]), bep.At(), "") } testutil.Assert(t, bep.Next() == false, "") @@ -531,8 +531,8 @@ func TestBigEndian(t *testing.T) { bep := newBigEndianPostings(beLst) for _, v := range table { - testutil.Equals(t, v.found, bep.Seek(uint64(v.seek))) - testutil.Equals(t, uint64(v.val), bep.At()) + testutil.Equals(t, v.found, bep.Seek(uint64(v.seek)), "") + testutil.Equals(t, uint64(v.val), bep.At(), "") testutil.Assert(t, bep.Err() == nil, "") } }) @@ -552,5 +552,5 @@ func TestIntersectWithMerge(t *testing.T) { res, err := ExpandPostings(p) testutil.Ok(t, err) - testutil.Equals(t, []uint64{30}, res) + testutil.Equals(t, []uint64{30}, res, "") } diff --git a/labels/labels_test.go b/labels/labels_test.go index 200c1fc2..991d732b 100644 --- a/labels/labels_test.go +++ b/labels/labels_test.go @@ -81,8 +81,8 @@ func TestCompareAndEquals(t *testing.T) { // Use constructor to ensure sortedness. a, b := New(c.a...), New(c.b...) 
- testutil.Equals(t, c.res, Compare(a, b)) - testutil.Equals(t, c.res == 0, a.Equals(b)) + testutil.Equals(t, c.res, Compare(a, b), "") + testutil.Equals(t, c.res == 0, a.Equals(b), "") } } diff --git a/querier_test.go b/querier_test.go index 87a45b64..241bf268 100644 --- a/querier_test.go +++ b/querier_test.go @@ -217,7 +217,7 @@ Outer: for { eok, rok := c.exp.Next(), res.Next() - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { continue Outer @@ -225,13 +225,13 @@ Outer: sexp := c.exp.At() sres := res.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } } } @@ -492,7 +492,7 @@ Outer: for { eok, rok := c.exp.Next(), res.Next() - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { continue Outer @@ -500,13 +500,13 @@ Outer: sexp := c.exp.At() sres := res.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } } @@ -662,7 +662,7 @@ Outer: for { eok, rok := c.exp.Next(), res.Next() - testutil.Equals(t, eok, rok) + testutil.Equals(t, eok, rok, "") if !eok { continue Outer @@ -670,13 +670,13 @@ Outer: sexp := c.exp.At() sres := res.At() - testutil.Equals(t, sexp.Labels(), sres.Labels()) + testutil.Equals(t, sexp.Labels(), sres.Labels(), "") smplExp, errExp := expandSeriesIterator(sexp.Iterator()) smplRes, errRes := expandSeriesIterator(sres.Iterator()) - 
testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } } @@ -769,12 +769,12 @@ func TestBaseChunkSeries(t *testing.T) { idx := tc.expIdxs[i] - testutil.Equals(t, tc.series[idx].lset, lset) - testutil.Equals(t, tc.series[idx].chunks, chks) + testutil.Equals(t, tc.series[idx].lset, lset, "") + testutil.Equals(t, tc.series[idx].chunks, chks, "") i++ } - testutil.Equals(t, len(tc.expIdxs), i) + testutil.Equals(t, len(tc.expIdxs), i, "") testutil.Ok(t, bcs.Err()) } @@ -985,8 +985,8 @@ func TestSeriesIterator(t *testing.T) { smplExp, errExp := expandSeriesIterator(exp) smplRes, errRes := expandSeriesIterator(res) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } t.Run("Seek", func(t *testing.T) { @@ -1053,7 +1053,7 @@ func TestSeriesIterator(t *testing.T) { } exp := newListSeriesIterator(smplValid) - testutil.Equals(t, tc.success, res.Seek(tc.seek)) + testutil.Equals(t, tc.success, res.Seek(tc.seek), "") if tc.success { // Init the list and then proceed to check. 
@@ -1063,11 +1063,11 @@ func TestSeriesIterator(t *testing.T) { for remaining { sExp, eExp := exp.At() sRes, eRes := res.At() - testutil.Equals(t, eExp, eRes) - testutil.Equals(t, sExp, sRes) + testutil.Equals(t, eExp, eRes, "") + testutil.Equals(t, sExp, sRes, "") remaining = exp.Next() - testutil.Equals(t, remaining, res.Next()) + testutil.Equals(t, remaining, res.Next(), "") } } } @@ -1086,8 +1086,8 @@ func TestSeriesIterator(t *testing.T) { smplExp, errExp := expandSeriesIterator(exp) smplRes, errRes := expandSeriesIterator(res) - testutil.Equals(t, errExp, errRes) - testutil.Equals(t, smplExp, smplRes) + testutil.Equals(t, errExp, errRes, "") + testutil.Equals(t, smplExp, smplRes, "") } t.Run("Seek", func(t *testing.T) { @@ -1099,7 +1099,7 @@ func TestSeriesIterator(t *testing.T) { res := newChainedSeriesIterator(a, b, c) exp := newListSeriesIterator(tc.exp) - testutil.Equals(t, tc.success, res.Seek(tc.seek)) + testutil.Equals(t, tc.success, res.Seek(tc.seek), "") if tc.success { // Init the list and then proceed to check. 
@@ -1109,11 +1109,11 @@ func TestSeriesIterator(t *testing.T) { for remaining { sExp, eExp := exp.At() sRes, eRes := res.At() - testutil.Equals(t, eExp, eRes) - testutil.Equals(t, sExp, sRes) + testutil.Equals(t, eExp, eRes, "") + testutil.Equals(t, sExp, sRes, "") remaining = exp.Next() - testutil.Equals(t, remaining, res.Next()) + testutil.Equals(t, remaining, res.Next(), "") } } } @@ -1135,8 +1135,8 @@ func TestChunkSeriesIterator_DoubleSeek(t *testing.T) { testutil.Assert(t, res.Seek(1) == true, "") testutil.Assert(t, res.Seek(2) == true, "") ts, v := res.At() - testutil.Equals(t, int64(2), ts) - testutil.Equals(t, float64(2), v) + testutil.Equals(t, int64(2), ts, "") + testutil.Equals(t, float64(2), v, "") } // Regression when seeked chunks were still found via binary search and we always @@ -1152,13 +1152,13 @@ func TestChunkSeriesIterator_SeekInCurrentChunk(t *testing.T) { testutil.Assert(t, it.Next() == true, "") ts, v := it.At() - testutil.Equals(t, int64(1), ts) - testutil.Equals(t, float64(2), v) + testutil.Equals(t, int64(1), ts, "") + testutil.Equals(t, float64(2), v, "") testutil.Assert(t, it.Seek(4) == true, "") ts, v = it.At() - testutil.Equals(t, int64(5), ts) - testutil.Equals(t, float64(6), v) + testutil.Equals(t, int64(5), ts, "") + testutil.Equals(t, float64(6), v, "") } // Regression when calling Next() with a time bounded to fit within two samples. @@ -1299,7 +1299,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) { i++ } testutil.Ok(b, ms.Err()) - testutil.Equals(b, len(lbls), i) + testutil.Equals(b, len(lbls), i, "") } }) } @@ -1364,8 +1364,8 @@ func TestDeletedIterator(t *testing.T) { testutil.Assert(t, i < 1000, "") ts, v := it.At() - testutil.Equals(t, act[i].t, ts) - testutil.Equals(t, act[i].v, v) + testutil.Equals(t, act[i].t, ts, "") + testutil.Equals(t, act[i].v, v, "") } // There has been an extra call to Next(). 
i++ diff --git a/record_test.go b/record_test.go index 4257fc0c..fe9dcf23 100644 --- a/record_test.go +++ b/record_test.go @@ -39,7 +39,7 @@ func TestRecord_EncodeDecode(t *testing.T) { } decSeries, err := dec.Series(enc.Series(series, nil), nil) testutil.Ok(t, err) - testutil.Equals(t, series, decSeries) + testutil.Equals(t, series, decSeries, "") samples := []RefSample{ {Ref: 0, T: 12423423, V: 1.2345}, @@ -48,7 +48,7 @@ func TestRecord_EncodeDecode(t *testing.T) { } decSamples, err := dec.Samples(enc.Samples(samples, nil), nil) testutil.Ok(t, err) - testutil.Equals(t, samples, decSamples) + testutil.Equals(t, samples, decSamples, "") // Intervals get split up into single entries. So we don't get back exactly // what we put in. @@ -69,5 +69,5 @@ func TestRecord_EncodeDecode(t *testing.T) { {ref: 123, intervals: Intervals{{Mint: 5000, Maxt: 0}}}, {ref: 13, intervals: Intervals{{Mint: -1000, Maxt: -11}}}, {ref: 13, intervals: Intervals{{Mint: 5000, Maxt: 1000}}}, - }, decTstones) + }, decTstones, "") } diff --git a/repair_test.go b/repair_test.go index ba0295c6..5442619c 100644 --- a/repair_test.go +++ b/repair_test.go @@ -100,7 +100,7 @@ func TestRepairBadIndexVersion(t *testing.T) { testutil.Equals(t, []labels.Labels{ {{"a", "1"}, {"b", "1"}}, {{"a", "2"}, {"b", "1"}}, - }, res) + }, res, "") meta, err = readMetaFile(tmpDbDir) testutil.Ok(t, err) diff --git a/testutil/testutil.go b/testutil/testutil.go index cde0e4f9..9cce57e5 100644 --- a/testutil/testutil.go +++ b/testutil/testutil.go @@ -58,10 +58,10 @@ func NotOk(tb testing.TB, err error) { } // Equals fails the test if exp is not equal to act. 
-func Equals(tb testing.TB, exp, act interface{}) { +func Equals(tb testing.TB, exp, act interface{}, format string) { if !reflect.DeepEqual(exp, act) { _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + fmt.Printf("\033[31m%s:%d:\n\n\t"+format+": expected: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) tb.FailNow() } } diff --git a/tombstones_test.go b/tombstones_test.go index 62bc0681..3c9fe68d 100644 --- a/tombstones_test.go +++ b/tombstones_test.go @@ -50,7 +50,7 @@ func TestWriteAndReadbackTombStones(t *testing.T) { testutil.Ok(t, err) // Compare the two readers. - testutil.Equals(t, stones, restr) + testutil.Equals(t, stones, restr, "") } func TestAddingNewIntervals(t *testing.T) { @@ -118,7 +118,7 @@ func TestAddingNewIntervals(t *testing.T) { for _, c := range cases { - testutil.Equals(t, c.exp, c.exist.add(c.new)) + testutil.Equals(t, c.exp, c.exist.add(c.new), "") } return } diff --git a/tsdbutil/buffer_test.go b/tsdbutil/buffer_test.go index d8db139a..88dd8ea1 100644 --- a/tsdbutil/buffer_test.go +++ b/tsdbutil/buffer_test.go @@ -79,12 +79,12 @@ func TestBufferedSeriesIterator(t *testing.T) { t, v := bit.At() b = append(b, sample{t: t, v: v}) } - testutil.Equals(t, exp, b) + testutil.Equals(t, exp, b, "") } sampleEq := func(ets int64, ev float64) { ts, v := it.At() - testutil.Equals(t, ets, ts) - testutil.Equals(t, ev, v) + testutil.Equals(t, ets, ts, "") + testutil.Equals(t, ev, v, "") } it = NewBuffer(newListSeriesIterator([]sample{ diff --git a/wal/wal_test.go b/wal/wal_test.go index 72f46253..92abbdf0 100644 --- a/wal/wal_test.go +++ b/wal/wal_test.go @@ -144,7 +144,7 @@ func TestReader(t *testing.T) { if j >= len(c.exp) { t.Fatal("received more records than inserted") } - testutil.Equals(t, c.exp[j], rec) + testutil.Equals(t, c.exp[j], rec, "") } if !c.fail && r.Err() != nil { t.Fatalf("unexpected error: %s", r.Err()) @@ -306,7 
+306,7 @@ func TestWAL_Repair(t *testing.T) { result = append(result, append(b, r.Record()...)) } testutil.Ok(t, r.Err()) - testutil.Equals(t, 4, len(result)) + testutil.Equals(t, 4, len(result), "") for i, r := range result { if !bytes.Equal(records[i], r) { diff --git a/wal_test.go b/wal_test.go index e145188d..0e3087e7 100644 --- a/wal_test.go +++ b/wal_test.go @@ -44,7 +44,7 @@ func TestSegmentWAL_cut(t *testing.T) { testutil.Ok(t, w.cut()) // Cutting creates a new file. - testutil.Equals(t, 2, len(w.files)) + testutil.Equals(t, 2, len(w.files), "") testutil.Ok(t, w.write(WALEntrySeries, 1, []byte("Hello World!!"))) @@ -58,16 +58,16 @@ func TestSegmentWAL_cut(t *testing.T) { metab := make([]byte, 8) _, err = f.Read(metab) testutil.Ok(t, err) - testutil.Equals(t, WALMagic, binary.BigEndian.Uint32(metab[:4])) - testutil.Equals(t, WALFormatDefault, metab[4]) + testutil.Equals(t, WALMagic, binary.BigEndian.Uint32(metab[:4]), "") + testutil.Equals(t, WALFormatDefault, metab[4], "") // We cannot actually check for correct pre-allocation as it is // optional per filesystem and handled transparently. et, flag, b, err := newWALReader(nil, nil).entry(f) testutil.Ok(t, err) - testutil.Equals(t, WALEntrySeries, et) - testutil.Equals(t, byte(walSeriesSimple), flag) - testutil.Equals(t, []byte("Hello World!!"), b) + testutil.Equals(t, WALEntrySeries, et, "") + testutil.Equals(t, byte(walSeriesSimple), flag, "") + testutil.Equals(t, []byte("Hello World!!"), b, "") } } @@ -143,7 +143,7 @@ func TestSegmentWAL_Truncate(t *testing.T) { readSeries = append(readSeries, s...) }, nil, nil) - testutil.Equals(t, expected, readSeries) + testutil.Equals(t, expected, readSeries, "") } // Symmetrical test of reading and writing to the WAL via its main interface. 
@@ -211,9 +211,9 @@ func TestSegmentWAL_Log_Restore(t *testing.T) { testutil.Ok(t, r.Read(serf, smplf, delf)) - testutil.Equals(t, recordedSamples, resultSamples) - testutil.Equals(t, recordedSeries, resultSeries) - testutil.Equals(t, recordedDeletes, resultDeletes) + testutil.Equals(t, recordedSamples, resultSamples, "") + testutil.Equals(t, recordedSeries, resultSeries, "") + testutil.Equals(t, recordedDeletes, resultDeletes, "") series := series[k : k+(numMetrics/iterations)] @@ -292,7 +292,7 @@ func TestWALRestoreCorrupted_invalidSegment(t *testing.T) { fns, err := fileutil.ReadDir(dir) testutil.Ok(t, err) - testutil.Equals(t, []string{"000000"}, fns) + testutil.Equals(t, []string{"000000"}, fns, "") } // Test reading from a WAL that has been corrupted through various means. @@ -402,17 +402,17 @@ func TestWALRestoreCorrupted(t *testing.T) { r := w2.Reader() serf := func(l []RefSeries) { - testutil.Equals(t, 0, len(l)) + testutil.Equals(t, 0, len(l), "") } // Weird hack to check order of reads. i := 0 samplf := func(s []RefSample) { if i == 0 { - testutil.Equals(t, []RefSample{{T: 1, V: 2}}, s) + testutil.Equals(t, []RefSample{{T: 1, V: 2}}, s, "") i++ } else { - testutil.Equals(t, []RefSample{{T: 99, V: 100}}, s) + testutil.Equals(t, []RefSample{{T: 99, V: 100}}, s, "") } } @@ -542,7 +542,7 @@ func TestMigrateWAL_Fuzz(t *testing.T) { []RefSample{{Ref: 3, T: 100, V: 200}, {Ref: 4, T: 300, V: 400}}, []Stone{{ref: 1, intervals: []Interval{{100, 200}}}}, []RefSample{{Ref: 500, T: 1, V: 1}}, - }, res) + }, res, "") // Migrating an already migrated WAL shouldn't do anything. testutil.Ok(t, MigrateWAL(nil, wdir))