Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

sql: remove testing cluster settings in favor of metamorphic build tag #57483

Merged
merged 6 commits into from
Dec 15, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 0 additions & 24 deletions build/teamcity-test-constants.sh

This file was deleted.

1 change: 1 addition & 0 deletions pkg/bench/ddl_analysis/validate_benchmark_data_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ func TestBenchmarkExpectation(t *testing.T) {
skip.UnderStress(t)
skip.UnderRace(t)
skip.UnderShort(t)
skip.UnderMetamorphic(t)

expecations := readExpectationsFile(t)

Expand Down
5 changes: 5 additions & 0 deletions pkg/ccl/importccl/read_import_mysql_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
Expand All @@ -41,6 +42,10 @@ func TestMysqldumpDataReader(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)

// This test currently blocks indefinitely if
// row.kvDatumRowConverterBatchSize is set to 1 or 2 (#57955).
skip.UnderMetamorphic(t)

files := getMysqldumpTestdata(t)

ctx := context.Background()
Expand Down
53 changes: 2 additions & 51 deletions pkg/cmd/roachtest/tpchvec.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ import (
gosql "database/sql"
"fmt"
"math"
"math/rand"
"regexp"
"runtime"
"sort"
Expand Down Expand Up @@ -146,11 +145,6 @@ func (b tpchVecTestCaseBase) getRunConfig(
}},
setupNames: []string{"default"},
}
if version != tpchVecVersion19_2 {
runConfig.clusterSetups[0] = append(runConfig.clusterSetups[0],
"RESET CLUSTER SETTING sql.testing.vectorize.batch_size",
)
}
for queryNum := 1; queryNum <= tpch.NumQueries; queryNum++ {
if _, shouldSkip := queriesToSkip[queryNum]; !shouldSkip {
runConfig.queriesToRun = append(runConfig.queriesToRun, queryNum)
Expand Down Expand Up @@ -517,33 +511,6 @@ func (d tpchVecDiskTest) preTestRunHook(
}
}

// setSmallBatchSize sets a cluster setting to override the batch size to be in
// [1, 5) range.
func setSmallBatchSize(t *test, conn *gosql.DB, rng *rand.Rand) {
	// Choose a batch size uniformly at random from {1, 2, 3, 4}.
	size := rng.Intn(4) + 1
	t.Status(fmt.Sprintf("setting sql.testing.vectorize.batch_size to %d", size))
	stmt := fmt.Sprintf("SET CLUSTER SETTING sql.testing.vectorize.batch_size=%d", size)
	if _, err := conn.Exec(stmt); err != nil {
		t.Fatal(err)
	}
}

// tpchVecSmallBatchSizeTest is a TPCH test case that runs with the vectorized
// batch size overridden to a small random value (see setSmallBatchSize).
type tpchVecSmallBatchSizeTest struct {
	tpchVecTestCaseBase
}

// preTestRunHook performs the embedded base case's setup (with stats
// creation enabled) and then lowers the vectorized batch size to a random
// value in the [1, 5) range to exercise small-batch edge cases.
func (b tpchVecSmallBatchSizeTest) preTestRunHook(
	ctx context.Context,
	t *test,
	c *cluster,
	conn *gosql.DB,
	version crdbVersion,
	clusterSetup []string,
) {
	b.tpchVecTestCaseBase.preTestRunHook(t, conn, clusterSetup, true /* createStats */)
	rng, _ := randutil.NewPseudoRand()
	setSmallBatchSize(t, conn, rng)
}

func baseTestRun(
ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion, tc tpchVecTestCase,
) {
Expand Down Expand Up @@ -607,12 +574,6 @@ func (s tpchVecSmithcmpTest) preTestRunHook(
t.Fatal(err)
}
c.Put(ctx, smithcmp, "./"+tpchVecSmithcmp, node)
// To increase test coverage, we will be randomizing the batch size in 50%
// of the runs.
rng, _ := randutil.NewPseudoRand()
if rng.Float64() < 0.5 {
setSmallBatchSize(t, conn, rng)
}
}

func smithcmpTestRun(
Expand Down Expand Up @@ -698,18 +659,6 @@ func registerTPCHVec(r *testRegistry) {
},
})

r.Add(testSpec{
Name: "tpchvec/smallbatchsize",
Owner: OwnerSQLExec,
Cluster: makeClusterSpec(tpchVecNodeCount),
// 19.2 version doesn't have the testing cluster setting to change the batch
// size, so only run on versions >= 20.1.0.
MinVersion: "v20.1.0",
Run: func(ctx context.Context, t *test, c *cluster) {
runTPCHVec(ctx, t, c, tpchVecSmallBatchSizeTest{}, baseTestRun)
},
})

r.Add(testSpec{
Name: "tpchvec/smithcmp",
Owner: OwnerSQLExec,
Expand Down Expand Up @@ -742,6 +691,8 @@ func registerTPCHVec(r *testRegistry) {
// that modify the cluster settings for all configs to benchmark
// like in the example below. The example benchmarks three values
// of coldata.BatchSize() variable against each other.
// NOTE: the setting has been removed since the example was written,
// but it still serves the purpose of showing how to use the config.
var clusterSetups [][]string
var setupNames []string
for _, batchSize := range []int{512, 1024, 1536} {
Expand Down
6 changes: 0 additions & 6 deletions pkg/col/coldata/batch.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,12 +105,6 @@ func SetBatchSizeForTests(newBatchSize int) error {
return nil
}

// ResetBatchSizeForTests resets the batchSize variable to the default batch
// size. It should only be used in tests.
func ResetBatchSizeForTests() {
	// A plain atomic store suffices here: SwapInt64 would return the
	// previous value only for it to be discarded.
	atomic.StoreInt64(&batchSize, defaultBatchSize)
}

// NewMemBatch allocates a new in-memory Batch.
// TODO(jordan): pool these allocations.
func NewMemBatch(typs []*types.T, factory ColumnFactory) Batch {
Expand Down
21 changes: 0 additions & 21 deletions pkg/col/coldata/nulls.go
Original file line number Diff line number Diff line change
Expand Up @@ -192,27 +192,6 @@ func (n *Nulls) UnsetNull(i int) {
n.nulls[i>>3] |= bitMask[i&7]
}

// Remove the unused warning.
// (swap currently has no non-test callers; referencing it here keeps linters
// from flagging it as unused.)
var (
	n = Nulls{}
	_ = n.swap
)

// swap swaps the null values at the argument indices. We implement the logic
// directly on the byte array rather than case on the result of NullAt to avoid
// having to take some branches.
func (n *Nulls) swap(iIdx, jIdx int) {
	i, j := uint64(iIdx), uint64(jIdx)
	// Get original null values.
	// Each byte of n.nulls packs 8 flags: bit (idx % 8) of byte (idx / 8)
	// holds the flag for index idx.
	ni := (n.nulls[i/8] >> (i % 8)) & 0x1
	nj := (n.nulls[j/8] >> (j % 8)) & 0x1
	// Write into the correct positions.
	iMask := bitMask[i%8]
	jMask := bitMask[j%8]
	// Clear each target bit with the inverted mask, then OR in the bit that
	// was read from the other position.
	n.nulls[i/8] = (n.nulls[i/8] & ^iMask) | (nj << (i % 8))
	n.nulls[j/8] = (n.nulls[j/8] & ^jMask) | (ni << (j % 8))
}

// setSmallRange is a helper that copies over a slice [startIdx, startIdx+toSet)
// of src and puts it into this nulls starting at destIdx.
func (n *Nulls) setSmallRange(src *Nulls, destIdx, startIdx, toSet int) {
Expand Down
92 changes: 10 additions & 82 deletions pkg/col/coldata/nulls_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,16 @@ var nulls5 Nulls
var nulls10 Nulls

// pos is a collection of interesting boundary indices to use in tests.
var pos = []int{0, 1, 63, 64, 65, BatchSize() - 1, BatchSize()}
var pos []int

// init populates pos with boundary indices, keeping only those that are
// valid for the current (possibly metamorphic) BatchSize().
func init() {
	pos = []int{0, 1, BatchSize() - 1, BatchSize()}
	for _, candidate := range []int{63, 64, 65} {
		if candidate <= BatchSize() {
			pos = append(pos, candidate)
		}
	}
}

func init() {
nulls3 = NewNulls(BatchSize())
Expand Down Expand Up @@ -88,87 +97,6 @@ func TestUnsetNullRange(t *testing.T) {
}
}

// TestSwapNulls verifies Nulls.swap across the three possible combinations of
// flags at the swapped positions: null/null, null/not-null, and
// not-null/not-null.
func TestSwapNulls(t *testing.T) {
	n := NewNulls(BatchSize())
	swapPos := []int{0, 1, 63, 64, 65, BatchSize() - 1}
	idxInSwapPos := func(idx int) bool {
		for _, p := range swapPos {
			if p == idx {
				return true
			}
		}
		return false
	}

	t.Run("TestSwapNullWithNull", func(t *testing.T) {
		// Test that swapping null with null doesn't change anything.
		for _, p := range swapPos {
			n.SetNull(p)
		}
		for _, i := range swapPos {
			for _, j := range swapPos {
				n.swap(i, j)
				for k := 0; k < BatchSize(); k++ {
					require.Equal(t, idxInSwapPos(k), n.NullAt(k),
						"after swapping NULLS (%d, %d), NullAt(%d) saw %t, expected %t", i, j, k, n.NullAt(k), idxInSwapPos(k))
				}
			}
		}
	})

	t.Run("TestSwapNullWithNotNull", func(t *testing.T) {
		// Test that swapping null with not null changes things appropriately.
		n.UnsetNulls()
		swaps := map[int]int{
			0:  BatchSize() - 1,
			1:  62,
			2:  3,
			63: 65,
			68: 120,
		}
		idxInSwaps := func(idx int) bool {
			for k, v := range swaps {
				if idx == k || idx == v {
					return true
				}
			}
			return false
		}
		for _, j := range swaps {
			n.SetNull(j)
		}
		for i, j := range swaps {
			n.swap(i, j)
			require.Truef(t, n.NullAt(i), "after swapping not null and null (%d, %d), found null=%t at %d", i, j, n.NullAt(i), i)
			require.Truef(t, !n.NullAt(j), "after swapping not null and null (%d, %d), found null=%t at %d", i, j, !n.NullAt(j), j)
			for k := 0; k < BatchSize(); k++ {
				if idxInSwaps(k) {
					continue
				}
				require.Falsef(t, n.NullAt(k),
					"after swapping NULLS (%d, %d), NullAt(%d) saw %t, expected false", i, j, k, n.NullAt(k))
			}
		}
	})

	// NB: this subtest was previously also named "TestSwapNullWithNull",
	// duplicating the first subtest's name; it actually exercises the
	// not-null/not-null case.
	t.Run("TestSwapNotNullWithNotNull", func(t *testing.T) {
		// Test that swapping not null with not null doesn't do anything.
		n.SetNulls()
		for _, p := range swapPos {
			n.UnsetNull(p)
		}
		for _, i := range swapPos {
			for _, j := range swapPos {
				n.swap(i, j)
				for k := 0; k < BatchSize(); k++ {
					require.Equal(t, idxInSwapPos(k), !n.NullAt(k),
						"after swapping NULLS (%d, %d), NullAt(%d) saw %t, expected %t", i, j, k, !n.NullAt(k), idxInSwapPos(k))
				}
			}
		}
	})
}

func TestNullsTruncate(t *testing.T) {
for _, size := range pos {
n := NewNulls(BatchSize())
Expand Down
16 changes: 16 additions & 0 deletions pkg/col/coldata/vec_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,9 @@ func TestNullRanges(t *testing.T) {

c := coldata.NewMemColumn(types.Int, coldata.BatchSize(), coldata.StandardColumnFactory)
for _, tc := range tcs {
if tc.end > coldata.BatchSize() {
continue
}
c.Nulls().UnsetNulls()
c.Nulls().SetNullRange(tc.start, tc.end)

Expand Down Expand Up @@ -204,6 +207,10 @@ func TestAppend(t *testing.T) {
}

for _, tc := range testCases {
if tc.args.DestIdx+tc.expectedLength > coldata.BatchSize() ||
tc.args.SrcEndIdx > coldata.BatchSize() {
continue
}
tc.args.Src = src
if tc.args.SrcEndIdx == 0 {
// SrcEndIdx is always required.
Expand Down Expand Up @@ -277,6 +284,9 @@ func TestCopy(t *testing.T) {
}

for _, tc := range testCases {
if tc.args.DestIdx+(tc.args.SrcEndIdx-tc.args.SrcStartIdx) > coldata.BatchSize() {
continue
}
tc.args.Src = src
t.Run(tc.name, func(t *testing.T) {
dest := coldata.NewMemColumn(typ, coldata.BatchSize(), coldata.StandardColumnFactory)
Expand All @@ -297,6 +307,9 @@ func TestCopy(t *testing.T) {
}

func TestCopyNulls(t *testing.T) {
if coldata.BatchSize() < 10 {
return
}
var typ = types.Int

// Set up the destination vector.
Expand Down Expand Up @@ -352,6 +365,9 @@ func TestCopyNulls(t *testing.T) {
}

func TestCopySelOnDestDoesNotUnsetOldNulls(t *testing.T) {
if coldata.BatchSize() < 5 {
return
}
var typ = types.Int

// Set up the destination vector. It is all nulls except for a single
Expand Down
2 changes: 2 additions & 0 deletions pkg/settings/registry.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,8 @@ var retiredSettings = map[string]struct{}{
"sql.parallel_scans.enabled": {},
// removed as of 21.1.
"sql.distsql.interleaved_joins.enabled": {},
"sql.testing.vectorize.batch_size": {},
"sql.testing.mutations.max_batch_size": {},
}

// register adds a setting to the registry.
Expand Down
9 changes: 9 additions & 0 deletions pkg/sql/ambiguous_commit_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,11 @@ import (
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc/nodedialer"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/mutations"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
Expand Down Expand Up @@ -62,6 +64,13 @@ func TestAmbiguousCommit(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)

if mutations.MaxBatchSize() == 1 {
// This test relies on the fact that the mutation batch consisting of a
// single row also contains an EndTxn which is the case only when the
// max batch size is at least 2, so we'll skip it.
skip.UnderMetamorphic(t)
}

testutils.RunTrueAndFalse(t, "ambiguousSuccess", func(t *testing.T, ambiguousSuccess bool) {
var params base.TestServerArgs
var processed int32
Expand Down
3 changes: 0 additions & 3 deletions pkg/sql/colflow/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ go_library(
"//pkg/col/coldata",
"//pkg/col/coldataext",
"//pkg/rpc/nodedialer",
"//pkg/settings",
"//pkg/sql/catalog/descs",
"//pkg/sql/colcontainer",
"//pkg/sql/colexec",
Expand All @@ -21,8 +20,6 @@ go_library(
"//pkg/sql/execinfra",
"//pkg/sql/execinfrapb",
"//pkg/sql/flowinfra",
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/sql/rowexec",
"//pkg/sql/sessiondatapb",
"//pkg/sql/types",
Expand Down
Loading