diff --git a/build/teamcity-nightly-roachtest-invoke.sh b/build/teamcity-nightly-roachtest-invoke.sh
new file mode 100755
index 000000000000..299a03fce7a8
--- /dev/null
+++ b/build/teamcity-nightly-roachtest-invoke.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+bin/roachtest run \
+ --cloud="${CLOUD}" \
+ --artifacts="${ARTIFACTS}" \
+ --parallelism="${PARALLELISM}" \
+ --cpu-quota="${CPUQUOTA}" \
+ --zones="${ZONES}" \
+ --count="${COUNT-1}" \
+ --debug="${DEBUG-false}" \
+ --build-tag="${BUILD_TAG}" \
+ --cockroach="${COCKROACH_BINARY}" \
+ --roachprod="${PWD}/bin/roachprod" \
+ --workload="${PWD}/bin/workload" \
+ --teamcity=true \
+ --slack-token="${SLACK_TOKEN}" \
+ --cluster-id="${TC_BUILD_ID}" \
+ "${TESTS}"
diff --git a/build/teamcity-nightly-roachtest.sh b/build/teamcity-nightly-roachtest.sh
new file mode 100755
index 000000000000..fc70fdd3fb52
--- /dev/null
+++ b/build/teamcity-nightly-roachtest.sh
@@ -0,0 +1,117 @@
+#!/usr/bin/env bash
+set -euxo pipefail
+
+# Entry point for the nightly roachtests. These are run from CI and require
+# appropriate secrets for the ${CLOUD} parameter (along with other things;
+# apologies, you're going to have to dig around for them below or, better
+# yet, look at the job).
+
+# Note that when this script is called, the cockroach binary to be tested
+# already exists in the current directory.
+COCKROACH_BINARY="${PWD}/cockroach.linux-2.6.32-gnu-amd64"
+chmod +x "${COCKROACH_BINARY}"
+
+if [[ ! -f ~/.ssh/id_rsa.pub ]]; then
+ ssh-keygen -q -C "roachtest-nightly $(date)" -N "" -f ~/.ssh/id_rsa
+fi
+
+# The artifacts dir should match up with that supplied by TC.
+artifacts=$PWD/artifacts
+mkdir -p "${artifacts}"
+chmod o+rwx "${artifacts}"
+
+# Disable global -json flag.
+export PATH=$PATH:$(GOFLAGS=; go env GOPATH)/bin
+
+make bin/workload bin/roachtest bin/roachprod > "${artifacts}/build.txt" 2>&1 || cat "${artifacts}/build.txt"
+
+# Set up Google credentials. Note that we need this for all clouds since we upload
+# perf artifacts to Google Storage at the end.
+if [[ "$GOOGLE_EPHEMERAL_CREDENTIALS" ]]; then
+ echo "$GOOGLE_EPHEMERAL_CREDENTIALS" > creds.json
+ gcloud auth activate-service-account --key-file=creds.json
+ export ROACHPROD_USER=teamcity
+else
+ echo 'warning: GOOGLE_EPHEMERAL_CREDENTIALS not set' >&2
+ echo "Assuming that you've run \`gcloud auth login\` from inside the builder." >&2
+fi
+
+# Early bind the stats dir. Roachtest invocations can take ages, and we want the
+# date at the time of the start of the run (which best identifies the version
+# of the code being run).
+stats_dir="$(date +"%Y%m%d")-${TC_BUILD_ID}"
+
+# Set up a function we'll invoke at the end.
+function upload_stats {
+ if [[ "${TC_BUILD_BRANCH}" == "master" ]]; then
+ bucket="cockroach-nightly-${CLOUD}"
+ if [[ "${CLOUD}" == "gce" ]]; then
+ # GCE, having been there first, gets an exemption.
+ bucket="cockroach-nightly"
+ fi
+ # The stats.json files need some path translation:
+ # ${artifacts}/path/to/test/stats.json
+ # to
+ # gs://${bucket}/artifacts/${stats_dir}/path/to/test/stats.json
+ #
+ # `find` below will print paths like ./path/to/test/stats.json. We
+ # strip the leading `./` with sed, or gsutil would create a `.` folder
+ # in ${stats_dir}, which we don't want.
+ (cd "${artifacts}" && \
+ while IFS= read -r f; do
+ if [[ -n "${f}" ]]; then
+ gsutil cp "${f}" "gs://${bucket}/artifacts/${stats_dir}/${f}"
+ fi
+ done <<< "$(find . 
-name stats.json | sed 's/^\.\///')") + fi +} + +# Upload any stats.json we can find, no matter what happens. +trap upload_stats EXIT + +# Set up the parameters for the roachtest invocation. + +ARTIFACTS="${artifacts}" +PARALLELISM=16 +CPUQUOTA=1024 +ZONES="" +TESTS="" +case "${CLOUD}" in + gce) + # We specify --zones below so that nodes are created in us-central1-b by + # default. This reserves us-east1-b (the roachprod default zone) for use by + # manually created clusters. + ZONES="us-central1-b,us-west1-b,europe-west2-b" + ;; + aws) + PARALLELISM=3 + CPUQUOTA=384 + if [ -z "${TESTS}" ]; then + TESTS="kv(0|95)|ycsb|tpcc/(headroom/n4cpu16)|tpccbench/(nodes=3/cpu=16)|scbench/randomload/(nodes=3/ops=2000/conc=1)|backup/(KMS/n3cpu4)" + fi + ;; + *) + echo "unknown cloud ${CLOUD}" + exit 1 + ;; +esac + +export \ +CLOUD="${CLOUD}" \ +ARTIFACTS="${ARTIFACTS}" \ +PARALLELISM="${PARALLELISM}" \ +CPUQUOTA="${CPUQUOTA}" \ +ZONES="${ZONES}" \ +COUNT="${COUNT-1}" \ +DEBUG="${DEBUG-false}" \ +BUILD_TAG="${BUILD_TAG}" \ +COCKROACH_BINARY="${COCKROACH_BINARY}" \ +SLACK_TOKEN="${SLACK_TOKEN}" \ +TC_BUILD_ID="${TC_BUILD_ID}" \ +TESTS="${TESTS}" + +# Teamcity has a 1300 minute timeout that, when reached, kills the process +# without a stack trace (probably SIGKILL). We'd love to see a stack trace +# though, so after 1200 minutes, kill with SIGINT which will allow roachtest to +# fail tests and cleanup. +timeout -s INT $((1200*60)) "build/teamcity-nightly-roachtest-invoke.sh" diff --git a/pkg/ccl/workloadccl/allccl/all.go b/pkg/ccl/workloadccl/allccl/all.go index cdde152de188..f58fd8f1b6d5 100644 --- a/pkg/ccl/workloadccl/allccl/all.go +++ b/pkg/ccl/workloadccl/allccl/all.go @@ -19,6 +19,7 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/workload/debug" _ "github.com/cockroachdb/cockroach/pkg/workload/examples" _ "github.com/cockroachdb/cockroach/pkg/workload/indexes" + _ "github.com/cockroachdb/cockroach/pkg/workload/interleavebench" _ "github.com/cockroachdb/cockroach/pkg/workload/interleavedpartitioned" _ "github.com/cockroachdb/cockroach/pkg/workload/jsonload" _ "github.com/cockroachdb/cockroach/pkg/workload/kv" @@ -28,6 +29,7 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/workload/querylog" _ "github.com/cockroachdb/cockroach/pkg/workload/queue" _ "github.com/cockroachdb/cockroach/pkg/workload/rand" + _ "github.com/cockroachdb/cockroach/pkg/workload/schemachange" _ "github.com/cockroachdb/cockroach/pkg/workload/sqlsmith" _ "github.com/cockroachdb/cockroach/pkg/workload/tpcc" _ "github.com/cockroachdb/cockroach/pkg/workload/tpccchecks" diff --git a/pkg/ccl/workloadccl/allccl/all_test.go b/pkg/ccl/workloadccl/allccl/all_test.go index f50bcb8e6cfc..66d400282ff8 100644 --- a/pkg/ccl/workloadccl/allccl/all_test.go +++ b/pkg/ccl/workloadccl/allccl/all_test.go @@ -11,6 +11,7 @@ package allccl import ( "context" "encoding/binary" + "fmt" "hash" "hash/fnv" "math" @@ -22,13 +23,15 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coltypes" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + 
"github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/workloadsql" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -78,9 +81,10 @@ func TestAllRegisteredImportFixture(t *testing.T) { } t.Run(meta.Name, func(t *testing.T) { - if bigInitialData(meta) && testing.Short() { - t.Skipf(`%s loads a lot of data`, meta.Name) + if bigInitialData(meta) { + skip.UnderShort(t, fmt.Sprintf(`%s loads a lot of data`, meta.Name)) } + defer log.Scope(t).Close(t) ctx := context.Background() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ @@ -127,7 +131,8 @@ func TestAllRegisteredSetup(t *testing.T) { case `roachmart`: // TODO(dan): It'd be nice to test this with the default flags. For now, // this is better than nothing. - if err := gen.(workload.Flagser).Flags().Parse([]string{ + flags := gen.(workload.Flagser).Flags() + if err := flags.Parse([]string{ `--users=10`, `--orders=100`, `--partition=false`, }); err != nil { t.Fatal(err) @@ -138,6 +143,7 @@ func TestAllRegisteredSetup(t *testing.T) { } t.Run(meta.Name, func(t *testing.T) { + defer log.Scope(t).Close(t) ctx := context.Background() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ UseDatabase: "d", @@ -165,6 +171,7 @@ func TestAllRegisteredSetup(t *testing.T) { func TestConsistentSchema(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) // Test that the table schemas are consistent when the workload is created // multiple times with the same seed. @@ -188,7 +195,7 @@ func hashTableInitialData( h hash.Hash, data workload.BatchedTuples, a *bufalloc.ByteAllocator, ) error { var scratch [8]byte - b := coldata.NewMemBatchWithSize(nil, 0) + b := coldata.NewMemBatchWithSize(nil /* types */, 0 /* size */) for batchIdx := 0; batchIdx < data.NumBatches; batchIdx++ { *a = (*a)[:0] data.FillBatch(batchIdx, b, a) @@ -237,9 +244,7 @@ func TestDeterministicInitialData(t *testing.T) { // There are other tests that run initial data generation under race, so we // don't get anything from running this one under race as well. - if util.RaceEnabled { - t.Skip(`uninteresting under race`) - } + skip.UnderRace(t, "uninteresting under race") // Hardcode goldens for the fingerprint of the initial data of generators with // default flags. 
This lets us opt in generators known to be deterministic and @@ -263,7 +268,7 @@ func TestDeterministicInitialData(t *testing.T) { `startrek`: 0xa0249fbdf612734c, `tpcc`: 0xab32e4f5e899eb2f, `tpch`: 0xdd952207e22aa577, - `ycsb`: 0x85dd34d8c07fd808, + `ycsb`: 0x1244ea1c29ef67f6, } var a bufalloc.ByteAllocator @@ -277,8 +282,8 @@ func TestDeterministicInitialData(t *testing.T) { continue } t.Run(meta.Name, func(t *testing.T) { - if bigInitialData(meta) && testing.Short() { - t.Skipf(`%s involves a lot of data`, meta.Name) + if bigInitialData(meta) { + skip.UnderShort(t, fmt.Sprintf(`%s involves a lot of data`, meta.Name)) } h := fnv.New64() diff --git a/pkg/ccl/workloadccl/bench_test.go b/pkg/ccl/workloadccl/bench_test.go index 3ec7970e8ca4..111ed076abde 100644 --- a/pkg/ccl/workloadccl/bench_test.go +++ b/pkg/ccl/workloadccl/bench_test.go @@ -16,6 +16,7 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/ccl" "github.com/cockroachdb/cockroach/pkg/ccl/workloadccl" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/tpcc" @@ -48,9 +49,7 @@ func benchmarkImportFixture(b *testing.B, gen workload.Generator) { } func BenchmarkImportFixture(b *testing.B) { - if testing.Short() { - b.Skip("skipping long benchmark") - } + skip.UnderShort(b, "skipping long benchmark") b.Run(`tpcc/warehouses=1`, func(b *testing.B) { benchmarkImportFixture(b, tpcc.FromWarehouses(1)) diff --git a/pkg/ccl/workloadccl/cliccl/fixtures.go b/pkg/ccl/workloadccl/cliccl/fixtures.go index 9db65425dd55..42c7825b6857 100644 --- a/pkg/ccl/workloadccl/cliccl/fixtures.go +++ b/pkg/ccl/workloadccl/cliccl/fixtures.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/workload" workloadcli "github.com/cockroachdb/cockroach/pkg/workload/cli" "github.com/cockroachdb/cockroach/pkg/workload/workloadsql" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" "google.golang.org/api/option" @@ -47,6 +47,7 @@ func config() workloadccl.FixtureConfig { config.BillingProject = *gcsBillingProjectOverride } config.CSVServerURL = *fixturesMakeImportCSVServerURL + config.TableStats = *fixturesMakeTableStats return config } @@ -91,6 +92,10 @@ var fixturesMakeFilesPerNode = fixturesMakeCmd.PersistentFlags().Int( `files-per-node`, 1, `number of file URLs to generate per node when using csv-server`) +var fixturesMakeTableStats = fixturesMakeCmd.PersistentFlags().Bool( + `table-stats`, true, + `generate full table statistics for all tables`) + var fixturesImportFilesPerNode = fixturesImportCmd.PersistentFlags().Int( `files-per-node`, 1, `number of file URLs to generate per node`) diff --git a/pkg/ccl/workloadccl/fixture.go b/pkg/ccl/workloadccl/fixture.go index 94c2ec14bfba..bd49e41da3a2 100644 --- a/pkg/ccl/workloadccl/fixture.go +++ b/pkg/ccl/workloadccl/fixture.go @@ -28,7 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" "google.golang.org/api/iterator" ) @@ -60,6 +60,10 @@ type FixtureConfig struct { // storage 
requests. This is required to be set if using a "requestor pays" // bucket. BillingProject string + + // If TableStats is true, CREATE STATISTICS is called on all tables before + // creating the fixture. + TableStats bool } func (s FixtureConfig) objectPathToURI(folder string) string { @@ -142,7 +146,7 @@ func GetFixture( fixtureFolder := generatorToGCSFolder(config, gen) _, err := b.Objects(ctx, &storage.Query{Prefix: fixtureFolder, Delimiter: `/`}).Next() - if err == iterator.Done { + if errors.Is(err, iterator.Done) { notFound = true return errors.Errorf(`fixture not found: %s`, fixtureFolder) } else if err != nil { @@ -153,7 +157,7 @@ func GetFixture( for _, table := range gen.Tables() { tableFolder := filepath.Join(fixtureFolder, table.Name) _, err := b.Objects(ctx, &storage.Query{Prefix: tableFolder, Delimiter: `/`}).Next() - if err == iterator.Done { + if errors.Is(err, iterator.Done) { return errors.Errorf(`fixture table not found: %s`, tableFolder) } else if err != nil { return err @@ -279,6 +283,28 @@ func MakeFixture( return Fixture{}, err } + if config.TableStats { + // Clean up any existing statistics. + _, err := sqlDB.Exec("DELETE FROM system.table_statistics WHERE true") + if err != nil { + return Fixture{}, errors.Wrapf(err, "while deleting table statistics") + } + g := ctxgroup.WithContext(ctx) + for _, t := range gen.Tables() { + t := t + g.Go(func() error { + log.Infof(ctx, "Creating table stats for %s", t.Name) + _, err := sqlDB.Exec(fmt.Sprintf( + `CREATE STATISTICS pre_backup FROM "%s"."%s"`, dbName, t.Name, + )) + return err + }) + } + if err := g.Wait(); err != nil { + return Fixture{}, err + } + } + g := ctxgroup.WithContext(ctx) for _, t := range gen.Tables() { t := t @@ -554,11 +580,12 @@ func RestoreFixture( g.GoCtx(func(ctx context.Context) error { start := timeutil.Now() importStmt := fmt.Sprintf(`RESTORE %s.%s FROM $1 WITH into_db=$2`, genName, table.TableName) + log.Infof(ctx, "Restoring from %s", table.BackupURI) var rows, index, tableBytes int64 var discard interface{} res, err := sqlDB.Query(importStmt, table.BackupURI, database) if err != nil { - return err + return errors.Wrapf(err, "backup: %s", table.BackupURI) } defer res.Close() if !res.Next() { @@ -619,14 +646,14 @@ func ListFixtures( gensPrefix := config.GCSPrefix + `/` for genIter := b.Objects(ctx, &storage.Query{Prefix: gensPrefix, Delimiter: `/`}); ; { gen, err := genIter.Next() - if err == iterator.Done { + if errors.Is(err, iterator.Done) { break } else if err != nil { return nil, err } for genConfigIter := b.Objects(ctx, &storage.Query{Prefix: gen.Prefix, Delimiter: `/`}); ; { genConfig, err := genConfigIter.Next() - if err == iterator.Done { + if errors.Is(err, iterator.Done) { break } else if err != nil { return nil, err diff --git a/pkg/ccl/workloadccl/fixture_test.go b/pkg/ccl/workloadccl/fixture_test.go index 30dc207dde14..084d2810d0e2 100644 --- a/pkg/ccl/workloadccl/fixture_test.go +++ b/pkg/ccl/workloadccl/fixture_test.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -90,7 +91,7 @@ func TestFixture(t *testing.T) { gcsBucket := os.Getenv(`GS_BUCKET`) gcsKey := 
os.Getenv(`GS_JSONKEY`) if gcsBucket == "" || gcsKey == "" { - t.Skip("GS_BUCKET and GS_JSONKEY env vars must be set") + skip.IgnoreLint(t, "GS_BUCKET and GS_JSONKEY env vars must be set") } source, err := google.JWTConfigFromJSON([]byte(gcsKey), storage.ScopeReadWrite) diff --git a/pkg/ccl/workloadccl/format/sstable.go b/pkg/ccl/workloadccl/format/sstable.go index 98114a3a111c..9f8cf5a9e756 100644 --- a/pkg/ccl/workloadccl/format/sstable.go +++ b/pkg/ccl/workloadccl/format/sstable.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/workload" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // ToTableDescriptor returns the corresponding TableDescriptor for a workload diff --git a/pkg/ccl/workloadccl/roachmartccl/roachmart.go b/pkg/ccl/workloadccl/roachmartccl/roachmart.go index b602d5890158..4f6cf3ec2120 100644 --- a/pkg/ccl/workloadccl/roachmartccl/roachmart.go +++ b/pkg/ccl/workloadccl/roachmartccl/roachmart.go @@ -12,7 +12,6 @@ import ( "bytes" "context" gosql "database/sql" - "errors" "fmt" "math/rand" "strings" @@ -21,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -200,7 +200,9 @@ func (m *roachmart) Tables() []workload.Table { } // Ops implements the Opser interface. -func (m *roachmart) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (m *roachmart) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(m, m.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go index e5213b1efc10..e8696a1228b4 100644 --- a/pkg/cli/cli.go +++ b/pkg/cli/cli.go @@ -28,12 +28,13 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/workload/bank" // registers workloads _ "github.com/cockroachdb/cockroach/pkg/workload/bulkingest" // registers workloads workloadcli "github.com/cockroachdb/cockroach/pkg/workload/cli" - _ "github.com/cockroachdb/cockroach/pkg/workload/examples" // registers workloads - _ "github.com/cockroachdb/cockroach/pkg/workload/kv" // registers workloads - _ "github.com/cockroachdb/cockroach/pkg/workload/movr" // registers workloads - _ "github.com/cockroachdb/cockroach/pkg/workload/tpcc" // registers workloads - _ "github.com/cockroachdb/cockroach/pkg/workload/tpch" // registers workloads - _ "github.com/cockroachdb/cockroach/pkg/workload/ycsb" // registers workloads + _ "github.com/cockroachdb/cockroach/pkg/workload/examples" // registers workloads + _ "github.com/cockroachdb/cockroach/pkg/workload/kv" // registers workloads + _ "github.com/cockroachdb/cockroach/pkg/workload/movr" // registers workloads + _ "github.com/cockroachdb/cockroach/pkg/workload/schemachange" // registers workloads + _ "github.com/cockroachdb/cockroach/pkg/workload/tpcc" // registers workloads + _ "github.com/cockroachdb/cockroach/pkg/workload/tpch" // registers workloads + _ "github.com/cockroachdb/cockroach/pkg/workload/ycsb" // registers workloads "github.com/cockroachdb/errors" 
"github.com/spf13/cobra" ) diff --git a/pkg/cli/cli_test.go b/pkg/cli/cli_test.go index 8fbf927aef84..c50f480d2845 100644 --- a/pkg/cli/cli_test.go +++ b/pkg/cli/cli_test.go @@ -1401,7 +1401,7 @@ Available Commands: version output version information debug debugging commands sqlfmt format SQL statements - workload [experimental] generators for data and query loads + workload generators for data and query loads systembench Run systembench help Help about any command diff --git a/pkg/cli/demo_cluster.go b/pkg/cli/demo_cluster.go index 6ff1f5f9aacb..5e8b35666b9e 100644 --- a/pkg/cli/demo_cluster.go +++ b/pkg/cli/demo_cluster.go @@ -604,7 +604,7 @@ func (c *transientCluster) runWorkload( // Dummy registry to prove to the Opser. reg := histogram.NewRegistry(time.Duration(100) * time.Millisecond) - ops, err := opser.Ops(sqlUrls, reg) + ops, err := opser.Ops(ctx, sqlUrls, reg) if err != nil { return errors.Wrap(err, "unable to create workload") } diff --git a/pkg/cmd/roachprod-stress/main.go b/pkg/cmd/roachprod-stress/main.go index f9b8306ed6e9..9666590be440 100644 --- a/pkg/cmd/roachprod-stress/main.go +++ b/pkg/cmd/roachprod-stress/main.go @@ -14,7 +14,6 @@ import ( "bufio" "bytes" "context" - "errors" "flag" "fmt" "io" @@ -33,6 +32,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" ) var ( @@ -62,7 +62,7 @@ func run() error { var b bytes.Buffer flags.SetOutput(&b) flags.Usage() - return errors.New(b.String()) + return errors.Newf("%s", b.String()) } cluster := os.Args[1] @@ -109,7 +109,7 @@ func run() error { var b bytes.Buffer flags.SetOutput(&b) flags.Usage() - return errors.New(b.String()) + return errors.Newf("%s", b.String()) } if *flagFailure != "" { if _, err := regexp.Compile(*flagFailure); err != nil { @@ -138,6 +138,16 @@ func run() error { return err } + const localLibDir = "lib.docker_amd64/" + if fi, err := os.Stat(localLibDir); err == nil && fi.IsDir() { + cmd = exec.Command("roachprod", "put", cluster, localLibDir, "lib") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return err + } + } + cmd = exec.Command("roachprod", "run", cluster, "mkdir -p "+pkg) if err := cmd.Run(); err != nil { return err @@ -288,12 +298,13 @@ func run() error { atomic.LoadInt32(&runs), atomic.LoadInt32(&fails), roundToSeconds(timeutil.Since(startTime))) - switch err := ctx.Err(); err { + err := ctx.Err() + switch { // A context timeout in this case is indicative of no failures // being detected in the allotted duration. 
- case context.DeadlineExceeded: + case errors.Is(err, context.DeadlineExceeded): return nil - case context.Canceled: + case errors.Is(err, context.Canceled): if *flagMaxRuns > 0 && int(atomic.LoadInt32(&runs)) >= *flagMaxRuns { return nil } diff --git a/pkg/cmd/roachprod/cloud/cluster_cloud.go b/pkg/cmd/roachprod/cloud/cluster_cloud.go index 445476ba2aff..cbbf6a11b91c 100644 --- a/pkg/cmd/roachprod/cloud/cluster_cloud.go +++ b/pkg/cmd/roachprod/cloud/cluster_cloud.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) const vmNameFormat = "user--" diff --git a/pkg/cmd/roachprod/cloud/gc.go b/pkg/cmd/roachprod/cloud/gc.go index 714170c6b949..8aef8a5ff351 100644 --- a/pkg/cmd/roachprod/cloud/gc.go +++ b/pkg/cmd/roachprod/cloud/gc.go @@ -26,9 +26,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" "github.com/nlopes/slack" ) +var errNoSlackClient = fmt.Errorf("no Slack client") + type status struct { good []*Cluster warn []*Cluster @@ -100,23 +103,15 @@ func findChannel(client *slack.Client, name string) (string, error) { } func findUserChannel(client *slack.Client, email string) (string, error) { - if client != nil { - // TODO(peter): GetUserByEmail doesn't seem to work. Why? - users, err := client.GetUsers() - if err != nil { - return "", err - } - for _, user := range users { - if user.Profile.Email == email { - _, _, channelID, err := client.OpenIMChannel(user.ID) - if err != nil { - return "", err - } - return channelID, nil - } - } + if client == nil { + return "", errNoSlackClient } - return "", fmt.Errorf("not found") + u, err := client.GetUserByEmail(email) + if err != nil { + return "", err + } + _, _, channelID, err := client.OpenIMChannel(u.ID) + return channelID, err } func postStatus(client *slack.Client, channel string, dryrun bool, s *status, badVMs vm.List) { @@ -320,6 +315,8 @@ func GCClusters(cloud *Cloud, dryrun bool) error { userChannel, err := findUserChannel(client, user+config.EmailDomain) if err == nil { postStatus(client, userChannel, dryrun, status, nil) + } else if !errors.Is(err, errNoSlackClient) { + log.Printf("could not deliver Slack DM to %s: %v", user+config.EmailDomain, err) } } } diff --git a/pkg/cmd/roachprod/errors/errors.go b/pkg/cmd/roachprod/errors/errors.go index 7c822f926f85..196a059f8afb 100644 --- a/pkg/cmd/roachprod/errors/errors.go +++ b/pkg/cmd/roachprod/errors/errors.go @@ -14,7 +14,7 @@ import ( "fmt" "os/exec" - crdberrors "github.com/cockroachdb/errors" + "github.com/cockroachdb/errors" ) // Error is an interface for error types used by the main.wrap() function @@ -29,15 +29,11 @@ type Error interface { // Exit codes for the errors const ( cmdExitCode = 20 - cockroachExitCode = 30 sshExitCode = 10 unclassifiedExitCode = 1 ) -// Cmd wraps errors that result from a non-cockroach command run against -// the cluster. -// -// For errors coming from a cockroach command, use Cockroach. +// Cmd wraps errors that result from a command run against the cluster. 
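As an aside: `errNoSlackClient` above follows the standard Go sentinel-error pattern, where callers test with `errors.Is` instead of comparing error strings. A minimal, self-contained sketch of the pattern (stdlib `errors` shown for brevity; `cockroachdb/errors` is a drop-in replacement here; the channel ID is invented):

```go
package main

import (
	"errors"
	"fmt"
)

// A sentinel error: a package-level value callers can test for.
var errNoSlackClient = errors.New("no Slack client")

func findUserChannel(clientReady bool) (string, error) {
	if !clientReady {
		return "", errNoSlackClient
	}
	return "C1234", nil // "C1234" is a made-up channel ID.
}

func main() {
	_, err := findUserChannel(false)
	// errors.Is matches the sentinel even through %w wrapping, so the
	// caller can silently skip the expected case and log everything else.
	if errors.Is(fmt.Errorf("gc: %w", err), errNoSlackClient) {
		fmt.Println("no client configured; skipping DM")
	}
}
```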
type Cmd struct {
 Err error
}
@@ -54,7 +50,7 @@ func (e Cmd) ExitCode() int {
 // Format passes formatting responsibilities to cockroachdb/errors
 func (e Cmd) Format(s fmt.State, verb rune) {
- crdberrors.FormatError(e, s, verb)
+ errors.FormatError(e, s, verb)
 }
 // Unwrap the wrapped non-cockroach command error.
@@ -62,32 +58,6 @@ func (e Cmd) Unwrap() error {
 return e.Err
 }
-// Cockroach wraps errors that result from a cockroach command run against the cluster.
-//
-// For non-cockroach commands, use Cmd.
-type Cockroach struct {
- Err error
-}
-
-func (e Cockroach) Error() string {
- return fmt.Sprintf("DEAD_ROACH_PROBLEM: %s", e.Err.Error())
-}
-
-// ExitCode gives the process exit code to return for cockroach errors.
-func (e Cockroach) ExitCode() int {
- return cockroachExitCode
-}
-
-// Format passes formatting responsibilities to cockroachdb/errors
-func (e Cockroach) Format(s fmt.State, verb rune) {
- crdberrors.FormatError(e, s, verb)
-}
-
-// Unwrap the wrapped cockroach error.
-func (e Cockroach) Unwrap() error {
- return e.Err
-}
-
 // SSH wraps ssh-specific errors from connections to remote hosts.
 type SSH struct {
 Err error
@@ -104,7 +74,7 @@ func (e SSH) ExitCode() int {
 // Format passes formatting responsibilities to cockroachdb/errors
 func (e SSH) Format(s fmt.State, verb rune) {
- crdberrors.FormatError(e, s, verb)
+ errors.FormatError(e, s, verb)
 }
 // Unwrap the wrapped SSH error.
@@ -128,7 +98,7 @@ func (e Unclassified) ExitCode() int {
 // Format passes formatting responsibilities to cockroachdb/errors
 func (e Unclassified) Format(s fmt.State, verb rune) {
- crdberrors.FormatError(e, s, verb)
+ errors.FormatError(e, s, verb)
 }
 // Unwrap the wrapped unclassified error.
@@ -154,45 +124,20 @@ func ClassifyCmdError(err error) Error {
 return Unclassified{err}
 }
-// ClassifyCockroachError classifies an error received while executing a
-// cockroach command remotely over an ssh connection to the right Error type.
-func ClassifyCockroachError(err error) Error {
- if err == nil {
- return nil
- }
-
- if exitErr, ok := asExitError(err); ok {
- if exitErr.ExitCode() == 255 {
- return SSH{err}
- }
- return Cockroach{err}
- }
-
- return Unclassified{err}
-}
-
 // Extract an ExitError from err's error tree or (nil, false) if none exists.
 func asExitError(err error) (*exec.ExitError, bool) {
- if exitErr, ok := crdberrors.If(err, func(err error) (interface{}, bool) {
- if err, ok := err.(*exec.ExitError); ok {
- return err, true
- }
- return nil, false
- }); ok {
- return exitErr.(*exec.ExitError), true
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ return exitErr, true
 }
 return nil, false
 }
 // AsError extracts the Error from err's error tree or (nil, false) if none exists. 
func AsError(err error) (Error, bool) { - if rpErr, ok := crdberrors.If(err, func(err error) (interface{}, bool) { - if rpErr, ok := err.(Error); ok { - return rpErr, true - } - return nil, false - }); ok { - return rpErr.(Error), true + var e Error + if errors.As(err, &e) { + return e, true } return nil, false } diff --git a/pkg/cmd/roachprod/hosts.go b/pkg/cmd/roachprod/hosts.go index 3a2deb3ff453..b0cf73323611 100644 --- a/pkg/cmd/roachprod/hosts.go +++ b/pkg/cmd/roachprod/hosts.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/cloud" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/install" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) func initDirs() error { @@ -160,6 +160,13 @@ func loadClusters() error { } else { return newInvalidHostsLineErr(l) } + // NB: it turns out we do see empty hosts here if we are concurrently + // creating clusters and this sync is picking up a cluster that's not + // ready yet. See: + // https://github.com/cockroachdb/cockroach/issues/49542#issuecomment-634563130 + // if n == "" { + // return newInvalidHostsLineErr(l) + // } var locality string if len(fields) > 0 { @@ -182,6 +189,9 @@ func loadClusters() error { c.Localities = append(c.Localities, locality) c.VPCs = append(c.VPCs, vpc) } + if len(c.VMs) == 0 { + return errors.Errorf("found no VMs in %s", contents) + } install.Clusters[file.Name()] = c } diff --git a/pkg/cmd/roachprod/install/cassandra.go b/pkg/cmd/roachprod/install/cassandra.go index 8299ee31af15..136c111ba25b 100644 --- a/pkg/cmd/roachprod/install/cassandra.go +++ b/pkg/cmd/roachprod/install/cassandra.go @@ -45,9 +45,11 @@ func (Cassandra) Start(c *SyncedCluster, extraArgs []string) { if err != nil { return err } - defer session.Close() + defer func() { + _ = session.Close() + }() - cmd := c.Env + `env ROACHPROD=true cassandra` + + cmd := `env ` + c.Env + ` ROACHPROD=true cassandra` + ` -Dcassandra.config=file://${PWD}/cassandra.yaml` + ` -Dcassandra.ring_delay_ms=3000` + ` > cassandra.stdout 2> cassandra.stderr` @@ -63,7 +65,9 @@ func (Cassandra) Start(c *SyncedCluster, extraArgs []string) { if err != nil { return false, err } - defer session.Close() + defer func() { + _ = session.Close() + }() cmd := `nc -z $(hostname) 9042` if _, err := session.CombinedOutput(cmd); err != nil { diff --git a/pkg/cmd/roachprod/install/cassandra_yaml.go b/pkg/cmd/roachprod/install/cassandra_yaml.go index d14346f61cf6..1676bd8c87b6 100644 --- a/pkg/cmd/roachprod/install/cassandra_yaml.go +++ b/pkg/cmd/roachprod/install/cassandra_yaml.go @@ -88,7 +88,7 @@ num_tokens: 256 # May either be "true" or "false" to enable globally hinted_handoff_enabled: true -# When hinted_handoff_enabled is true, a black list of data centers that will not +# When hinted_handoff_enabled is true, a blocklist of data centers that will not # perform hinted handoff # hinted_handoff_disabled_datacenters: # - DC1 diff --git a/pkg/cmd/roachprod/install/cluster_synced.go b/pkg/cmd/roachprod/install/cluster_synced.go index 9fa808b87158..2e2a1a67c791 100644 --- a/pkg/cmd/roachprod/install/cluster_synced.go +++ b/pkg/cmd/roachprod/install/cluster_synced.go @@ -37,8 +37,9 @@ import ( clog "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/util/version" + 
"github.com/cockroachdb/errors" crdberrors "github.com/cockroachdb/errors" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -82,26 +83,6 @@ type SyncedCluster struct { DebugDir string } -// CmdKind is the kind of command passed to SyncedCluster.Run(). -type CmdKind int - -// The kinds of commands passed to SyncedCluster.Run(). -const ( - // A cockroach command is passed. - CockroachCmd CmdKind = iota - - // A non-classified command is passed. - OtherCmd -) - -func (ck CmdKind) classifyError(err error) rperrors.Error { - if ck == CockroachCmd { - return rperrors.ClassifyCockroachError(err) - } - - return rperrors.ClassifyCmdError(err) -} - func (c *SyncedCluster) host(index int) string { return c.VMs[index-1] } @@ -122,7 +103,13 @@ func (c *SyncedCluster) IsLocal() bool { return c.Name == config.Local } -// ServerNodes TODO(peter): document +// ServerNodes is the fully expanded, ordered list of nodes that any given +// roachprod command is intending to target. +// +// $ roachprod create local -n 4 +// $ roachprod start local # [1, 2, 3, 4] +// $ roachprod start local:2-4 # [2, 3, 4] +// $ roachprod start local:2,1,4 # [1, 2, 4] func (c *SyncedCluster) ServerNodes() []int { return append([]int{}, c.Nodes...) } @@ -148,7 +135,14 @@ func (c *SyncedCluster) GetInternalIP(index int) (string, error) { "GetInternalIP: failed to execute hostname on %s:%d:\n(stdout) %s\n(stderr) %s", c.Name, index, stdout.String(), stderr.String()) } - return strings.TrimSpace(stdout.String()), nil + ip := strings.TrimSpace(stdout.String()) + if ip == "" { + return "", errors.Errorf( + "empty internal IP returned, stdout:\n%s\nstderr:\n%s", + stdout.String(), stderr.String(), + ) + } + return ip, nil } // Start TODO(peter): document @@ -459,12 +453,9 @@ done // stdout: Where stdout messages are written // stderr: Where stderr messages are written // nodes: The cluster nodes where the command will be run. -// cmdKind: Which type of command is being run? This allows refined error reporting. // title: A description of the command being run that is output to the logs. // cmd: The command to run. -func (c *SyncedCluster) Run( - stdout, stderr io.Writer, nodes []int, cmdKind CmdKind, title, cmd string, -) error { +func (c *SyncedCluster) Run(stdout, stderr io.Writer, nodes []int, title, cmd string) error { // Stream output if we're running the command on only 1 node. stream := len(nodes) == 1 var display string @@ -513,7 +504,7 @@ func (c *SyncedCluster) Run( if errors[i] != nil { detailMsg := fmt.Sprintf("Node %d. Command with error:\n```\n%s\n```\n", nodes[i], cmd) err = crdberrors.WithDetail(errors[i], detailMsg) - err = cmdKind.classifyError(err) + err = rperrors.ClassifyCmdError(err) errors[i] = err } return nil, nil @@ -524,7 +515,7 @@ func (c *SyncedCluster) Run( if err != nil { detailMsg := fmt.Sprintf("Node %d. Command with error:\n```\n%s\n```\n", nodes[i], cmd) err = crdberrors.WithDetail(err, detailMsg) - err = cmdKind.classifyError(err) + err = rperrors.ClassifyCmdError(err) errors[i] = err msg += fmt.Sprintf("\n%v", err) } @@ -1012,18 +1003,38 @@ func (c *SyncedCluster) Put(src, dest string) { } } - mkpath := func(i int) string { + mkpath := func(i int, dest string) (string, error) { if i == -1 { - return src + return src, nil + } + // Expand the destination to allow, for example, putting directly + // into {store-dir}. 
+ e := expander{
+ node: c.Nodes[i],
 }
- return fmt.Sprintf("%s@%s:%s", c.user(c.Nodes[i]), c.host(c.Nodes[i]), dest)
+ dest, err := e.expand(c, dest)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s@%s:%s", c.user(c.Nodes[i]), c.host(c.Nodes[i]), dest), nil
 }
 for i := range c.Nodes {
- go func(i int) {
+ go func(i int, dest string) {
 defer wg.Done()
 if c.IsLocal() {
+ // Expand the destination to allow, for example, putting directly
+ // into {store-dir}.
+ e := expander{
+ node: c.Nodes[i],
+ }
+ var err error
+ dest, err = e.expand(c, dest)
+ if err != nil {
+ results <- result{i, err}
+ return
+ }
 if _, err := os.Stat(src); err != nil {
 results <- result{i, err}
 return
@@ -1033,7 +1044,16 @@ func (c *SyncedCluster) Put(src, dest string) {
 results <- result{i, err}
 return
 }
- to := fmt.Sprintf(os.ExpandEnv("${HOME}/local/%d/%s"), c.Nodes[i], dest)
+ // TODO(jlinder): this does not take into account things like
+ // roachprod put local:1 /some/file.txt /some/dir
+ // and will replace 'dir' with the contents of file.txt, instead
+ // of creating /some/dir/file.txt.
+ var to string
+ if filepath.IsAbs(dest) {
+ to = dest
+ } else {
+ to = fmt.Sprintf(os.ExpandEnv("${HOME}/local/%d/%s"), c.Nodes[i], dest)
+ }
 // Remove the destination if it exists, ignoring errors which we'll
 // handle via the os.Symlink() call.
 _ = os.Remove(to)
@@ -1049,12 +1069,21 @@
 // achieving this approach is likely a generalization of the current
 // code.
 srcIndex := <-sources
- from := mkpath(srcIndex)
+ from, err := mkpath(srcIndex, dest)
+ if err != nil {
+ results <- result{i, err}
+ return
+ }
 // TODO(peter): For remote-to-remote copies, should the destination use
 // the internal IP address? The external address works, but it might be
 // slower.
- to := mkpath(i)
- err := c.scp(from, to)
+ to, err := mkpath(i, dest)
+ if err != nil {
+ results <- result{i, err}
+ return
+ }
+
+ err = c.scp(from, to)
 results <- result{i, err}
 if err != nil {
@@ -1070,7 +1099,7 @@
 pushSource(i)
 }
 }
- }(i)
+ }(i, dest)
 }
 go func() {
@@ -1708,3 +1737,43 @@ func (c *SyncedCluster) Parallel(
 func (c *SyncedCluster) escapedTag() string {
 return strings.Replace(c.Tag, "/", "\\/", -1)
 }
+
+// Init initializes the cluster. It does it through node 1 (as per ServerNodes)
+// to maintain parity with the auto-init behavior of `roachprod start` (when
+// --skip-init is not specified). The implementation should be kept in
+// sync with Cockroach.Start.
+func (c *SyncedCluster) Init() {
+ r := c.Impl.(Cockroach)
+ h := &crdbInstallHelper{c: c, r: r}
+
+ // See (Cockroach).Start. We reserve a few special operations for the first
+ // node, so we strive to maintain the same here for interoperability. 
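Together with the new `StartOpts.SkipInit`, `Init` enables a split workflow; a sketch of the intended CLI usage (cluster name invented; flag spelling per the comment above):

```bash
# Start every node without bootstrapping, then initialize explicitly.
# `roachprod init` goes through node 1, mirroring plain `roachprod start`.
roachprod start mycluster --skip-init
roachprod init mycluster
```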
+ const firstNodeIdx = 0
+
+ vers, err := getCockroachVersion(c, c.ServerNodes()[firstNodeIdx])
+ if err != nil {
+ log.Fatalf("unable to retrieve cockroach version: %v", err)
+ }
+
+ if !vers.AtLeast(version.MustParse("v20.1.0")) {
+ log.Fatal("`roachprod init` only supported for v20.1 and beyond")
+ }
+
+ fmt.Printf("%s: initializing cluster\n", h.c.Name)
+ initOut, err := h.initializeCluster(firstNodeIdx)
+ if err != nil {
+ log.Fatalf("unable to initialize cluster: %v", err)
+ }
+ if initOut != "" {
+ fmt.Println(initOut)
+ }
+
+ fmt.Printf("%s: setting cluster settings\n", h.c.Name)
+ clusterSettingsOut, err := h.setClusterSettings(firstNodeIdx)
+ if err != nil {
+ log.Fatalf("unable to set cluster settings: %v", err)
+ }
+ if clusterSettingsOut != "" {
+ fmt.Println(clusterSettingsOut)
+ }
+}
diff --git a/pkg/cmd/roachprod/install/cockroach.go b/pkg/cmd/roachprod/install/cockroach.go
index c633882ef86a..a8ac7216c503 100644
--- a/pkg/cmd/roachprod/install/cockroach.go
+++ b/pkg/cmd/roachprod/install/cockroach.go
@@ -12,6 +12,7 @@ package install
 import (
 "fmt"
+ "log"
 "os"
 "os/exec"
 "path/filepath"
@@ -23,19 +24,20 @@ import (
 "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/ssh"
 "github.com/cockroachdb/cockroach/pkg/util/envutil"
 "github.com/cockroachdb/cockroach/pkg/util/version"
- "github.com/pkg/errors"
+ "github.com/cockroachdb/errors"
 )
 // StartOpts TODO(peter): document
 var StartOpts struct {
 Encrypt bool
 Sequential bool
+ SkipInit bool
 }
 // Cockroach TODO(peter): document
 type Cockroach struct{}
-func cockroachNodeBinary(c *SyncedCluster, i int) string {
+func cockroachNodeBinary(c *SyncedCluster, node int) string {
 if filepath.IsAbs(config.Binary) {
 return config.Binary
 }
@@ -43,7 +45,7 @@
 return "./" + config.Binary
 }
- path := filepath.Join(fmt.Sprintf(os.ExpandEnv("${HOME}/local/%d"), i), config.Binary)
+ path := filepath.Join(fmt.Sprintf(os.ExpandEnv("${HOME}/local/%d"), node), config.Binary)
 if _, err := os.Stat(path); err == nil {
 return path
 }
@@ -75,14 +77,14 @@
 return path
 }
-func getCockroachVersion(c *SyncedCluster, i int) (*version.Version, error) {
- sess, err := c.newSession(i)
+func getCockroachVersion(c *SyncedCluster, node int) (*version.Version, error) {
+ sess, err := c.newSession(node)
 if err != nil {
 return nil, err
 }
 defer sess.Close()
- cmd := cockroachNodeBinary(c, i) + " version"
+ cmd := cockroachNodeBinary(c, node) + " version"
 out, err := sess.CombinedOutput(cmd)
 if err != nil {
 return nil, errors.Wrapf(err, "~ %s\n%s", cmd, out)
@@ -110,237 +112,102 @@ func argExists(args []string, target string) int {
 return -1
 }
-// Start implements the ClusterImpl.NodeDir interface.
+// Start implements the ClusterImpl.NodeDir interface, and powers `roachprod
+// start`. Starting the first node is special-cased quite a bit: it's used to
+// distribute certs, set cluster settings, and initialize the cluster. Also,
+// if we're only starting a single node in the cluster and it happens to be the
+// "first" node (node 1, as understood by SyncedCluster.ServerNodes), we use
+// `start-single-node` (this was written to provide a shorthand to start a
+// single-node cluster with a replication factor of one).
 func (r Cockroach) Start(c *SyncedCluster, extraArgs []string) {
- // Check to see if node 1 was started indicating the cluster was
- // bootstrapped. 
- var bootstrapped bool - for _, i := range c.ServerNodes() { - if i == 1 { - bootstrapped = true - break - } - } + h := &crdbInstallHelper{c: c, r: r} + h.distributeCerts() - if c.Secure && bootstrapped { - c.DistributeCerts() - } - - display := fmt.Sprintf("%s: starting", c.Name) - host1 := c.host(1) nodes := c.ServerNodes() - - // If we're creating nodes that span VPC (e.g. AWS multi-region or - // multi-cloud), we'll tell the nodes to advertise their public IPs - // so that attaching nodes to the cluster Just Works. - var advertisePublicIP bool - for i, vpc := range c.VPCs { - if i > 0 && vpc != c.VPCs[0] { - advertisePublicIP = true - break - } - } - - env := func() string { - var buf strings.Builder - for _, v := range os.Environ() { - if strings.HasPrefix(v, "COCKROACH_") { - if buf.Len() > 0 { - buf.WriteString(" ") - } - buf.WriteString(v) - } - } - if len(c.Env) > 0 { - if buf.Len() > 0 { - buf.WriteString(" ") - } - buf.WriteString(c.Env) - } - return buf.String() - }() - - p := 0 + var parallelism = 0 if StartOpts.Sequential { - p = 1 + parallelism = 1 } - c.Parallel(display, len(nodes), p, func(i int) ([]byte, error) { - vers, err := getCockroachVersion(c, nodes[i]) + + fmt.Printf("%s: starting nodes\n", c.Name) + c.Parallel("", len(nodes), parallelism, func(nodeIdx int) ([]byte, error) { + vers, err := getCockroachVersion(c, nodes[nodeIdx]) if err != nil { return nil, err } - sess, err := c.newSession(nodes[i]) - if err != nil { + // NB: if cockroach started successfully, we ignore the output as it is + // some harmless start messaging. + if _, err := h.startNode(nodeIdx, extraArgs, vers); err != nil { return nil, err } - defer sess.Close() - - port := r.NodePort(c, nodes[i]) - var args []string - if c.Secure { - args = append(args, "--certs-dir="+c.Impl.CertsDir(c, nodes[i])) - } else { - args = append(args, "--insecure") - } - dir := c.Impl.NodeDir(c, nodes[i]) - logDir := c.Impl.LogDir(c, nodes[i]) - if idx := argExists(extraArgs, "--store"); idx == -1 { - args = append(args, "--store=path="+dir) - } - args = append(args, "--log-dir="+logDir) - args = append(args, "--background") - if vers.AtLeast(version.MustParse("v1.1.0")) { - cache := 25 - if c.IsLocal() { - cache /= len(nodes) - if cache == 0 { - cache = 1 - } - } - args = append(args, fmt.Sprintf("--cache=%d%%", cache)) - args = append(args, fmt.Sprintf("--max-sql-memory=%d%%", cache)) - } - if c.IsLocal() { - // This avoids annoying firewall prompts on Mac OS X. - if vers.AtLeast(version.MustParse("v2.1.0")) { - args = append(args, "--listen-addr=127.0.0.1") - } else { - args = append(args, "--host=127.0.0.1") - } - } - args = append(args, fmt.Sprintf("--port=%d", port)) - args = append(args, fmt.Sprintf("--http-port=%d", GetAdminUIPort(port))) - if locality := c.locality(nodes[i]); locality != "" { - if idx := argExists(extraArgs, "--locality"); idx == -1 { - args = append(args, "--locality="+locality) - } - } - if nodes[i] != 1 { - args = append(args, fmt.Sprintf("--join=%s:%d", host1, r.NodePort(c, 1))) - } - if advertisePublicIP { - args = append(args, fmt.Sprintf("--advertise-host=%s", c.host(i+1))) - } else if !c.IsLocal() { - // Explicitly advertise by IP address so that we don't need to - // deal with cross-region name resolution. The `hostname -I` - // prints all IP addresses for the host and then we'll select - // the first from the list. 
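For reference, this is what the `hostname -I` trick described in the comment above does (illustrative output; addresses invented):

```bash
$ hostname -I
10.142.0.5 172.17.0.1
$ hostname -I | awk '{print $1}'
10.142.0.5
```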
- args = append(args, "--advertise-host=$(hostname -I | awk '{print $1}')") + // We reserve a few special operations (bootstrapping, and setting + // cluster settings) for node 1. + if node := nodes[nodeIdx]; node != 1 { + return nil, nil } - var keyCmd string - if StartOpts.Encrypt { - // Encryption at rest is turned on for the cluster. - // TODO(windchan7): allow key size to be specified through flags. - encryptArgs := "--enterprise-encryption=path=%s,key=%s/aes-128.key,old-key=plain" - var storeDir string - if idx := argExists(extraArgs, "--store"); idx == -1 { - storeDir = dir - } else { - storeDir = strings.TrimPrefix(extraArgs[idx], "--store=") - } - encryptArgs = fmt.Sprintf(encryptArgs, storeDir, storeDir) - args = append(args, encryptArgs) + // NB: The code blocks below are not parallelized, so it's safe for us + // to use fmt.Printf style logging. - // Command to create the store key. - keyCmd = fmt.Sprintf("mkdir -p %[1]s; if [ ! -e %[1]s/aes-128.key ]; then openssl rand -out %[1]s/aes-128.key 48; fi; ", storeDir) - } + // 1. We don't init invoked using `--skip-init`. + // 2. We don't init when invoking with `start-single-node`. + // 3. For nodes running <20.1, the --join flags are constructed in a + // manner such that the first node doesn't have any (see + // `generateStartArgs`),which prompts CRDB to auto-initialize. For + // nodes running >=20.1, we need to explicitly initialize. - // Argument template expansion is node specific (e.g. for {store-dir}). - e := expander{ - node: nodes[i], + if StartOpts.SkipInit { + return nil, nil } - for _, arg := range extraArgs { - expandedArg, err := e.expand(c, arg) + + shouldInit := !h.useStartSingleNode(vers) && vers.AtLeast(version.MustParse("v20.1.0")) + if shouldInit { + fmt.Printf("%s: initializing cluster\n", h.c.Name) + initOut, err := h.initializeCluster(nodeIdx) if err != nil { - return nil, err + log.Fatalf("unable to initialize cluster: %v", err) } - args = append(args, strings.Split(expandedArg, " ")...) - } - // For a one-node cluster, use start-single-node to disable replication. - startCmd := "start" - if len(c.VMs) == 1 && vers.AtLeast(version.MustParse("v19.2.0")) { - startCmd = "start-single-node" + if initOut != "" { + fmt.Println(initOut) + } } - binary := cockroachNodeBinary(c, nodes[i]) - // NB: this is awkward as when the process fails, the test runner will show an - // unhelpful empty error (since everything has been redirected away). This is - // unfortunately equally awkward to address. - cmd := "ulimit -c unlimited; mkdir -p " + logDir + "; " - // TODO(peter): The ps and lslocks stuff is intended to debug why killing - // of a cockroach process sometimes doesn't release file locks immediately. - cmd += `echo ">>> roachprod start: $(date)" >> ` + logDir + "/roachprod.log; " + - `ps axeww -o pid -o command >> ` + logDir + "/roachprod.log; " + - `[ -x /usr/bin/lslocks ] && /usr/bin/lslocks >> ` + logDir + "/roachprod.log; " - cmd += keyCmd + - fmt.Sprintf(" export ROACHPROD=%d%s && ", nodes[i], c.Tag) + - "GOTRACEBACK=crash " + - "COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING=1 " + - // Turn stats mismatch into panic, see: - // https://github.com/cockroachdb/cockroach/issues/38720#issuecomment-539136246 - // Disabled because we have a local repro in - // https://github.com/cockroachdb/cockroach/issues/37815#issuecomment-545650087 + if !vers.AtLeast(version.MustParse("v20.1.0")) { + // Given #51897 remains unresolved, master-built roachprod is used + // to run roachtests against the 20.1 branch. 
Some of those + // roachtests test mixed-version clusters that start off at 19.2. + // Consequently, we manually add this `cluster-bootstrapped` file + // where roachprod expects to find it for already-initialized + // clusters. This is a pretty gross hack, that we should address by + // addressing #51897. // - // "COCKROACH_ENFORCE_CONSISTENT_STATS=true " + - env + " " + binary + " " + startCmd + " " + strings.Join(args, " ") + - " >> " + logDir + "/cockroach.stdout.log 2>> " + logDir + "/cockroach.stderr.log" + - " || (x=$?; cat " + logDir + "/cockroach.stderr.log; exit $x)" - if out, err := sess.CombinedOutput(cmd); err != nil { - return nil, errors.Wrapf(err, "~ %s\n%s", cmd, out) - } - // NB: if cockroach started successfully, we ignore the output as it is - // some harmless start messaging. - return nil, nil - }) - - if bootstrapped { - license := envutil.EnvOrDefaultString("COCKROACH_DEV_LICENSE", "") - if license == "" { - fmt.Printf("%s: COCKROACH_DEV_LICENSE unset: enterprise features will be unavailable\n", - c.Name) - } - - var msg string - display = fmt.Sprintf("%s: initializing cluster settings", c.Name) - c.Parallel(display, 1, 0, func(i int) ([]byte, error) { - sess, err := c.newSession(1) + // TODO(irfansharif): Remove this once #51897 is resolved. + markBootstrap := fmt.Sprintf("touch %s/%s", h.c.Impl.NodeDir(h.c, nodes[nodeIdx]), "cluster-bootstrapped") + cmdOut, err := h.run(nodeIdx, markBootstrap) if err != nil { - return nil, err - } - defer sess.Close() - - var cmd string - if c.IsLocal() { - cmd = `cd ${HOME}/local/1 ; ` + log.Fatalf("unable to run cmd: %v", err) } - dir := c.Impl.NodeDir(c, nodes[i]) - cmd += ` -if ! test -e ` + dir + `/settings-initialized ; then - COCKROACH_CONNECT_TIMEOUT=0 ` + cockroachNodeBinary(c, 1) + " sql --url " + - r.NodeURL(c, "localhost", r.NodePort(c, 1)) + " -e " + - fmt.Sprintf(`" -SET CLUSTER SETTING server.remote_debugging.mode = 'any'; -SET CLUSTER SETTING cluster.organization = 'Cockroach Labs - Production Testing'; -SET CLUSTER SETTING enterprise.license = '%s';"`, license) + ` && - touch ` + dir + `/settings-initialized -fi -` - out, err := sess.CombinedOutput(cmd) - if err != nil { - return nil, errors.Wrapf(err, "~ %s\n%s", cmd, out) + if cmdOut != "" { + fmt.Println(cmdOut) } - msg = strings.TrimSpace(string(out)) - return nil, nil - }) + } - if msg != "" { - fmt.Println(msg) + // We're sure to set cluster settings after having initialized the + // cluster. + + fmt.Printf("%s: setting cluster settings\n", h.c.Name) + clusterSettingsOut, err := h.setClusterSettings(nodeIdx) + if err != nil { + log.Fatalf("unable to set cluster settings: %v", err) } - } + if clusterSettingsOut != "" { + fmt.Println(clusterSettingsOut) + } + return nil, nil + }) } // NodeDir implements the ClusterImpl.NodeDir interface. 
@@ -373,7 +240,7 @@ func (Cockroach) CertsDir(c *SyncedCluster, index int) string { func (Cockroach) NodeURL(c *SyncedCluster, host string, port int) string { url := fmt.Sprintf("'postgres://root@%s:%d", host, port) if c.Secure { - url += "?sslcert=certs%2Fnode.crt&sslkey=certs%2Fnode.key&" + + url += "?sslcert=certs%2Fclient.root.crt&sslkey=certs%2Fclient.root.key&" + "sslrootcert=certs%2Fca.crt&sslmode=verify-full" } else { url += "?sslmode=disable" @@ -421,8 +288,8 @@ func (r Cockroach) SQL(c *SyncedCluster, args []string) error { resultChan := make(chan result, len(c.Nodes)) display := fmt.Sprintf("%s: executing sql", c.Name) - c.Parallel(display, len(c.Nodes), 0, func(i int) ([]byte, error) { - sess, err := c.newSession(c.Nodes[i]) + c.Parallel(display, len(c.Nodes), 0, func(nodeIdx int) ([]byte, error) { + sess, err := c.newSession(c.Nodes[nodeIdx]) if err != nil { return nil, err } @@ -430,10 +297,10 @@ func (r Cockroach) SQL(c *SyncedCluster, args []string) error { var cmd string if c.IsLocal() { - cmd = fmt.Sprintf(`cd ${HOME}/local/%d ; `, c.Nodes[i]) + cmd = fmt.Sprintf(`cd ${HOME}/local/%d ; `, c.Nodes[nodeIdx]) } - cmd += cockroachNodeBinary(c, c.Nodes[i]) + " sql --url " + - r.NodeURL(c, "localhost", r.NodePort(c, c.Nodes[i])) + " " + + cmd += cockroachNodeBinary(c, c.Nodes[nodeIdx]) + " sql --url " + + r.NodeURL(c, "localhost", r.NodePort(c, c.Nodes[nodeIdx])) + " " + ssh.Escape(args) out, err := sess.CombinedOutput(cmd) @@ -441,7 +308,7 @@ func (r Cockroach) SQL(c *SyncedCluster, args []string) error { return nil, errors.Wrapf(err, "~ %s\n%s", cmd, out) } - resultChan <- result{node: c.Nodes[i], output: string(out)} + resultChan <- result{node: c.Nodes[nodeIdx], output: string(out)} return nil, nil }) @@ -458,3 +325,342 @@ func (r Cockroach) SQL(c *SyncedCluster, args []string) error { return nil } + +type crdbInstallHelper struct { + c *SyncedCluster + r Cockroach +} + +func (h *crdbInstallHelper) startNode( + nodeIdx int, extraArgs []string, vers *version.Version, +) (string, error) { + startCmd, err := h.generateStartCmd(nodeIdx, extraArgs, vers) + if err != nil { + return "", err + } + + nodes := h.c.ServerNodes() + sess, err := h.c.newSession(nodes[nodeIdx]) + if err != nil { + return "", err + } + defer sess.Close() + + out, err := sess.CombinedOutput(startCmd) + if err != nil { + return "", errors.Wrapf(err, "~ %s\n%s", startCmd, out) + } + return strings.TrimSpace(string(out)), nil +} + +func (h *crdbInstallHelper) generateStartCmd( + nodeIdx int, extraArgs []string, vers *version.Version, +) (string, error) { + args, err := h.generateStartArgs(nodeIdx, extraArgs, vers) + if err != nil { + return "", err + } + + // For a one-node cluster, use `start-single-node` to disable replication. + // For everything else we'll fall back to using `cockroach start`. + var startCmd string + if h.useStartSingleNode(vers) { + startCmd = "start-single-node" + } else { + startCmd = "start" + } + + nodes := h.c.ServerNodes() + logDir := h.c.Impl.LogDir(h.c, nodes[nodeIdx]) + binary := cockroachNodeBinary(h.c, nodes[nodeIdx]) + keyCmd := h.generateKeyCmd(nodeIdx, extraArgs) + + // NB: this is awkward as when the process fails, the test runner will show an + // unhelpful empty error (since everything has been redirected away). This is + // unfortunately equally awkward to address. 
+ cmd := fmt.Sprintf(` + ulimit -c unlimited; mkdir -p %[1]s; + echo ">>> roachprod start: $(date)" >> %[1]s/roachprod.log; + ps axeww -o pid -o command >> %[1]s/roachprod.log; + [ -x /usr/bin/lslocks ] && /usr/bin/lslocks >> %[1]s/roachprod.log; %[2]s + export ROACHPROD=%[3]d%[4]s; + GOTRACEBACK=crash COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING=1 %[5]s \ + %[6]s %[7]s %[8]s >> %[1]s/cockroach.stdout.log \ + 2>> %[1]s/cockroach.stderr.log \ + || (x=$?; cat %[1]s/cockroach.stderr.log; exit $x)`, + logDir, // [1] + keyCmd, // [2] + nodes[nodeIdx], // [3] + h.c.Tag, // [4] + h.getEnvVars(), // [5] + binary, // [6] + startCmd, // [7] + strings.Join(args, " "), // [8] + ) + return cmd, nil +} + +func (h *crdbInstallHelper) generateStartArgs( + nodeIdx int, extraArgs []string, vers *version.Version, +) ([]string, error) { + var args []string + nodes := h.c.ServerNodes() + + args = append(args, "--background") + if h.c.Secure { + args = append(args, "--certs-dir="+h.c.Impl.CertsDir(h.c, nodes[nodeIdx])) + } else { + args = append(args, "--insecure") + } + + dir := h.c.Impl.NodeDir(h.c, nodes[nodeIdx]) + logDir := h.c.Impl.LogDir(h.c, nodes[nodeIdx]) + if idx := argExists(extraArgs, "--store"); idx == -1 { + args = append(args, "--store=path="+dir) + } + args = append(args, "--log-dir="+logDir) + + if vers.AtLeast(version.MustParse("v1.1.0")) { + cache := 25 + if h.c.IsLocal() { + cache /= len(nodes) + if cache == 0 { + cache = 1 + } + } + args = append(args, fmt.Sprintf("--cache=%d%%", cache)) + args = append(args, fmt.Sprintf("--max-sql-memory=%d%%", cache)) + } + if h.c.IsLocal() { + // This avoids annoying firewall prompts on Mac OS X. + if vers.AtLeast(version.MustParse("v2.1.0")) { + args = append(args, "--listen-addr=127.0.0.1") + } else { + args = append(args, "--host=127.0.0.1") + } + } + + port := h.r.NodePort(h.c, nodes[nodeIdx]) + args = append(args, fmt.Sprintf("--port=%d", port)) + args = append(args, fmt.Sprintf("--http-port=%d", GetAdminUIPort(port))) + if locality := h.c.locality(nodes[nodeIdx]); locality != "" { + if idx := argExists(extraArgs, "--locality"); idx == -1 { + args = append(args, "--locality="+locality) + } + } + + if !h.useStartSingleNode(vers) { + // --join flags are unsupported/unnecessary in `cockroach + // start-single-node`. That aside, setting up --join flags is a bit + // precise. We have every node point to node 1. For clusters running + // <20.1, we have node 1 not point to anything (which in turn is used to + // trigger auto-initialization node 1). For clusters running >=20.1, + // node 1 also points to itself, and an explicit `cockroach init` is + // needed. + if nodes[nodeIdx] != 1 || vers.AtLeast(version.MustParse("v20.1.0")) { + args = append(args, fmt.Sprintf("--join=%s:%d", h.c.host(1), h.r.NodePort(h.c, 1))) + } + } + + if h.shouldAdvertisePublicIP() { + args = append(args, fmt.Sprintf("--advertise-host=%s", h.c.host(nodeIdx+1))) + } else if !h.c.IsLocal() { + // Explicitly advertise by IP address so that we don't need to + // deal with cross-region name resolution. The `hostname -I` + // prints all IP addresses for the host and then we'll select + // the first from the list. + args = append(args, "--advertise-host=$(hostname -I | awk '{print $1}')") + } + + if StartOpts.Encrypt { + // Encryption at rest is turned on for the cluster. + // TODO(windchan7): allow key size to be specified through flags. 
+ encryptArgs := "--enterprise-encryption=path=%s,key=%s/aes-128.key,old-key=plain" + var storeDir string + if idx := argExists(extraArgs, "--store"); idx == -1 { + storeDir = dir + } else { + storeDir = strings.TrimPrefix(extraArgs[idx], "--store=") + } + encryptArgs = fmt.Sprintf(encryptArgs, storeDir, storeDir) + args = append(args, encryptArgs) + } + + // Argument template expansion is node specific (e.g. for {store-dir}). + e := expander{ + node: nodes[nodeIdx], + } + for _, arg := range extraArgs { + expandedArg, err := e.expand(h.c, arg) + if err != nil { + return nil, err + } + args = append(args, strings.Split(expandedArg, " ")...) + } + + return args, nil +} + +func (h *crdbInstallHelper) initializeCluster(nodeIdx int) (string, error) { + nodes := h.c.ServerNodes() + initCmd := h.generateInitCmd(nodeIdx) + + sess, err := h.c.newSession(nodes[nodeIdx]) + if err != nil { + return "", err + } + defer sess.Close() + + out, err := sess.CombinedOutput(initCmd) + if err != nil { + return "", errors.Wrapf(err, "~ %s\n%s", initCmd, out) + } + return strings.TrimSpace(string(out)), nil +} + +func (h *crdbInstallHelper) setClusterSettings(nodeIdx int) (string, error) { + nodes := h.c.ServerNodes() + clusterSettingCmd := h.generateClusterSettingCmd(nodeIdx) + + sess, err := h.c.newSession(nodes[nodeIdx]) + if err != nil { + return "", err + } + defer sess.Close() + + out, err := sess.CombinedOutput(clusterSettingCmd) + if err != nil { + return "", errors.Wrapf(err, "~ %s\n%s", clusterSettingCmd, out) + } + return strings.TrimSpace(string(out)), nil +} + +func (h *crdbInstallHelper) generateClusterSettingCmd(nodeIdx int) string { + nodes := h.c.ServerNodes() + license := envutil.EnvOrDefaultString("COCKROACH_DEV_LICENSE", "") + if license == "" { + fmt.Printf("%s: COCKROACH_DEV_LICENSE unset: enterprise features will be unavailable\n", + h.c.Name) + } + + var clusterSettingCmd string + if h.c.IsLocal() { + clusterSettingCmd = `cd ${HOME}/local/1 ; ` + } + + binary := cockroachNodeBinary(h.c, nodes[nodeIdx]) + path := fmt.Sprintf("%s/%s", h.c.Impl.NodeDir(h.c, nodes[nodeIdx]), "settings-initialized") + url := h.r.NodeURL(h.c, "localhost", h.r.NodePort(h.c, 1)) + + clusterSettingCmd += fmt.Sprintf(` + if ! test -e %s ; then + COCKROACH_CONNECT_TIMEOUT=0 %s sql --url %s -e " + SET CLUSTER SETTING server.remote_debugging.mode = 'any'; + SET CLUSTER SETTING cluster.organization = 'Cockroach Labs - Production Testing'; + SET CLUSTER SETTING enterprise.license = '%s';" \ + && touch %s + fi`, path, binary, url, license, path) + return clusterSettingCmd +} + +func (h *crdbInstallHelper) generateInitCmd(nodeIdx int) string { + nodes := h.c.ServerNodes() + + var initCmd string + if h.c.IsLocal() { + initCmd = `cd ${HOME}/local/1 ; ` + } + + path := fmt.Sprintf("%s/%s", h.c.Impl.NodeDir(h.c, nodes[nodeIdx]), "cluster-bootstrapped") + url := h.r.NodeURL(h.c, "localhost", h.r.NodePort(h.c, nodes[nodeIdx])) + binary := cockroachNodeBinary(h.c, nodes[nodeIdx]) + initCmd += fmt.Sprintf(` + if ! 
test -e %[1]s ; then + COCKROACH_CONNECT_TIMEOUT=0 %[2]s init --url %[3]s && touch %[1]s + fi`, path, binary, url) + return initCmd +} + +func (h *crdbInstallHelper) generateKeyCmd(nodeIdx int, extraArgs []string) string { + if !StartOpts.Encrypt { + return "" + } + + nodes := h.c.ServerNodes() + var storeDir string + if idx := argExists(extraArgs, "--store"); idx == -1 { + storeDir = h.c.Impl.NodeDir(h.c, nodes[nodeIdx]) + } else { + storeDir = strings.TrimPrefix(extraArgs[idx], "--store=") + } + + // Command to create the store key. + keyCmd := fmt.Sprintf(` + mkdir -p %[1]s; + if [ ! -e %[1]s/aes-128.key ]; then + openssl rand -out %[1]s/aes-128.key 48; + fi;`, storeDir) + return keyCmd +} + +func (h *crdbInstallHelper) useStartSingleNode(vers *version.Version) bool { + return len(h.c.VMs) == 1 && vers.AtLeast(version.MustParse("v19.2.0")) +} + +// distributeCerts, like the name suggests, distributes certs if it's a secure +// cluster and we're starting n1. +func (h *crdbInstallHelper) distributeCerts() { + for _, node := range h.c.ServerNodes() { + if node == 1 && h.c.Secure { + h.c.DistributeCerts() + break + } + } +} + +func (h *crdbInstallHelper) shouldAdvertisePublicIP() bool { + // If we're creating nodes that span VPC (e.g. AWS multi-region or + // multi-cloud), we'll tell the nodes to advertise their public IPs + // so that attaching nodes to the cluster Just Works. + for i, vpc := range h.c.VPCs { + if i > 0 && vpc != h.c.VPCs[0] { + return true + } + } + return false +} + +func (h *crdbInstallHelper) getEnvVars() string { + var buf strings.Builder + for _, v := range os.Environ() { + if strings.HasPrefix(v, "COCKROACH_") { + if buf.Len() > 0 { + buf.WriteString(" ") + } + buf.WriteString(v) + } + } + if len(h.c.Env) > 0 { + if buf.Len() > 0 { + buf.WriteString(" ") + } + buf.WriteString(h.c.Env) + } + return buf.String() +} + +func (h *crdbInstallHelper) run(nodeIdx int, cmd string) (string, error) { + nodes := h.c.ServerNodes() + + sess, err := h.c.newSession(nodes[nodeIdx]) + if err != nil { + return "", err + } + defer sess.Close() + + out, err := sess.CombinedOutput(cmd) + if err != nil { + return "", errors.Wrapf(err, "~ %s\n%s", cmd, out) + } + return strings.TrimSpace(string(out)), nil +} diff --git a/pkg/cmd/roachprod/install/expander.go b/pkg/cmd/roachprod/install/expander.go index d01b60ac6d8e..021501cf1fd1 100644 --- a/pkg/cmd/roachprod/install/expander.go +++ b/pkg/cmd/roachprod/install/expander.go @@ -15,7 +15,7 @@ import ( "regexp" "strings" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) var parameterRe = regexp.MustCompile(`{[^}]*}`) diff --git a/pkg/cmd/roachprod/install/install.go b/pkg/cmd/roachprod/install/install.go index f7d0fb419b8c..6699189511bf 100644 --- a/pkg/cmd/roachprod/install/install.go +++ b/pkg/cmd/roachprod/install/install.go @@ -39,7 +39,7 @@ sudo service cassandra stop; sudo mkdir -p "${thrift_dir}" sudo chmod 777 "${thrift_dir}" cd "${thrift_dir}" - curl "https://downloads.apache.org/thrift/0.10.0/thrift-0.10.0.tar.gz" | sudo tar xvz --strip-components 1 + curl "https://downloads.apache.org/thrift/0.13.0/thrift-0.13.0.tar.gz" | sudo tar xvz --strip-components 1 sudo ./configure --prefix=/usr sudo make -j$(nproc) sudo make install @@ -54,7 +54,8 @@ sudo service cassandra stop; sudo rm -rf "${charybde_dir}" "${nemesis_path}" /usr/local/bin/charybdefs{,-nemesis} sudo mkdir -p "${charybde_dir}" sudo chmod 777 "${charybde_dir}" - git clone --depth 1 "https://github.com/scylladb/charybdefs.git" 
"${charybde_dir}" + # TODO(bilal): Change URL back to scylladb/charybdefs once https://github.com/scylladb/charybdefs/pull/21 is merged. + git clone --depth 1 "https://github.com/itsbilal/charybdefs.git" "${charybde_dir}" cd "${charybde_dir}" thrift -r --gen cpp server.thrift @@ -163,7 +164,7 @@ func SortedCmds() []string { func Install(c *SyncedCluster, args []string) error { do := func(title, cmd string) error { var buf bytes.Buffer - err := c.Run(&buf, &buf, c.Nodes, OtherCmd, "installing "+title, cmd) + err := c.Run(&buf, &buf, c.Nodes, "installing "+title, cmd) if err != nil { fmt.Print(buf.String()) } diff --git a/pkg/cmd/roachprod/install/session.go b/pkg/cmd/roachprod/install/session.go index 0b0ac46781a1..9812684fdcee 100644 --- a/pkg/cmd/roachprod/install/session.go +++ b/pkg/cmd/roachprod/install/session.go @@ -19,7 +19,7 @@ import ( "sync" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) type session interface { diff --git a/pkg/cmd/roachprod/install/staging.go b/pkg/cmd/roachprod/install/staging.go index 7f8e1bc9aab3..d111fe751ef6 100644 --- a/pkg/cmd/roachprod/install/staging.go +++ b/pkg/cmd/roachprod/install/staging.go @@ -15,6 +15,7 @@ import ( "fmt" "net/url" "os" + "strings" "github.com/cockroachdb/cockroach/pkg/util/httputil" ) @@ -24,21 +25,21 @@ const ( releaseBinaryServer = "https://s3.amazonaws.com/binaries.cockroachdb.com/" ) -func getEdgeBinaryURL(binaryName, SHA, arch string) (*url.URL, error) { +func getEdgeURL(urlPathBase, SHA, arch string, ext string) (*url.URL, error) { edgeBinaryLocation, err := url.Parse(edgeBinaryServer) if err != nil { return nil, err } - edgeBinaryLocation.Path = binaryName + edgeBinaryLocation.Path = urlPathBase // If a target architecture is provided, attach that. if len(arch) > 0 { edgeBinaryLocation.Path += "." + arch } // If a specific SHA is provided, just attach that. if len(SHA) > 0 { - edgeBinaryLocation.Path += "." + SHA + edgeBinaryLocation.Path += "." + SHA + ext } else { - edgeBinaryLocation.Path += ".LATEST" + edgeBinaryLocation.Path += ext + ".LATEST" // Otherwise, find the latest SHA binary available. This works because // "[executable].LATEST" redirects to the latest SHA. resp, err := httputil.Head(context.TODO(), edgeBinaryLocation.String()) @@ -54,18 +55,45 @@ func getEdgeBinaryURL(binaryName, SHA, arch string) (*url.URL, error) { // StageRemoteBinary downloads a cockroach edge binary with the provided // application path to each specified by the cluster. If no SHA is specified, // the latest build of the binary is used instead. -func StageRemoteBinary(c *SyncedCluster, applicationName, binaryPath, SHA, arch string) error { - binURL, err := getEdgeBinaryURL(binaryPath, SHA, arch) +// Returns the SHA of the resolve binary. 
+func StageRemoteBinary(
+	c *SyncedCluster, applicationName, urlPathBase, SHA, arch string,
+) (string, error) {
+	binURL, err := getEdgeURL(urlPathBase, SHA, arch, "")
 	if err != nil {
-		return err
+		return "", err
 	}
 	fmt.Printf("Resolved binary url for %s: %s\n", applicationName, binURL)
+	urlSplit := strings.Split(binURL.Path, ".")
 	cmdStr := fmt.Sprintf(
 		`curl -sfSL -o %s "%s" && chmod 755 ./%s`, applicationName, binURL, applicationName,
 	)
+	return urlSplit[len(urlSplit)-1], c.Run(
+		os.Stdout, os.Stderr, c.Nodes, fmt.Sprintf("staging binary (%s)", applicationName), cmdStr,
+	)
+}
+
+// StageOptionalRemoteLibrary downloads a library from the cockroach edge with the provided
+// application path to each node specified by the cluster.
+// If no SHA is specified, the latest build of the library is used instead.
+// It will not error if the library does not exist on the edge.
+func StageOptionalRemoteLibrary(
+	c *SyncedCluster, libraryName, urlPathBase, SHA, arch, ext string,
+) error {
+	url, err := getEdgeURL(urlPathBase, SHA, arch, ext)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("Resolved library url for %s: %s\n", libraryName, url)
+	cmdStr := fmt.Sprintf(
+		`mkdir -p ./lib && \
+curl -sfSL -o "./lib/%s" "%s" 2>/dev/null || echo 'optional library %s not found; continuing...'`,
+		libraryName+ext,
+		url,
+		libraryName+ext,
+	)
 	return c.Run(
-		os.Stdout, os.Stderr, c.Nodes, OtherCmd,
-		fmt.Sprintf("staging binary (%s)", applicationName), cmdStr,
+		os.Stdout, os.Stderr, c.Nodes, fmt.Sprintf("staging library (%s)", libraryName), cmdStr,
 	)
 }
@@ -93,9 +121,11 @@ func StageCockroachRelease(c *SyncedCluster, version, arch string) error {
 tmpdir="$(mktemp -d /tmp/cockroach-release.XXX)" && \
 curl -f -s -S -o- %s | tar xfz - -C "${tmpdir}" --strip-components 1 && \
 mv ${tmpdir}/cockroach ./cockroach && \
+mkdir -p ./lib && \
+if [ -d ${tmpdir}/lib ]; then mv ${tmpdir}/lib/* ./lib; fi && \
 chmod 755 ./cockroach
 `, binURL)
 	return c.Run(
-		os.Stdout, os.Stderr, c.Nodes, OtherCmd, "staging cockroach release binary", cmdStr,
+		os.Stdout, os.Stderr, c.Nodes, "staging cockroach release binary", cmdStr,
 	)
 }
diff --git a/pkg/cmd/roachprod/k8s/roachprod-gc.yaml b/pkg/cmd/roachprod/k8s/roachprod-gc.yaml
index 545d09031e20..74ebf38217df 100644
--- a/pkg/cmd/roachprod/k8s/roachprod-gc.yaml
+++ b/pkg/cmd/roachprod/k8s/roachprod-gc.yaml
@@ -34,6 +34,7 @@ spec:
           image: gcr.io/cockroach-dev-inf/cockroachlabs/roachprod:master
           args:
           - gc
+          - --gce-project=cockroach-ephemeral,andrei-jepsen
          - --slack-token
          - $(SLACK_TOKEN)
          env:
diff --git a/pkg/cmd/roachprod/main.go b/pkg/cmd/roachprod/main.go
index 8e31a9a987f9..1130b0d10279 100644
--- a/pkg/cmd/roachprod/main.go
+++ b/pkg/cmd/roachprod/main.go
@@ -39,7 +39,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/gce"
 	"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/local"
 	"github.com/cockroachdb/cockroach/pkg/util/flagutil"
-	"github.com/pkg/errors"
+	"github.com/cockroachdb/errors"
 	"github.com/spf13/cobra"
 	"golang.org/x/crypto/ssh/terminal"
 	"golang.org/x/sys/unix"
@@ -80,7 +80,7 @@ var (
 	listMine bool
 	clusterType = "cockroach"
 	secure = false
-	nodeEnv = "COCKROACH_ENABLE_RPC_COMPRESSION=false"
+	nodeEnv = []string{"COCKROACH_ENABLE_RPC_COMPRESSION=false"}
 	nodeArgs []string
 	tag string
 	external = false
@@ -89,6 +89,7 @@ var (
 	adminurlIPs = false
 	useTreeDist = true
 	encrypt = false
+	skipInit = false
 	quiet = false
 	sig = 9
 	waitFlag = false
@@ -135,19 +136,13 @@ func newCluster(name string)
(*install.SyncedCluster, error) {
 	c, ok := install.Clusters[name]
 	if !ok {
-		// NB: We don't use fmt.Errorf due to a linter error about the error
-		// message containing capitals and punctuation. We don't use
-		// errors.New(fmt.Sprintf()) due to a linter error that we should use
-		// fmt.Errorf() instead. Sigh.
-		s := fmt.Sprintf(`unknown cluster: %s
-
+		err := errors.Newf(`unknown cluster: %s`, name)
+		err = errors.WithHintf(err, `
Available clusters:
  %s
-
-Hint: use "roachprod sync" to update the list of available clusters.
-`,
-			name, strings.Join(sortedClusters(), "\n  "))
-		return nil, errors.New(s)
+`, strings.Join(sortedClusters(), "\n  "))
+		err = errors.WithHint(err, `Use "roachprod sync" to update the list of available clusters.`)
+		return nil, err
 	}
 	switch clusterType {
@@ -180,7 +175,7 @@ Hint: use "roachprod sync" to update the list of available clusters.
 	}
 	c.Nodes = nodes
 	c.Secure = secure
-	c.Env = nodeEnv
+	c.Env = strings.Join(nodeEnv, " ")
 	c.Args = nodeArgs
 	if tag != "" {
 		c.Tag = "/" + tag
@@ -290,12 +285,12 @@ type clusterAlreadyExistsError struct {
 	name string
 }
-func (e clusterAlreadyExistsError) Error() string {
+func (e *clusterAlreadyExistsError) Error() string {
 	return fmt.Sprintf("cluster %s already exists", e.name)
 }
 func newClusterAlreadyExistsError(name string) error {
-	return clusterAlreadyExistsError{name: name}
+	return &clusterAlreadyExistsError{name: name}
 }
 var createCmd = &cobra.Command{
@@ -355,7 +350,7 @@ Local Clusters
 		if retErr == nil || clusterName == config.Local {
 			return
 		}
-		if _, ok := retErr.(clusterAlreadyExistsError); ok {
+		if errors.HasType(retErr, (*clusterAlreadyExistsError)(nil)) {
 			return
 		}
 		fmt.Fprintf(os.Stderr, "Cleaning up partially-created cluster (prev err: %s)\n", retErr)
@@ -990,7 +985,9 @@ environment variables to the cockroach process.
` + tagHelp + `
The "start" command takes care of setting up the --join address and specifying
reasonable defaults for other flags. One side-effect of this convenience is
-that node 1 is special and must be started for the cluster to be initialized.
+that node 1 is special and, if started, is used to auto-initialize the cluster.
+The --skip-init flag can be used to avoid auto-initialization (which can then
+separately be done using the "init" command).
If the COCKROACH_DEV_LICENSE environment variable is set the enterprise.license
cluster setting will be set to its value.
@@ -1041,6 +1038,31 @@ other signals.
 	}),
}
+var initCmd = &cobra.Command{
+	Use: "init <cluster>",
+	Short: "initialize the cluster",
+	Long: `Initialize the cluster.
+
+The "init" command bootstraps the cluster (using "cockroach init"). It also sets
+default cluster settings. It's intended to be used in conjunction with
+'roachprod start --skip-init'.
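+
+For example, for a cluster named "foo" (name illustrative):
+
+  roachprod start foo --skip-init
+  roachprod init foo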
+`,
+	Args: cobra.ExactArgs(1),
+	Run: wrap(func(cmd *cobra.Command, args []string) error {
+		clusterName, err := verifyClusterName(args[0])
+		if err != nil {
+			return err
+		}
+
+		c, err := newCluster(clusterName)
+		if err != nil {
+			return err
+		}
+		c.Init()
+		return nil
+	}),
+}
+
var statusCmd = &cobra.Command{
	Use: "status <cluster>",
	Short: "retrieve the status of nodes in a cluster",
@@ -1114,21 +1136,17 @@ of nodes, outputting a line whenever a change is detected:
 		if err != nil {
 			return err
 		}
-		var errs []string
 		for msg := range c.Monitor(monitorIgnoreEmptyNodes, monitorOneShot) {
 			if msg.Err != nil {
 				msg.Msg += "error: " + msg.Err.Error()
 			}
-			s := fmt.Sprintf("%d: %s", msg.Index, msg.Msg)
+			thisError := errors.Newf("%d: %s", msg.Index, msg.Msg)
 			if msg.Err != nil || strings.Contains(msg.Msg, "dead") {
-				errs = append(errs, s)
+				err = errors.CombineErrors(err, thisError)
 			}
-			fmt.Println(s)
-		}
-		if len(errs) != 0 {
-			return errors.New(strings.Join(errs, ", "))
+			fmt.Println(thisError.Error())
 		}
-		return nil
+		return err
 	}),
}
@@ -1191,12 +1209,12 @@ the 'zfs rollback' command:
 			}
 			fsCmd = `sudo zpool create -f data1 -m /mnt/data1 /dev/sdb`
 		case "ext4":
-			fsCmd = `sudo mkfs.ext4 -F /dev/sdb && sudo mount -o discard,defaults /dev/sdb /mnt/data1`
+			fsCmd = `sudo mkfs.ext4 -F /dev/sdb && sudo mount -o defaults /dev/sdb /mnt/data1`
 		default:
 			return fmt.Errorf("unknown filesystem %q", fs)
 		}
-		err = c.Run(os.Stdout, os.Stderr, c.Nodes, install.OtherCmd, "reformatting", fmt.Sprintf(`
+		err = c.Run(os.Stdout, os.Stderr, c.Nodes, "reformatting", fmt.Sprintf(`
set -euo pipefail
if sudo zpool list -Ho name 2>/dev/null | grep ^data1$; then
 sudo zpool destroy -f data1
@@ -1238,7 +1256,7 @@ var runCmd = &cobra.Command{
 		if len(title) > 30 {
 			title = title[:27] + "..."
 		}
-		return c.Run(os.Stdout, os.Stderr, c.Nodes, install.CockroachCmd, title, cmd)
+		return c.Run(os.Stdout, os.Stderr, c.Nodes, title, cmd)
 	}),
}
@@ -1294,14 +1312,14 @@ Some examples of usage:
 		} else if c.IsLocal() {
 			os = runtime.GOOS
 		}
-		var debugArch, releaseArch string
+		var debugArch, releaseArch, libExt string
 		switch os {
 		case "linux":
-			debugArch, releaseArch = "linux-gnu-amd64", "linux-amd64"
+			debugArch, releaseArch, libExt = "linux-gnu-amd64", "linux-amd64", ".so"
 		case "darwin":
-			debugArch, releaseArch = "darwin-amd64", "darwin-10.9-amd64"
+			debugArch, releaseArch, libExt = "darwin-amd64", "darwin-10.9-amd64", ".dylib"
 		case "windows":
-			debugArch, releaseArch = "windows-amd64", "windows-6.2-amd64"
+			debugArch, releaseArch, libExt = "windows-amd64", "windows-6.2-amd64", ".dll"
 		default:
 			return errors.Errorf("cannot stage binary on %s", os)
 		}
@@ -1313,13 +1331,32 @@ Some examples of usage:
 		}
 		switch applicationName {
 		case "cockroach":
-			return install.StageRemoteBinary(
+			sha, err := install.StageRemoteBinary(
 				c, applicationName, "cockroach/cockroach", versionArg, debugArch,
 			)
+			if err != nil {
+				return err
+			}
+			// NOTE: libraries may not be present in older versions.
+			// Use the sha for the binary to download the same remote library.
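+			// With a (hypothetical) resolved sha "abc123" on linux, the loop
+			// below stages
+			//   cockroach/lib/libgeos.linux-gnu-amd64.abc123.so
+			//   cockroach/lib/libgeos_c.linux-gnu-amd64.abc123.so
+			// into ./lib on each node, skipping any library missing remotely.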
+ for _, library := range []string{"libgeos", "libgeos_c"} { + if err := install.StageOptionalRemoteLibrary( + c, + library, + fmt.Sprintf("cockroach/lib/%s", library), + sha, + debugArch, + libExt, + ); err != nil { + return err + } + } + return nil case "workload": - return install.StageRemoteBinary( + _, err := install.StageRemoteBinary( c, applicationName, "cockroach/workload", versionArg, "", /* arch */ ) + return err case "release": return install.StageCockroachRelease(c, versionArg, releaseArch) default: @@ -1436,9 +1473,15 @@ var pgurlCmd = &cobra.Command{ var urls []string for i, ip := range ips { + if ip == "" { + return errors.Errorf("empty ip: %v", ips) + } urls = append(urls, c.Impl.NodeURL(c, ip, c.Impl.NodePort(c, nodes[i]))) } fmt.Println(strings.Join(urls, " ")) + if len(urls) != len(nodes) { + return errors.Errorf("have nodes %v, but urls %v from ips %v", nodes, urls, ips) + } return nil }), } @@ -1543,6 +1586,7 @@ func main() { monitorCmd, startCmd, stopCmd, + initCmd, runCmd, wipeCmd, reformatCmd, @@ -1733,12 +1777,14 @@ func main() { "start nodes sequentially so node IDs match hostnames") cmd.Flags().StringArrayVarP( &nodeArgs, "args", "a", nil, "node arguments") - cmd.Flags().StringVarP( + cmd.Flags().StringArrayVarP( &nodeEnv, "env", "e", nodeEnv, "node environment variables") cmd.Flags().StringVarP( &clusterType, "type", "t", clusterType, `cluster type ("cockroach" or "cassandra")`) cmd.Flags().BoolVar( &install.StartOpts.Encrypt, "encrypt", encrypt, "start nodes with encryption at rest turned on") + cmd.Flags().BoolVar( + &install.StartOpts.SkipInit, "skip-init", skipInit, "skip initializing the cluster") fallthrough case sqlCmd: cmd.Flags().StringVarP( diff --git a/pkg/cmd/roachprod/ssh/ssh.go b/pkg/cmd/roachprod/ssh/ssh.go index a5253d7e29ed..0c4533c6c542 100644 --- a/pkg/cmd/roachprod/ssh/ssh.go +++ b/pkg/cmd/roachprod/ssh/ssh.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config" "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "golang.org/x/crypto/ssh/knownhosts" diff --git a/pkg/cmd/roachprod/vm/aws/aws.go b/pkg/cmd/roachprod/vm/aws/aws.go index 1e152605ee93..efd7e29afda3 100644 --- a/pkg/cmd/roachprod/vm/aws/aws.go +++ b/pkg/cmd/roachprod/vm/aws/aws.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/flagstub" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" "golang.org/x/sync/errgroup" "golang.org/x/time/rate" @@ -75,12 +75,21 @@ type providerOpts struct { EBSVolumeType string EBSVolumeSize int EBSProvisionedIOPs int + UseMultipleDisks bool + + // Use specified ImageAMI when provisioning. + // Overrides config.json AMI. + ImageAMI string // CreateZones stores the list of zones for used cluster creation. // When > 1 zone specified, geo is automatically used, otherwise, geo depends // on the geo flag being set. If no zones specified, defaultCreateZones are // used. See defaultCreateZones. CreateZones []string + // CreateRateLimit specifies the rate limit used for aws instance creation. + // The request limit from aws' side can vary across regions, as well as the + // size of cluster being created. 
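+	// Creation requests are locally throttled through a token-bucket
+	// rate.Limiter built from this value; see Create below.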
+ CreateRateLimit float64 } const ( @@ -141,6 +150,14 @@ func (o *providerOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { "as AZ:N where N is an integer, the zone will be repeated N times. If > 1\n"+ "zone specified, the cluster will be spread out evenly by zone regardless\n"+ "of geo (default [%s])", strings.Join(defaultCreateZones, ","))) + flags.StringVar(&o.ImageAMI, ProviderName+"-image-ami", + "", "Override image AMI to use. See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html") + flags.BoolVar(&o.UseMultipleDisks, ProviderName+"-enable-multiple-stores", + false, "Enable the use of multiple stores by creating one store directory per disk. Default is to raid0 stripe all disks.") + flags.Float64Var(&o.CreateRateLimit, ProviderName+"-create-rate-limit", 2, "aws"+ + " rate limit (per second) for instance creation. This is used to avoid hitting the request"+ + " limits from aws, which can vary based on the region, and the size of the cluster being"+ + " created. Try lowering this limit when hitting 'Request limit exceeded' errors.") } func (o *providerOpts) ConfigureClusterFlags(flags *pflag.FlagSet, _ vm.MultipleProjectsOption) { @@ -251,8 +268,7 @@ func (p *Provider) Create(names []string, opts vm.CreateOpts) error { } } var g errgroup.Group - const rateLimit = 2 // per second - limiter := rate.NewLimiter(rateLimit, 2 /* buckets */) + limiter := rate.NewLimiter(rate.Limit(p.opts.CreateRateLimit), 2 /* buckets */) for i := range names { capName := names[i] placement := zones[i] @@ -670,7 +686,7 @@ func (p *Provider) runInstance(name string, zone string, opts vm.CreateOpts) err extraMountOpts = "nobarrier" } } - filename, err := writeStartupScript(extraMountOpts) + filename, err := writeStartupScript(extraMountOpts, p.opts.UseMultipleDisks) if err != nil { return errors.Wrapf(err, "could not write AWS startup script to temp file") } @@ -678,12 +694,19 @@ func (p *Provider) runInstance(name string, zone string, opts vm.CreateOpts) err _ = os.Remove(filename) }() + withFlagOverride := func(cfg string, fl *string) string { + if *fl == "" { + return cfg + } + return *fl + } + args := []string{ "ec2", "run-instances", "--associate-public-ip-address", "--count", "1", - "--image-id", az.region.AMI, "--instance-type", machineType, + "--image-id", withFlagOverride(az.region.AMI, &p.opts.ImageAMI), "--key-name", keyName, "--region", az.region.Name, "--security-group-ids", az.region.SecurityGroup, @@ -691,6 +714,7 @@ func (p *Provider) runInstance(name string, zone string, opts vm.CreateOpts) err "--tag-specifications", tagSpecs, "--user-data", "file://" + filename, } + if cpuOptions != "" { args = append(args, "--cpu-options", cpuOptions) } @@ -702,7 +726,7 @@ func (p *Provider) runInstance(name string, zone string, opts vm.CreateOpts) err case "gp2": ebsParams = fmt.Sprintf("{VolumeSize=%d,VolumeType=%s,DeleteOnTermination=true}", p.opts.EBSVolumeSize, t) - case "io1": + case "io1", "io2": ebsParams = fmt.Sprintf("{VolumeSize=%d,VolumeType=%s,Iops=%d,DeleteOnTermination=true}", p.opts.EBSVolumeSize, t, p.opts.EBSProvisionedIOPs) default: @@ -714,7 +738,6 @@ func (p *Provider) runInstance(name string, zone string, opts vm.CreateOpts) err "DeviceName=/dev/sdd,Ebs="+ebsParams, ) } - return p.runJSONCommand(args, &data) } diff --git a/pkg/cmd/roachprod/vm/aws/config.json b/pkg/cmd/roachprod/vm/aws/config.json index c3e08fd34599..f6ad724f3baa 100644 --- a/pkg/cmd/roachprod/vm/aws/config.json +++ b/pkg/cmd/roachprod/vm/aws/config.json @@ -19,7 +19,9 @@ 
"security_group": "sg-0e00c2f8f274a0fea", "subnets": { "ap-northeast-2a": "subnet-0d24440b29a76b724", - "ap-northeast-2c": "subnet-07a4f14d08de379d8" + "ap-northeast-2b": "subnet-0b049a35364cc9d28", + "ap-northeast-2c": "subnet-0261535876b726680", + "ap-northeast-2d": "subnet-049feccf9cc9895f1" } }, { @@ -28,7 +30,8 @@ "security_group": "sg-03a68bb0d765c135e", "subnets": { "ap-south-1a": "subnet-0286d3ac1095fc6f1", - "ap-south-1b": "subnet-012666900a3627088" + "ap-south-1b": "subnet-012666900a3627088", + "ap-south-1c": "subnet-014859824b1b1365d" } }, { @@ -57,7 +60,8 @@ "security_group": "sg-0d97f7ec3edc8f7c1", "subnets": { "ca-central-1a": "subnet-02ef88f3eb706271e", - "ca-central-1b": "subnet-072be60d12ab9cc6c" + "ca-central-1b": "subnet-072be60d12ab9cc6c", + "ca-central-1d": "subnet-0a75f397c5f90c490" } }, { @@ -73,7 +77,7 @@ { "ami_id": "ami-1b791862", "region": "eu-west-1", - "security_group": "sg-0439765c57cd091f7", + "security_group": "sg-033eb468bf7e3c6b5", "subnets": { "eu-west-1a": "subnet-092adeba3986a0218", "eu-west-1b": "subnet-0d05781a8ca47b75c", diff --git a/pkg/cmd/roachprod/vm/aws/embedded.go b/pkg/cmd/roachprod/vm/aws/embedded.go index a5f36620e8fd..bee03995386a 100644 --- a/pkg/cmd/roachprod/vm/aws/embedded.go +++ b/pkg/cmd/roachprod/vm/aws/embedded.go @@ -1,6 +1,6 @@ // Code generated by go-bindata. DO NOT EDIT. // sources: -// config.json (6.206kB) +// config.json (6.468kB) // old.json (1.165kB) package aws @@ -70,7 +70,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _configJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x58\xdb\x6e\x23\x47\x0e\x7d\x9f\xaf\x30\xfc\x1c\x03\x64\x5d\xc8\x62\x7e\x65\xb1\x08\x58\x2c\x72\x22\xc0\xb1\x07\x96\x94\x45\x10\xcc\xbf\x2f\x64\x6f\x7a\x54\x33\xad\xd1\x65\xa5\xb7\x2e\xf5\x61\x55\x9f\x43\xf2\xb0\xfb\xef\x4f\x0f\x0f\x0f\x0f\x8f\x6f\xfe\x79\xf3\xfa\xb2\x7d\xfc\xf5\xe1\x63\xe1\x7d\x71\xeb\x2f\xdb\xcd\x6e\xf3\xa7\x3f\xfe\xfa\x10\xfa\xbc\xf5\x5f\xbe\xfd\xb7\xfb\xeb\xcb\x61\xf9\xf1\x79\xb3\xdd\x3d\x1e\xad\xff\xa9\xcf\xfb\xc3\x1f\xff\x5a\x96\x0e\xbf\xbf\xa7\xab\xf7\x1b\xf5\x8f\xcd\x6f\x9b\x71\x08\xa1\x7f\x6c\x9e\x4a\xa3\x0c\x96\xfc\x28\xd4\x72\xe7\xc7\xd9\xde\xef\xfc\xf2\xf4\xf2\xfa\xb6\xfb\xdd\x75\xbb\x7b\xc2\xb5\x7b\xb7\x6e\xfb\xb7\xcd\xee\xaf\xdf\x3e\xbf\xbd\xee\xbf\x1c\x30\xdb\xcf\x4f\x00\x40\x5e\x1a\x0c\x66\x45\x40\x28\xab\xc8\x7d\x7f\xf1\xdd\xcc\xc0\x7c\xe0\x69\x73\x7d\x8f\xfd\x8e\x79\x82\x81\xa5\x8c\x9e\x4d\xbc\xb0\x8f\xa8\x2b\xf1\x57\x42\xd8\x71\x88\x14\xa6\xaa\x94\x30\x85\x65\x43\xbd\x2c\xc4\x38\x0e\xe1\x72\x78\xce\x68\x3d\x77\x0f\xc2\x78\xfc\x21\xc2\xd7\x69\xe5\xeb\x2f\xd7\x69\xa4\x9d\x79\x14\x5b\x7b\xba\x53\x1a\xa5\xcb\x35\x72\x00\x4b\xd1\x22\x71\x51\x08\x5f\x23\xe0\x4a\x8d\xd2\xac\x51\x2a\xa5\x40\x4f\xa2\x4c\x9d\xd3\x5a\x0e\xac\x84\x98\x34\x62\x2d\x81\x65\x40\x1b\x9e\x59\x46\xbb\x37\xc1\xad\x78\xee\xc9\xfb\x59\x82\xb7\xaf\xfb\xdd\xef\xd7\x14\x40\x56\x6a\xbd\xc3\x60\xaa\x86\xb9\xae\x96\xd9\x25\xe4\xfe\x6f\xe3\x89\xd8\xd4\x68\x64\x35\x04\xa9\x61\x14\x6b\xa7\xfa\x0e\xde\x8f\xe1\x98\x88\x48\x00\x34\x53\x62\x68\x77\x27\xb5\x73\xe4\xd6\xec\x42\x52\xaf\xed\x2c\x4d\x4a\x2b\x61\x55\x2a\x57\xb4\xe0\xff\x8b\xd8\x95\xce\xd2\x5a\x6d\x42\x8a\x19\xa1\x62\x86\x7c\x8e\xdc\x8f\x10\x13\xc1\x00\x5d\x7b\xc5\xd1\xa3\x78\x43\x84\xcb\x42\x4c\x89\x4f\x64\xac\x2c\xc3\xa3\xb8\x68\xf5\x7b\x6b\x94\xb3\xf6\x9a\xea\x2a\xef\x27\x34\xba\xa2\xb3\x40\xef\xce\x05\x87\x0c\x80\x18\xf9\xe6\xce\x72\xb4\xf9\x5c\x00\xca\x4c\x1a\x26\x46\x6c\xe1\xed\x22\x82\xd3\xa4\x91\x72\x09\xa7\x4a\xa8\x79\x34\xa6\xcb\x64\x9e\x9b\x93\x1a\x50\xa3\xcc\x1e\x2
3\xa3\x2b\xdf\x5b\x23\xe4\x3c\x7a\xe3\xb5\xa3\x1d\x69\x64\xfa\x64\xfe\xb2\x7b\xd3\xe7\x6b\xaa\x68\x08\x07\xbb\x65\x1f\xd6\x82\x6d\x1d\x79\x4e\xa1\xe3\xad\x67\x7d\x3c\x5a\x8b\xec\x9d\x81\x12\xe3\x5a\xf3\xfb\x21\xc0\xa4\x0e\xa7\xee\x04\x03\x93\x76\x31\x23\xbb\x37\xb5\x15\x6a\xb5\x91\xe3\xe7\xd4\xfa\xfe\x16\x6a\xab\xb0\x18\x36\x1f\x09\x95\xb8\xde\xd6\xa0\x8e\xb7\x9e\x4d\xb5\xd9\x10\x84\x4e\x31\x28\x67\x3d\x49\xed\x71\x80\x89\x5a\x8a\x1c\x89\x90\x35\x4a\x1e\xe2\x27\x5d\xf9\x38\xc0\x94\xf6\x11\xbd\x76\x1c\x09\x3c\xea\x10\xc6\xbb\xa7\x7d\x67\xc1\x46\xab\xed\x66\xd2\xe6\x3f\x7e\x9d\x73\x94\x2c\x07\x37\xae\x6c\x03\x04\x6f\x74\x8e\x65\xdf\x49\x15\x49\x3a\xbc\x6b\x96\x46\x0a\x09\x4f\x36\xa4\x05\x3d\x49\x32\xa0\x72\x43\x6d\xa6\x85\x3b\x57\x3b\x8b\x9e\xad\x22\x77\x2d\x65\x68\x4d\x5a\xa5\xd4\xbb\xeb\x21\x05\x1d\x4a\xac\xb9\xd8\x8a\x1e\x57\xb8\x84\xf5\x4a\x18\x44\x20\xb5\x6a\x92\xb5\xc7\xbe\x5c\x8f\xd9\x20\x4a\xa6\x51\x7b\x6b\x18\x3d\x4b\xb5\xb3\x8c\xce\xde\x10\xde\xa4\xb2\x0b\x91\x42\x55\xff\x59\x8d\x7d\xa0\x27\x3d\x4a\xc3\x24\x46\x38\x70\x38\x23\xa4\x7b\xeb\x61\x87\xa9\x47\xfa\x2a\x5d\x3f\xea\xb1\xea\x1e\x27\x46\xd6\xd4\x4d\x19\xa0\x49\x2e\x9e\xec\xb6\x91\x75\xd9\x77\x36\x04\x0b\xc4\x86\x84\x18\x62\x40\x74\x8e\xd1\xdc\x67\xb4\xb5\x51\x22\x37\x02\x62\xc3\x71\x16\x3d\xe9\x61\x35\xbc\x8a\xf5\x34\x3c\xa8\xb6\xbb\xdb\x74\xef\xd2\x07\x8f\xd5\x76\xf2\x4d\x8f\xad\x3e\x5d\x3b\xe9\x3a\xc7\x08\x49\x96\x3b\x8e\x3e\xf0\x36\x8f\x5e\xf6\xd5\xef\x73\xb4\xd7\xdc\x11\xba\xbb\xb7\x35\x1f\x9c\xd1\x93\x1e\x62\xd6\xc2\x6c\xb4\x52\x39\x5a\x3d\xf9\xe6\xbc\xa0\xe7\x7e\x95\xb2\x84\x69\xee\xae\xa5\x06\xdc\x5d\x0f\xa2\x0a\x64\x78\xa6\x3e\xf6\xdb\xab\xf5\x10\xce\xa0\xb5\x1b\x97\x9c\xb4\xfb\x6d\xfe\xb1\xec\x3b\xe9\x11\xa5\x5b\x6b\x3e\x24\xd4\xda\x48\x27\xa7\xd1\x05\x3d\x4f\x4b\x20\x80\x49\xc4\x1a\x80\x61\x39\x59\x1f\x0b\x7a\x1e\x63\x3d\x6b\x4d\x4e\xd9\x99\xb1\xc7\xc9\x5c\x58\xd0\xd3\x27\x90\x40\x84\x1a\xbd\x72\x48\x85\xa0\x93\xb9\xb0\xa0\x7d\xaa\x6c\x36\x33\x4b\x66\xd6\x79\x0c\x3f\xf9\xa6\xb4\xa0\x63\x7a\x91\x95\xda\x47\x82\x96\x11\x6c\x64\xb8\xbf\xf3\x51\x75\xea\x71\x66\x00\xff\xe7\x6c\x57\x38\x5f\x46\x39\xbc\xbc\x48\x15\x51\xc2\xb2\xfa\x7d\xe7\xe2\x4c\x9a\x9d\x0f\x8a\x91\x0e\xc6\x84\xda\x46\xd5\xb5\x24\x9d\xd1\x73\x26\x71\xed\xae\x9a\xa2\x18\x97\x58\xad\xa1\x19\x3d\x65\x92\x00\xd5\xc3\xb8\xdf\x65\x00\x96\x32\xee\xad\x07\x70\x6d\xb5\xd0\x99\x4e\xbb\xdf\x5e\x3d\x19\x0e\x54\x2f\x9a\x87\xd5\x41\x50\xe0\x36\xe7\x5b\xf6\x9d\x2b\x5b\x5b\x53\x4a\xa9\x08\x37\xaf\x69\x2d\x49\x66\xf4\x5c\x9b\x4d\x07\xf8\xe8\xd1\x3c\x17\x8b\xbb\x33\xca\xd2\x38\x0b\x9c\xf9\x0c\xf0\xcf\xd9\xae\xc8\x70\x3a\xbc\x5f\x44\xe3\xa6\x12\x89\x7d\x7d\x0a\xb9\x94\xd1\xef\x66\xbb\xe0\x8e\x46\xd6\x6b\xb4\xc6\xa7\x67\x89\x05\x3d\x65\xb8\x29\x5b\xea\x92\xb5\x90\x68\xe2\x9f\x75\xbb\x95\xd9\x2e\x48\x54\x52\x22\xab\x94\x35\xe5\xb3\x6a\xa6\xa9\x57\x62\xe7\x54\x98\x25\x73\x37\x4e\x92\xcf\xaa\xb9\x5c\xfd\xfb\xd3\xc7\xf5\xd7\x4f\xff\x0d\x00\x00\xff\xff\x2e\x40\xf2\x0c\x3e\x18\x00\x00") +var _configJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x98\xdb\x6e\x23\x47\x0e\x86\xef\xe7\x29\x0c\x5f\xc7\x40\xb1\x0e\x64\x31\xaf\xb2\x58\x04\x24\x8b\x9c\x08\x98\xd8\x03\x4b\xca\x22\x08\xe6\xdd\x17\xb2\x93\x8e\x6a\xa6\x65\x1d\x22\xdd\x75\xa9\x7f\xb2\xba\x3e\x9e\xba\xff\xfc\xf4\xf0\xf0\xf0\xf0\xf8\xea\x9f\x37\x2f\xcf\xdb\xc7\x9f\x1f\xde\x17\xde\x16\xb7\xfe\xbc\xdd\xec\x36\xbf\xfb\xe3\xcf\x0f\x21\x5f\xb6\xfe\xd3\x3f\xff\xed\xfe\xf8\x7a\x58\x7e\xfc\xb2\xd9\xee\x1e\x8f\xd6\x7f\x97\x2f\xfb\xc3\x1f\xff\x59\x96\x0e\xbf\x3f\xa7\xab\xb7\x1b\xe5\xb7\xcd\x2f\x9b\x71\x30\x21\xbf\x6d\x9e\x6a\xc7\x92\x2c\xfb\x91\xa9\xe5\xce\xf7\xbd\xbd\xdd\xf9\xf5\xe9\xf9\xe5\x75\xf7\xab\xcb\x76\xf7\x04\x6b\xf7\x6e\xdd\xf6\xaf\x9b\xdd\x1f\xbf\x7c\x7e\x7d\xd9\x7f\x3d\x68\xb6\x9f\x9f\x52\x4a\xe8\xb5\xa7\x41\x24\x90\x20\xd5\x55\xe5\x5e\x9f\x7d\x37\x9f\xc0\xbc\xe1\xc9\xb9\xbc\xd9\x7e\xd3\x3c\xa5\x01\xb5\x0e\x2d\xc6\x5e\xc9\x47\xb4\x15\xfb\x2b\x26\xec\xd8\x44\x0e\x13\x11\xcc\x90\xc3\x8a\x81\x5c\x66\x62\x1c\x9b\x70\x3e\x3c\x67\x74\x2d\xea\x81\x10\x8f\x3f\x58\xf8\x36\xad\x7c\xfb\xe9\x3a\x46\xa2\x44\xa3\xda\xda\xd3\x9d\x62\x94\x2f\x67\xe4\x29\x59\x8e\x1e\x99\xaa\xa4\xf0\xb5\x03\xb8\x92\x51\x9e\x19\xe5\x5a\x6b\xd2\xcc\x42\xa8\x94\xd7\x62\x60\xc5\x84\x1e\x9b\xd0\x54\x59\x4a\x2b\x58\xcd\x78\xe4\x7e\x99\x89\x19\x33\x42\x2b\xad\xbf\x6d\x01\xb1\xa7\xcb\x4c\x4c\x98\x2b\x87\x9b\x05\x9b\x71\xe7\x16\x70\x6f\xcc\xbd\x7a\xd1\xec\x7a\x16\xf3\xf6\x65\xbf\xfb\xf5\x9a\x34\x2c\x82\x5d\x35\x0d\xc2\x66\x50\xda\x6a\xb2\x5f\x82\xf8\x2f\xc7\x13\xde\xdc\x71\x14\x31\x48\xdc\xc2\x30\xd6\x76\xf5\x9d\x7c\x42\x0b\x19\x11\x39\x25\x29\x98\x29\xf5\x8f\xd0\xfe\x25\x9f\xb0\x42\xed\x8d\x7b\xae\x0a\x0a\x05\xdb\xb8\x37\x13\xa5\x28\xbd\xdb\x85\x4c\xae\x2d\x8f\x9d\x6b\xaf\x61\x8d\x1b\x35\xb0\xa0\x7f\xc5\x65\xa5\x3c\xf6\xde\x3a\xa3\x40\x81\xd4\xa0\xa4\x72\xee\x70\xdf\x4d\x4c\x7c\x52\x52\xd1\x06\x43\xa3\x7a\x07\xf8\x28\x6f\x8e\x4c\x4c\x8c\x10\x8d\x84\x78\x78\x54\x67\x69\x7e\x6f\x46\xa5\x88\xb6\xdc\x56\xcf\xfd\x04\xa3\x2b\xca\x63\x52\x75\xaa\x30\x78\xa4\x14\xa3\xdc\x5c\x1e\x8f\x9c\xcf\xf9\x23\x44\x28\x61\x6c\x48\x16\x7e\x36\x01\x56\xca\xa3\x50\x0d\xc7\x86\x20\x65\x74\xc2\xcb\x30\xcf\xe5\x51\x2c\x61\xc7\x42\x1e\xa3\x80\x0b\xdd\x9b\x11\x50\x19\xda\x69\x6d\x6b\x47\x8c\x4c\x9e\xcc\x9f\x77\xaf\xf2\xe5\x9a\x2c\x1a\x4c\x41\x6e\xc5\x87\xf5\x20\x5b\x57\x9e\x23\x74\xec\x7a\xe6\xe3\xd1\x7b\x14\x57\x4a\x98\x09\xd6\x6a\xe7\x0f\x06\x26\x3a\x94\xd5\x31\x0d\xc8\xa2\x6c\x86\x76\x89\x81\x31\xe3\x6d\x51\x98\xac\x05\x27\xab\x9c\xee\xcd\xa6\xa5\xd6\x6c\x94\xf8\x98\x8d\xef\x6f\x61\xd3\x98\xd8\xa0\xfb\xc8\x20\x48\xed\xb6\x0a\x77\xec\x7a\x1e\x2d\xba\x0d\x86\xa4\x18\x03\x4b\x91\x93\x6c\x8e\x0d\x4c\x6c\x30\x4a\x64\x04\x92\xa8\x65\xb0\x9f\x9c\x4d\x8e\x0d\x4c\x79\x13\xa1\x4d\x61\xe4\xe4\xd1\x06\xd3\xdd\x67\x02\x50\x62\xe8\xb8\x5a\xaf\x26\x36\xff\xf3\xeb\x5a\x4f\x29\xae\x15\xbb\x06\x79\x31\xd4\xd5\xd9\xf2\x12\x30\xef\x7e\x27\x2a\x9c\x65\xb8\x4a\xe1\x8e\x92\x32\x9c\xac\x68\x8b\x7a\x42\x32\x52\xa3\x0e\xd2\x4d\x2a\x29\xb5\x93\xe9\xb2\xa8\xe7\x5e\x53\x54\x6a\x1d\xd2\xb2\x34\xae\xed\xee\x3c\xb8\x82\xa7\x1a\x6b\x6d\x70\x85\xc7\x15\x6d\xc6\xb4\x21\x04\x62\xe2\xd6\x24\xf3\xda\x63\x5f\xce\x63\xee\x30\xb5\xe0\x68\xda\x3b\x84\x16\x6e\x76\xf6\x44\xe7\xe6\x12\xde\xb9\x91\x33\xa2\xa4\x26\xfe\x51\x8e\xbd\xab\x27\x1e\xb5\x43\x66\x43\x18\x30\x9c\x20\xe5\x7b\xf3\xb0\xc3\xd8\xc4\xba\x7a\x5c\x3f\xf2\x58\x6d\x3f\x27\xf2\x23\xab\x09\xa5\xd4\xb9\x54\xcf\x76\xdb\xc8\xbc\xf8\x9d\x3b\x8a\x05\x40\x07\x04\x08\xb6\x84\x78\xee\x44\x8b\xce\x6a\xeb\xa3\x46\xe9\x98\x90\x0c\xc6\x59\xf5\xc4\xc3\x5a\x78\x63\xd3\x3c\x3c\xb0\xf5\xbb\xf
7\x79\x55\xd6\x41\x63\xb5\xce\xff\xc3\x63\x2b\x4f\xd7\x8e\xca\x4e\x31\x82\xb3\x15\x85\xa1\x03\x6e\x6b\xf2\x8b\x5f\xf9\x3e\x46\xb5\x15\x85\xa4\xee\xde\xd7\xfa\xe0\xac\x9e\x78\xb0\x59\x0f\xb3\xd1\x6b\xa3\xe8\xed\xe4\xf7\x83\x45\x3d\xd7\xab\x5c\x38\x4c\x8a\xba\xd4\x16\xe9\xee\x3c\x10\x5b\x42\x83\x33\xf9\xb1\xdf\x5e\xcd\x83\xa9\x24\x69\x6a\x54\x4b\x16\xf5\xdb\x1a\xfb\xe2\x77\xe2\x11\x55\xad\x77\x1f\x1c\x62\x7d\xe4\x93\xe3\xec\xa2\x9e\xc7\xad\xc4\x09\x32\xb3\xf5\x94\x0c\xea\xc9\xfc\x58\xd4\xf3\x1c\xec\x45\x5a\x76\x2c\x4e\x04\x1a\x27\x63\x61\x51\x4f\x93\x5a\x00\xa4\x16\xda\x28\xb8\xa5\xc0\x93\xb1\xb0\xa8\x7d\xca\x6c\x32\x33\xcb\x66\xa6\x34\x86\x9f\x7c\xd5\x5a\xd4\x31\xbd\x09\x73\xd3\x91\x53\x2f\x90\x6c\x94\x74\xff\xce\x87\xcd\x51\xe3\xcc\x04\xff\xf7\xde\xae\xe8\x7c\x05\xf8\xf0\xf6\xc3\x8d\x59\x10\xea\x6d\x93\xc8\xe2\x77\x8a\xa4\x54\x0d\x65\x10\x64\x90\x3e\x9a\xac\x05\xe9\xac\x9e\x23\x89\x9a\xba\x48\x8e\x6a\x54\x63\x35\x87\x66\xf5\x14\x49\x9c\xb0\x1d\xde\x17\x94\x47\x82\x5a\xef\xfe\x65\x22\x51\xeb\xad\xe2\x99\x4a\xbb\xdf\x5e\x3d\x19\x0e\x10\xaf\x52\x86\xb5\x81\xa9\xa6\xdb\x3a\xdf\xe2\x77\xce\x6c\xe9\x5d\x30\xe7\xca\xd4\xbd\xe5\xb5\x20\x99\xd5\x73\x6e\x76\x19\xc9\x87\x46\xf7\x52\x2d\xee\x7e\xa2\xc4\x9d\x0a\xa7\x33\xdf\x11\xfe\xde\xdb\x15\x11\x8e\x87\xf7\x8b\xe8\xd4\x85\x23\x93\xaf\x4f\x21\x97\x9e\xe8\x77\xb3\x5d\x90\x82\xa1\x69\x8b\xde\xe9\xf4\x2c\xb1\xa8\xa7\x08\x37\x21\xcb\xca\x45\x2a\xb2\x64\xfa\xa8\xda\xad\xcc\x76\x81\x2c\x9c\x33\x5a\xc3\x22\xb9\x9c\xa5\x39\x7f\x4d\x05\xa5\x5c\x89\xb8\x90\x1a\x65\x2e\x67\x69\x2e\x57\xff\xfd\xf4\x7e\xfd\xed\xd3\xff\x03\x00\x00\xff\xff\x33\x56\x51\xb9\x44\x19\x00\x00") func configJsonBytes() ([]byte, error) { return bindataRead( @@ -85,8 +85,8 @@ func configJson() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "config.json", size: 6206, mode: os.FileMode(0600), modTime: time.Unix(1400000000, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x65, 0x9, 0x86, 0x5b, 0xe2, 0x20, 0x97, 0xc5, 0xaf, 0x8f, 0xac, 0x3b, 0xda, 0x4, 0x20, 0x26, 0x3, 0x5f, 0xdf, 0x49, 0x66, 0x8d, 0xbd, 0xc6, 0x1c, 0x85, 0x10, 0xdd, 0x7b, 0xa1, 0x42, 0x93}} + info := bindataFileInfo{name: "config.json", size: 6468, mode: os.FileMode(0600), modTime: time.Unix(1400000000, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa3, 0xd6, 0x51, 0x56, 0xf4, 0x2, 0x38, 0x5d, 0xd5, 0x18, 0xa7, 0x2a, 0xdd, 0xb7, 0xde, 0x79, 0x33, 0x77, 0xc8, 0xae, 0xcd, 0xa3, 0xcd, 0x56, 0x33, 0xbe, 0x6c, 0xa7, 0x3a, 0x18, 0x39, 0x88}} return a, nil } diff --git a/pkg/cmd/roachprod/vm/aws/keys.go b/pkg/cmd/roachprod/vm/aws/keys.go index 067f506e9070..59fb9d128eb9 100644 --- a/pkg/cmd/roachprod/vm/aws/keys.go +++ b/pkg/cmd/roachprod/vm/aws/keys.go @@ -18,7 +18,7 @@ import ( "os" "strings" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) const sshPublicKeyFile = "${HOME}/.ssh/id_rsa.pub" diff --git a/pkg/cmd/roachprod/vm/aws/support.go b/pkg/cmd/roachprod/vm/aws/support.go index ab94c33948d8..bfee2bd12e46 100644 --- a/pkg/cmd/roachprod/vm/aws/support.go +++ b/pkg/cmd/roachprod/vm/aws/support.go @@ -20,7 +20,7 @@ import ( "text/template" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // Both M5 and I3 machines expose their EBS or local SSD volumes as NVMe block @@ -39,11 +39,14 @@ set -x sudo apt-get update sudo apt-get install -qy --no-install-recommends mdadm -mount_opts="discard,defaults" +mount_opts="defaults" {{if 
.ExtraMountOpts}}mount_opts="${mount_opts},{{.ExtraMountOpts}}"{{end}}
+use_multiple_disks='{{if .UseMultipleDisks}}true{{end}}'
+
disks=()
-mountpoint="/mnt/data1"
+mount_prefix="/mnt/data"
+
# On different machine types, the drives are either called nvme... or xvdd.
for d in $(ls /dev/nvme?n1 /dev/xvdd); do
 if ! mount | grep ${d}; then
@@ -53,24 +56,33 @@ for d in $(ls /dev/nvme?n1 /dev/xvdd); do
 echo "Disk ${d} already mounted, skipping..."
 fi
done
+
+
if [ "${#disks[@]}" -eq "0" ]; then
+ mountpoint="${mount_prefix}1"
 echo "No disks mounted, creating ${mountpoint}"
 mkdir -p ${mountpoint}
 chmod 777 ${mountpoint}
-elif [ "${#disks[@]}" -eq "1" ]; then
- echo "One disk mounted, creating ${mountpoint}"
- mkdir -p ${mountpoint}
- disk=${disks[0]}
- mkfs.ext4 -E nodiscard ${disk}
- mount -o ${mount_opts} ${disk} ${mountpoint}
- chmod 777 ${mountpoint}
- echo "${disk} ${mountpoint} ext4 ${mount_opts} 1 1" | tee -a /etc/fstab
+elif [ "${#disks[@]}" -eq "1" ] || [ -n "${use_multiple_disks}" ]; then
+ disknum=1
+ for disk in "${disks[@]}"
+ do
+  mountpoint="${mount_prefix}${disknum}"
+  disknum=$((disknum + 1 ))
+  echo "Creating ${mountpoint}"
+  mkdir -p ${mountpoint}
+  mkfs.ext4 -F ${disk}
+  mount -o ${mount_opts} ${disk} ${mountpoint}
+  chmod 777 ${mountpoint}
+  echo "${disk} ${mountpoint} ext4 ${mount_opts} 1 1" | tee -a /etc/fstab
+ done
else
+ mountpoint="${mount_prefix}1"
 echo "${#disks[@]} disks mounted, creating ${mountpoint} using RAID 0"
 mkdir -p ${mountpoint}
 raiddisk="/dev/md0"
 mdadm --create ${raiddisk} --level=0 --raid-devices=${#disks[@]} "${disks[@]}"
- mkfs.ext4 -E nodiscard ${raiddisk}
+ mkfs.ext4 -F ${raiddisk}
 mount -o ${mount_opts} ${raiddisk} ${mountpoint}
 chmod 777 ${mountpoint}
 echo "${raiddisk} ${mountpoint} ext4 ${mount_opts} 1 1" | tee -a /etc/fstab
@@ -96,7 +108,7 @@ sudo service sshd restart
# increase the default maximum number of open file descriptors for
# root and non-root users. Load generators running a lot of concurrent
# workers bump into this often.
-sudo sh -c 'echo "root - nofile 65536\n* - nofile 65536" > /etc/security/limits.d/10-roachprod-nofiles.conf'
+sudo sh -c 'echo "root - nofile 1048576\n* - nofile 1048576" > /etc/security/limits.d/10-roachprod-nofiles.conf'
# Enable core dumps
cat <<EOF > /etc/security/limits.d/core_unlimited.conf
@@ -125,12 +137,13 @@ sudo touch /mnt/data1/.roachprod-initialized
//
// extraMountOpts, if not empty, is appended to the default mount options. It is
// a comma-separated list of options for the "mount -o" flag.
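// A sketch of a call with hypothetical values: writeStartupScript("nobarrier", true)
// writes a temp file whose script mounts each disk at /mnt/data1, /mnt/data2, ...
// with "defaults,nobarrier" and returns that file's path.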
-func writeStartupScript(extraMountOpts string) (string, error) { +func writeStartupScript(extraMountOpts string, useMultiple bool) (string, error) { type tmplParams struct { - ExtraMountOpts string + ExtraMountOpts string + UseMultipleDisks bool } - args := tmplParams{ExtraMountOpts: extraMountOpts} + args := tmplParams{ExtraMountOpts: extraMountOpts, UseMultipleDisks: useMultiple} tmpfile, err := ioutil.TempFile("", "aws-startup-script") if err != nil { @@ -156,7 +169,7 @@ func (p *Provider) runCommand(args []string) ([]byte, error) { cmd.Stderr = &stderrBuf output, err := cmd.Output() if err != nil { - if exitErr, ok := err.(*exec.ExitError); ok { + if exitErr := (*exec.ExitError)(nil); errors.As(err, &exitErr) { log.Println(string(exitErr.Stderr)) } return nil, errors.Wrapf(err, "failed to run: aws %s: stderr: %v", diff --git a/pkg/cmd/roachprod/vm/azure/azure.go b/pkg/cmd/roachprod/vm/azure/azure.go index 459fb1883ab2..c839fa9ff85d 100644 --- a/pkg/cmd/roachprod/vm/azure/azure.go +++ b/pkg/cmd/roachprod/vm/azure/azure.go @@ -74,6 +74,7 @@ type Provider struct { subscription subscriptions.Subscription resourceGroups map[string]resources.Group subnets map[string]network.Subnet + securityGroups map[string]network.SecurityGroup } } @@ -81,6 +82,7 @@ type Provider struct { func New() *Provider { p := &Provider{} p.mu.resourceGroups = make(map[string]resources.Group) + p.mu.securityGroups = make(map[string]network.SecurityGroup) p.mu.subnets = make(map[string]network.Subnet) return p } @@ -116,6 +118,16 @@ func (p *Provider) Create(names []string, opts vm.CreateOpts) error { return errors.Wrapf(err, "could not find SSH public key file") } + clusterTags := make(map[string]*string) + clusterTags[tagCluster] = to.StringPtr(opts.ClusterName) + clusterTags[tagCreated] = to.StringPtr(timeutil.Now().Format(time.RFC3339)) + clusterTags[tagLifetime] = to.StringPtr(opts.Lifetime.String()) + clusterTags[tagRoachprod] = to.StringPtr("true") + + getClusterResourceGroupName := func(location string) string { + return fmt.Sprintf("%s-%s", opts.ClusterName, location) + } + ctx, cancel := context.WithTimeout(context.Background(), p.opts.operationTimeout) defer cancel() @@ -127,6 +139,10 @@ func (p *Provider) Create(names []string, opts vm.CreateOpts) error { } } + if len(p.opts.zone) == 0 { + p.opts.zone = defaultZone + } + if _, err := p.createVNets(ctx, p.opts.locations); err != nil { return err } @@ -148,7 +164,8 @@ func (p *Provider) Create(names []string, opts vm.CreateOpts) error { location := p.opts.locations[locIdx] // Create a resource group within the location. - group, err := p.getResourceGroup(ctx, opts.ClusterName, location, opts) + group, err := p.getOrCreateResourceGroup( + ctx, getClusterResourceGroupName(location), location, clusterTags) if err != nil { return err } @@ -412,7 +429,7 @@ func (p *Provider) List() (vm.List, error) { if err != nil { return nil, err } - if err := p.fillNetworkDetails(ctx, &m, nicID); err == vm.ErrBadNetwork { + if err := p.fillNetworkDetails(ctx, &m, nicID); errors.Is(err, vm.ErrBadNetwork) { m.Errors = append(m.Errors, err) } else if err != nil { return nil, err @@ -459,7 +476,7 @@ final_message: "roachprod init completed" fmt.Sprintf("chown -R %s /data1", remoteUser), } if opts.SSDOpts.NoExt4Barrier { - cmds = append(cmds, "mount -o remount,nobarrier,discard /mnt/data") + cmds = append(cmds, "mount -o remount,nobarrier /mnt/data") } } else { // We define lun42 explicitly in the data disk request below. 
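The runCommand hunk above swaps a direct type assertion for errors.As, which keeps matching once errors are wrapped. A minimal, self-contained sketch of the pattern (standard-library errors shown here; the diff itself uses the API-compatible cockroachdb/errors):

	package main

	import (
		"errors"
		"fmt"
		"os/exec"
	)

	func main() {
		_, err := exec.Command("false").Output()
		err = fmt.Errorf("running aws: %w", err) // wrapping defeats err.(*exec.ExitError)

		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) { // ...but errors.As still finds it
			fmt.Println("exit code:", exitErr.ExitCode())
		}
	}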
@@ -519,6 +536,7 @@ mounts:
 	// https://github.com/Azure-Samples/azure-sdk-for-go-samples/blob/79e3f3af791c3873d810efe094f9d61e93a6ccaa/compute/vm.go#L41
 	vm = compute.VirtualMachine{
 		Location: group.Location,
+		Zones: to.StringSlicePtr([]string{p.opts.zone}),
 		Tags: tags,
 		VirtualMachineProperties: &compute.VirtualMachineProperties{
 			HardwareProfile: &compute.HardwareProfile{
@@ -568,16 +586,31 @@ mounts:
 		},
 	}
 	if !opts.SSDOpts.UseLocalSSD {
+		var storageAccType compute.StorageAccountTypes
+		switch p.opts.networkDiskType {
+		case "premium-disk":
+			storageAccType = compute.StorageAccountTypesPremiumLRS
+		case "ultra-disk":
+			storageAccType = compute.StorageAccountTypesUltraSSDLRS
+		default:
+			err = errors.Newf("unsupported network disk type: %s", p.opts.networkDiskType)
+			return
+		}
 		vm.VirtualMachineProperties.StorageProfile.DataDisks = &[]compute.DataDisk{
 			{
 				CreateOption: compute.DiskCreateOptionTypesEmpty,
 				DiskSizeGB: to.Int32Ptr(100),
 				Lun: to.Int32Ptr(42),
 				ManagedDisk: &compute.ManagedDiskParameters{
-					StorageAccountType: compute.StorageAccountTypesPremiumLRS,
+					StorageAccountType: storageAccType,
 				},
 			},
 		}
+		if storageAccType == compute.StorageAccountTypesUltraSSDLRS {
+			vm.AdditionalCapabilities = &compute.AdditionalCapabilities{
+				UltraSSDEnabled: to.BoolPtr(true),
+			}
+		}
 	}
 	future, err := client.CreateOrUpdate(ctx, *group.Name, name, vm)
 	if err != nil {
@@ -602,6 +635,10 @@ func (p *Provider) createNIC(
 		return
 	}
+	p.mu.Lock()
+	sg := p.mu.securityGroups[p.getVnetNetworkSecurityGroupName(*group.Location)]
+	p.mu.Unlock()
+
 	future, err := client.CreateOrUpdate(ctx, *group.Name, *ip.Name, network.Interface{
 		Name: ip.Name,
 		Location: group.Location,
@@ -616,6 +653,9 @@ func (p *Provider) createNIC(
 				},
 			},
 		},
+			NetworkSecurityGroup: &sg,
+			EnableAcceleratedNetworking: to.BoolPtr(true),
+			Primary: to.BoolPtr(true),
 		},
 	})
 	if err != nil {
@@ -631,6 +671,151 @@ func (p *Provider) createNIC(
 	return
}
+func (p *Provider) getOrCreateNetworkSecurityGroup(
+	ctx context.Context, name string, resourceGroup resources.Group,
+) (network.SecurityGroup, error) {
+	p.mu.Lock()
+	group, ok := p.mu.securityGroups[name]
+	p.mu.Unlock()
+	if ok {
+		return group, nil
+	}
+
+	sub, err := p.getSubscription(ctx)
+	if err != nil {
+		return network.SecurityGroup{}, err
+	}
+	client := network.NewSecurityGroupsClient(*sub.SubscriptionID)
+	if client.Authorizer, err = p.getAuthorizer(); err != nil {
+		return network.SecurityGroup{}, err
+	}
+
+	cacheAndReturn := func(group network.SecurityGroup) (network.SecurityGroup, error) {
+		p.mu.Lock()
+		p.mu.securityGroups[name] = group
+		p.mu.Unlock()
+		return group, nil
+	}
+
+	// Check if the network security group already exists on Azure.
+	group, err = client.Get(ctx, *resourceGroup.Name, name, "" /* expand */)
+	if err == nil {
+		return cacheAndReturn(group)
+	}
+	var detail autorest.DetailedError
+	if errors.As(err, &detail) {
+		// It's okay if the network security group was not found; it will be
+		// created below.
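+		// (autorest reports HTTP-level failures as autorest.DetailedError: a
+		// 404 here just means "not created yet"; any other status code is a
+		// real error and is returned.)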
+ if code, ok := detail.StatusCode.(int); ok && code != 404 { + return network.SecurityGroup{}, err + } + } + + future, err := client.CreateOrUpdate(ctx, *resourceGroup.Name, name, network.SecurityGroup{ + SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{ + SecurityRules: &[]network.SecurityRule{ + { + Name: to.StringPtr("SSH_Inbound"), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: to.Int32Ptr(300), + Protocol: network.SecurityRuleProtocolTCP, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionInbound, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("22"), + }, + }, + { + Name: to.StringPtr("SSH_Outbound"), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: to.Int32Ptr(301), + Protocol: network.SecurityRuleProtocolTCP, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionOutbound, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("*"), + }, + }, + { + Name: to.StringPtr("HTTP_Inbound"), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: to.Int32Ptr(320), + Protocol: network.SecurityRuleProtocolTCP, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionInbound, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("80"), + }, + }, + { + Name: to.StringPtr("HTTP_Outbound"), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: to.Int32Ptr(321), + Protocol: network.SecurityRuleProtocolTCP, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionOutbound, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("*"), + }, + }, + { + Name: to.StringPtr("HTTPS_Inbound"), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: to.Int32Ptr(340), + Protocol: network.SecurityRuleProtocolTCP, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionInbound, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("443"), + }, + }, + { + Name: to.StringPtr("HTTPS_Outbound"), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: to.Int32Ptr(341), + Protocol: network.SecurityRuleProtocolTCP, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionOutbound, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("*"), + }, + }, + }, + }, + Location: resourceGroup.Location, + }) + if err != nil { + return network.SecurityGroup{}, err + } + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return network.SecurityGroup{}, err + } + securityGroup, err := future.Result(client) + if err != nil { + return network.SecurityGroup{}, err + } + return cacheAndReturn(securityGroup) +} + +func (p *Provider) getVnetNetworkSecurityGroupName(location string) string { 
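+	// e.g. "roachprod-vnets-nsg-eastus" for location "eastus".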
+	return fmt.Sprintf("roachprod-vnets-nsg-%s", location)
+}
+
// createVNets will create a VNet in each of the given locations to be
// shared across roachprod clusters. Thus, all roachprod clusters will
// be able to communicate with one another, although this is scoped by
@@ -644,38 +829,13 @@ func (p *Provider) createVNets(
 	}
 	groupsClient := resources.NewGroupsClient(*sub.SubscriptionID)
-	if groupsClient.Authorizer, err = p.getAuthorizer(); err != nil {
-		return nil, err
-	}
-	vnetGroupName := func(location string) string {
-		return fmt.Sprintf("roachprod-vnets-%s", location)
-	}
+	vnetResourceGroupTags := make(map[string]*string)
+	vnetResourceGroupTags[tagComment] = to.StringPtr("DO NOT DELETE: Used by all roachprod clusters")
+	vnetResourceGroupTags[tagRoachprod] = to.StringPtr("true")
-	// Supporting local functions to make the logic below easier to read.
-	createVNetGroup := func(location string) (resources.Group, error) {
-		return groupsClient.CreateOrUpdate(ctx, vnetGroupName(location), resources.Group{
-			Location: to.StringPtr(location),
-			Tags: map[string]*string{
-				tagComment: to.StringPtr("DO NOT DELETE: Used by all roachprod clusters"),
-				tagRoachprod: to.StringPtr("true"),
-			},
-		})
-	}
-
-	getVNetGroup := func(location string) (resources.Group, bool, error) {
-		group, err := groupsClient.Get(ctx, vnetGroupName(location))
-		if err == nil {
-			return group, true, nil
-		}
-		if detail, ok := err.(autorest.DetailedError); ok {
-			if code, ok := detail.StatusCode.(int); ok {
-				if code == 404 {
-					return resources.Group{}, false, nil
-				}
-			}
-		}
-		return resources.Group{}, false, err
+	vnetResourceGroupName := func(location string) string {
+		return fmt.Sprintf("roachprod-vnets-%s", location)
 	}
 	setVNetSubnetPrefix := func(group resources.Group, subnet int) (resources.Group, error) {
@@ -686,18 +846,17 @@ func (p *Provider) createVNets(
 		})
 	}
-	// First, find or create a resource group for roachprod to create the
-	// VNets in. We need one per location.
-	groupsByLocation := make(map[string]resources.Group)
+	// First, find or create resource groups and network security groups for
+	// roachprod to create the VNets in. We need one per location.
 	for _, location := range locations {
-		group, found, err := getVNetGroup(location)
-		if err == nil && !found {
-			group, err = createVNetGroup(location)
+		group, err := p.getOrCreateResourceGroup(ctx, vnetResourceGroupName(location), location, vnetResourceGroupTags)
+		if err != nil {
+			return nil, errors.Wrapf(err, "resource group for location %q", location)
 		}
+		_, err = p.getOrCreateNetworkSecurityGroup(ctx, p.getVnetNetworkSecurityGroupName(location), group)
 		if err != nil {
-			return nil, errors.Wrapf(err, "for location %q", location)
+			return nil, errors.Wrapf(err, "nsg for location %q", location)
 		}
-		groupsByLocation[location] = group
 	}
 	// In order to prevent overlapping subnets, we want to associate each
@@ -708,8 +867,22 @@ func (p *Provider) createVNets(
 	// roachprod to select a new network prefix.
 	prefixesByLocation := make(map[string]int)
 	activePrefixes := make(map[int]bool)
-	var locationsWithoutSubnet []string
-	for location, group := range groupsByLocation {
+
+	nextAvailablePrefix := func() int {
+		prefix := 1
+		for activePrefixes[prefix] {
+			prefix++
+		}
+		activePrefixes[prefix] = true
+		return prefix
+	}
+	newSubnetsCreated := false
+
+	for _, location := range p.opts.locations {
+		p.mu.Lock()
+		group := p.mu.resourceGroups[vnetResourceGroupName(location)]
+		p.mu.Unlock()
+		// Prefix already exists for the resource group.
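+		// (The prefix is persisted as a tag on the resource group so that the
+		// 10.<prefix>.0.0/16 ranges handed out across runs never collide.)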
if prefixString := group.Tags[tagSubnet]; prefixString != nil { prefix, err := strconv.Atoi(*prefixString) if err != nil { @@ -718,29 +891,38 @@ func (p *Provider) createVNets( activePrefixes[prefix] = true prefixesByLocation[location] = prefix } else { - locationsWithoutSubnet = append(locationsWithoutSubnet, location) - } - } - - prefix := 1 - for _, location := range locationsWithoutSubnet { - for activePrefixes[prefix] { - prefix++ - } - activePrefixes[prefix] = true - prefixesByLocation[location] = prefix - group := groupsByLocation[location] - if groupsByLocation[location], err = setVNetSubnetPrefix(group, prefix); err != nil { - return nil, errors.Wrapf(err, "for location %q", location) + // The fact that the vnet didn't have a prefix means that new subnets will + // be created. + newSubnetsCreated = true + prefix := nextAvailablePrefix() + prefixesByLocation[location] = prefix + p.mu.Lock() + group := p.mu.resourceGroups[vnetResourceGroupName(location)] + p.mu.Unlock() + group, err = setVNetSubnetPrefix(group, prefix) + if err != nil { + return nil, errors.Wrapf(err, "for location %q", location) + } + // We just updated the VNet Subnet prefix on the resource group -- update + // the cached entry to reflect that. + p.mu.Lock() + p.mu.resourceGroups[vnetResourceGroupName(location)] = group + p.mu.Unlock() } } // Now, we can ensure that the VNet exists with the requested subnet. + // TODO(arul): Does this need to be done for all locations or just for the + // locations that didn't have a subnet/vnet before? I'm inclined to say the + // latter, but I'm leaving the existing behavior as is. ret := make(map[string]network.VirtualNetwork) vnets := make([]network.VirtualNetwork, len(ret)) for location, prefix := range prefixesByLocation { - group := groupsByLocation[location] - if vnet, _, err := p.createVNet(ctx, group, prefix); err == nil { + p.mu.Lock() + resourceGroup := p.mu.resourceGroups[vnetResourceGroupName(location)] + networkSecurityGroup := p.mu.securityGroups[p.getVnetNetworkSecurityGroupName(location)] + p.mu.Unlock() + if vnet, _, err := p.createVNet(ctx, resourceGroup, networkSecurityGroup, prefix); err == nil { ret[location] = vnet vnets = append(vnets, vnet) } else { @@ -749,10 +931,9 @@ func (p *Provider) createVNets( } // We only need to create peerings if there are new subnets. - if locationsWithoutSubnet != nil { + if newSubnetsCreated { return ret, p.createVNetPeerings(ctx, vnets) } - return ret, nil } @@ -760,7 +941,10 @@ func (p *Provider) createVNets( // A single /18 subnet will be created within the VNet. // The results will be memoized in the Provider. 
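// For prefix 3, say, the resulting VNet spans 10.3.0.0/16 and contains a
// single subnet 10.3.0.0/18.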
func (p *Provider) createVNet( - ctx context.Context, group resources.Group, prefix int, + ctx context.Context, + resourceGroup resources.Group, + securityGroup network.SecurityGroup, + prefix int, ) (vnet network.VirtualNetwork, subnet network.Subnet, err error) { vnetName := p.opts.vnetName @@ -773,40 +957,42 @@ func (p *Provider) createVNet( return } vnet = network.VirtualNetwork{ - Name: group.Name, - Location: group.Location, + Name: to.StringPtr(vnetName), + Location: resourceGroup.Location, VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{ AddressSpace: &network.AddressSpace{ AddressPrefixes: &[]string{fmt.Sprintf("10.%d.0.0/16", prefix)}, }, Subnets: &[]network.Subnet{ { - Name: group.Name, + Name: resourceGroup.Name, SubnetPropertiesFormat: &network.SubnetPropertiesFormat{ - AddressPrefix: to.StringPtr(fmt.Sprintf("10.%d.0.0/18", prefix)), + AddressPrefix: to.StringPtr(fmt.Sprintf("10.%d.0.0/18", prefix)), + NetworkSecurityGroup: &securityGroup, }, }, }, }, } - future, err := client.CreateOrUpdate(ctx, *group.Name, *group.Name, vnet) + future, err := client.CreateOrUpdate(ctx, *resourceGroup.Name, *resourceGroup.Name, vnet) if err != nil { - err = errors.Wrapf(err, "creating Azure VNet %q in %q", vnetName, *group.Name) + err = errors.Wrapf(err, "creating Azure VNet %q in %q", vnetName, *resourceGroup.Name) return } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - err = errors.Wrapf(err, "creating Azure VNet %q in %q", vnetName, *group.Name) + err = errors.Wrapf(err, "creating Azure VNet %q in %q", vnetName, *resourceGroup.Name) return } vnet, err = future.Result(client) - err = errors.Wrapf(err, "creating Azure VNet %q in %q", vnetName, *group.Name) - if err == nil { - subnet = (*vnet.Subnets)[0] - p.mu.Lock() - p.mu.subnets[*group.Location] = subnet - p.mu.Unlock() - log.Printf("created Azure VNet %q in %q with prefix %d", vnetName, *group.Name, prefix) + err = errors.Wrapf(err, "creating Azure VNet %q in %q", vnetName, *resourceGroup.Name) + if err != nil { + return } + subnet = (*vnet.Subnets)[0] + p.mu.Lock() + p.mu.subnets[*resourceGroup.Location] = subnet + p.mu.Unlock() + log.Printf("created Azure VNet %q in %q with prefix %d", vnetName, *resourceGroup.Name, prefix) return } @@ -884,11 +1070,15 @@ func (p *Provider) createIP( } future, err := ipc.CreateOrUpdate(ctx, *group.Name, name, network.PublicIPAddress{ - Name: to.StringPtr(name), + Name: to.StringPtr(name), + Sku: &network.PublicIPAddressSku{ + Name: network.PublicIPAddressSkuNameStandard, + }, Location: group.Location, + Zones: to.StringSlicePtr([]string{p.opts.zone}), PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{ PublicIPAddressVersion: network.IPv4, - PublicIPAllocationMethod: network.Dynamic, + PublicIPAllocationMethod: network.Static, }, }) if err != nil { @@ -959,49 +1149,60 @@ func (p *Provider) fillNetworkDetails(ctx context.Context, m *vm.VM, nicID azure return nil } -// getResourceGroup creates or retrieves a resource group within the -// specified location. The base name will be combined with the location, -// to allow for easy tear-down of multi-region clusters. Results are -// memoized within the Provider instance. 
-func (p *Provider) getResourceGroup( - ctx context.Context, cluster, location string, opts vm.CreateOpts, -) (group resources.Group, err error) { - groupName := fmt.Sprintf("%s-%s", cluster, location) +// getOrCreateResourceGroup retrieves or creates a resource group with the given +// name in the specified location and with the given tags. Results are memoized +// within the Provider instance. +func (p *Provider) getOrCreateResourceGroup( + ctx context.Context, name string, location string, tags map[string]*string, +) (resources.Group, error) { + // First, check the local provider cache. p.mu.Lock() - group, ok := p.mu.resourceGroups[groupName] + group, ok := p.mu.resourceGroups[name] p.mu.Unlock() if ok { - return + return group, nil + } + + cacheAndReturn := func(group resources.Group) (resources.Group, error) { + p.mu.Lock() + p.mu.resourceGroups[name] = group + p.mu.Unlock() + return group, nil } sub, err := p.getSubscription(ctx) if err != nil { - return + return resources.Group{}, err } client := resources.NewGroupsClient(*sub.SubscriptionID) if client.Authorizer, err = p.getAuthorizer(); err != nil { - return + return resources.Group{}, err } - tags := make(map[string]*string) - tags[tagCluster] = to.StringPtr(cluster) - tags[tagCreated] = to.StringPtr(timeutil.Now().Format(time.RFC3339)) - tags[tagLifetime] = to.StringPtr(opts.Lifetime.String()) - tags[tagRoachprod] = to.StringPtr("true") + // Next, we make an API call to see if the resource already exists on Azure. + group, err = client.Get(ctx, name) + if err == nil { + return cacheAndReturn(group) + } + var detail autorest.DetailedError + if errors.As(err, &detail) { + // It's okay if the resource was "not found" -- we will create it below. + if code, ok := detail.StatusCode.(int); ok && code != 404 { + return resources.Group{}, err + } + } - group, err = client.CreateOrUpdate(ctx, groupName, + group, err = client.CreateOrUpdate(ctx, name, resources.Group{ Location: to.StringPtr(location), Tags: tags, }) - if err == nil { - p.mu.Lock() - p.mu.resourceGroups[groupName] = group - p.mu.Unlock() + if err != nil { + return resources.Group{}, err } - return + return cacheAndReturn(group) } // getSubscription chooses the first available subscription. The value diff --git a/pkg/cmd/roachprod/vm/azure/flags.go b/pkg/cmd/roachprod/vm/azure/flags.go index e03152e65f1f..150ea710b52f 100644 --- a/pkg/cmd/roachprod/vm/azure/flags.go +++ b/pkg/cmd/roachprod/vm/azure/flags.go @@ -26,6 +26,8 @@ type providerOpts struct { operationTimeout time.Duration syncDelete bool vnetName string + zone string + networkDiskType string } var defaultLocations = []string{ @@ -34,6 +36,8 @@ var defaultLocations = []string{ "westeurope", } +var defaultZone = "1" + // ConfigureCreateFlags implements vm.ProviderFlags. func (o *providerOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { flags.DurationVar(&o.operationTimeout, ProviderName+"-timeout", 10*time.Minute, @@ -48,6 +52,9 @@ func (o *providerOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { strings.Join(defaultLocations, ","))) flags.StringVar(&o.vnetName, ProviderName+"-vnet-name", "common", "The name of the VNet to use") + flags.StringVar(&o.zone, ProviderName+"-availability-zone", "", fmt.Sprintf("Availability Zone to create VMs in")) + flags.StringVar(&o.networkDiskType, ProviderName+"-network-disk-type", "premium-disk", + "type of network disk [premium-disk, ultra-disk]. only used if local-ssd is false") } // ConfigureClusterFlags implements vm.ProviderFlags and is a no-op. 
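The getOrCreateResourceGroup change above (and its network-security-group sibling) follows one get-or-create shape: consult the provider-local cache, fall back to a GET against Azure, treat only an explicit 404 as "not created yet", create in that case, and memoize whatever came back. A minimal sketch of that shape, assuming the azure-sdk resources/autorest packages and github.com/cockroachdb/errors from the diff above; getFn and createFn are hypothetical stand-ins for the GroupsClient calls:

	func getOrCreate(
		cache map[string]resources.Group,
		name string,
		getFn, createFn func(name string) (resources.Group, error),
	) (resources.Group, error) {
		if g, ok := cache[name]; ok {
			return g, nil // fast path: memoized on a previous call
		}
		g, err := getFn(name)
		if err != nil {
			// Only an explicit HTTP 404 means "absent"; anything else is fatal.
			var detail autorest.DetailedError
			code, isInt := 0, false
			if errors.As(err, &detail) {
				code, isInt = detail.StatusCode.(int)
			}
			if !isInt || code != 404 {
				return resources.Group{}, err
			}
			if g, err = createFn(name); err != nil {
				return resources.Group{}, err
			}
		}
		cache[name] = g // memoize for the next caller
		return g, nil
	}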
diff --git a/pkg/cmd/roachprod/vm/flagstub/flagstub.go b/pkg/cmd/roachprod/vm/flagstub/flagstub.go index 5fb8e2fcc949..743983b145a5 100644 --- a/pkg/cmd/roachprod/vm/flagstub/flagstub.go +++ b/pkg/cmd/roachprod/vm/flagstub/flagstub.go @@ -14,7 +14,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // New wraps a delegate vm.Provider to only return its name and @@ -23,12 +23,12 @@ import ( // implemented as no-op or no-value. All other operations will // return the provided error. func New(delegate vm.Provider, unimplemented string) vm.Provider { - return &provider{delegate: delegate, unimplemented: errors.New(unimplemented)} + return &provider{delegate: delegate, unimplemented: unimplemented} } type provider struct { delegate vm.Provider - unimplemented error + unimplemented string } // CleanSSH implements vm.Provider and is a no-op. @@ -43,17 +43,17 @@ func (p *provider) ConfigSSH() error { // Create implements vm.Provider and returns Unimplemented. func (p *provider) Create(names []string, opts vm.CreateOpts) error { - return p.unimplemented + return errors.Newf("%s", p.unimplemented) } // Delete implements vm.Provider and returns Unimplemented. func (p *provider) Delete(vms vm.List) error { - return p.unimplemented + return errors.Newf("%s", p.unimplemented) } // Extend implements vm.Provider and returns Unimplemented. func (p *provider) Extend(vms vm.List, lifetime time.Duration) error { - return p.unimplemented + return errors.Newf("%s", p.unimplemented) } // FindActiveAccount implements vm.Provider and returns an empty account. diff --git a/pkg/cmd/roachprod/vm/gce/gcloud.go b/pkg/cmd/roachprod/vm/gce/gcloud.go index 9cf18066ef0d..60e3fae2b026 100644 --- a/pkg/cmd/roachprod/vm/gce/gcloud.go +++ b/pkg/cmd/roachprod/vm/gce/gcloud.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/flagstub" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" "golang.org/x/sync/errgroup" ) @@ -62,7 +62,7 @@ func runJSONCommand(args []string, parsed interface{}) error { rawJSON, err := cmd.Output() if err != nil { var stderr []byte - if exitErr, ok := err.(*exec.ExitError); ok { + if exitErr := (*exec.ExitError)(nil); errors.As(err, &exitErr) { stderr = exitErr.Stderr } // TODO(peter,ajwerner): Remove this hack once gcloud behaves when adding @@ -178,9 +178,12 @@ type providerOpts struct { projects []string ServiceAccount string MachineType string + MinCPUPlatform string Zones []string Image string SSDCount int + PDVolumeType string + PDVolumeSize int // useSharedUser indicates that the shared user rather than the personal // user should be used to ssh into the remote machines. 
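One idiom in the gcloud.go hunk above is worth calling out: with github.com/cockroachdb/errors, `exitErr := (*exec.ExitError)(nil); errors.As(err, &exitErr)` replaces the old direct type assertion and also matches when the *exec.ExitError sits behind layers of wrapping. A self-contained sketch of the pattern:

	package main

	import (
		"fmt"
		"os/exec"

		"github.com/cockroachdb/errors"
	)

	func main() {
		_, err := exec.Command("false").Output()
		err = errors.Wrap(err, "running command") // wrapping does not hide the ExitError
		// Declare a typed nil target inline; errors.As fills it in on a match.
		if exitErr := (*exec.ExitError)(nil); errors.As(err, &exitErr) {
			fmt.Printf("exit code %d, stderr: %q\n", exitErr.ExitCode(), exitErr.Stderr)
		}
	}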
@@ -265,17 +268,26 @@ func (o *providerOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { flags.StringVar(&o.ServiceAccount, ProviderName+"-service-account", os.Getenv("GCE_SERVICE_ACCOUNT"), "Service account to use") + flags.StringVar(&o.MachineType, ProviderName+"-machine-type", "n1-standard-4", "Machine type (see https://cloud.google.com/compute/docs/machine-types)") + flags.StringVar(&o.MinCPUPlatform, ProviderName+"-min-cpu-platform", "", + "Minimum CPU platform (see https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)") + flags.StringVar(&o.Image, ProviderName+"-image", "ubuntu-1604-xenial-v20200129", + "Image to use to create the vm, ubuntu-1904-disco-v20191008 is a more modern image") + + flags.IntVar(&o.SSDCount, ProviderName+"-local-ssd-count", 1, + "Number of local SSDs to create, only used if local-ssd=true") + flags.StringVar(&o.PDVolumeType, ProviderName+"-pd-volume-type", "pd-ssd", + "Type of the persistent disk volume, only used if local-ssd=false") + flags.IntVar(&o.PDVolumeSize, ProviderName+"-pd-volume-size", 500, + "Size in GB of persistent disk volume, only used if local-ssd=false") + flags.StringSliceVar(&o.Zones, ProviderName+"-zones", nil, fmt.Sprintf("Zones for cluster. If zones are formatted as AZ:N where N is an integer, the zone\n"+ "will be repeated N times. If > 1 zone specified, nodes will be geo-distributed\n"+ "regardless of geo (default [%s])", strings.Join(defaultZones, ","))) - flags.StringVar(&o.Image, ProviderName+"-image", "ubuntu-1604-xenial-v20200129", - "Image to use to create the vm, ubuntu-1904-disco-v20191008 is a more modern image") - flags.IntVar(&o.SSDCount, ProviderName+"-local-ssd-count", 1, - "Number of local SSDs to create on GCE instance.") } func (o *providerOpts) ConfigureClusterFlags(flags *pflag.FlagSet, opt vm.MultipleProjectsOption) { @@ -402,6 +414,16 @@ func (p *Provider) Create(names []string, opts vm.CreateOpts) error { if opts.SSDOpts.NoExt4Barrier { extraMountOpts = "nobarrier" } + } else { + pdProps := []string{ + fmt.Sprintf("type=%s", p.opts.PDVolumeType), + fmt.Sprintf("size=%dGB", p.opts.PDVolumeSize), + "auto-delete=yes", + } + args = append(args, "--create-disk", strings.Join(pdProps, ",")) + // Enable DISCARD commands for persistent disks, as is advised in: + // https://cloud.google.com/compute/docs/disks/optimizing-pd-performance#formatting_parameters. + extraMountOpts = "discard" } // Create GCE startup script file. @@ -414,6 +436,9 @@ func (p *Provider) Create(names []string, opts vm.CreateOpts) error { }() args = append(args, "--machine-type", p.opts.MachineType) + if p.opts.MinCPUPlatform != "" { + args = append(args, "--min-cpu-platform", p.opts.MinCPUPlatform) + } args = append(args, "--labels", fmt.Sprintf("lifetime=%s", opts.Lifetime)) args = append(args, "--metadata-from-file", fmt.Sprintf("startup-script=%s", filename)) diff --git a/pkg/cmd/roachprod/vm/gce/utils.go b/pkg/cmd/roachprod/vm/gce/utils.go index cf81d561b14f..640f75849991 100644 --- a/pkg/cmd/roachprod/vm/gce/utils.go +++ b/pkg/cmd/roachprod/vm/gce/utils.go @@ -21,7 +21,7 @@ import ( "text/template" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) const ( @@ -37,8 +37,9 @@ var Subdomain = func() string { return "roachprod.crdb.io" }() -// Startup script used to find/format/mount all local SSDs in GCE. -// Each disk is mounted to /mnt/data and chmoded to all users. 
+// Startup script used to find/format/mount all local SSDs and (non-boot) +// persistent disks in GCE. Each disk is mounted to /mnt/data and +// chmoded to all users. // // This is a template because the instantiator needs to optionally configure the // mounting options. The script cannot take arguments since it is to be invoked @@ -46,11 +47,12 @@ var Subdomain = func() string { const gceLocalSSDStartupScriptTemplate = `#!/usr/bin/env bash # Script for setting up a GCE machine for roachprod use. -mount_opts="discard,defaults" +mount_opts="defaults" {{if .ExtraMountOpts}}mount_opts="${mount_opts},{{.ExtraMountOpts}}"{{end}} +# ignore the boot disk: /dev/disk/by-id/google-persistent-disk-0. disknum=0 -for d in $(ls /dev/disk/by-id/google-local-*); do +for d in $(ls /dev/disk/by-id/google-local-* /dev/disk/by-id/google-persistent-disk-[1-9]); do let "disknum++" grep -e "${d}" /etc/fstab > /dev/null if [ $? -ne 0 ]; then @@ -84,7 +86,7 @@ sudo service sshd restart # increase the default maximum number of open file descriptors for # root and non-root users. Load generators running a lot of concurrent # workers bump into this often. -sudo sh -c 'echo "root - nofile 65536\n* - nofile 65536" > /etc/security/limits.d/10-roachprod-nofiles.conf' +sudo sh -c 'echo "root - nofile 1048576\n* - nofile 1048576" > /etc/security/limits.d/10-roachprod-nofiles.conf' # Send TCP keepalives every minute since GCE will terminate idle connections # after 10m. Note that keepalives still need to be requested by the application diff --git a/pkg/cmd/roachprod/vm/local/local.go b/pkg/cmd/roachprod/vm/local/local.go index 779ee1f4bb60..97d637a47b51 100644 --- a/pkg/cmd/roachprod/vm/local/local.go +++ b/pkg/cmd/roachprod/vm/local/local.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/install" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) diff --git a/pkg/cmd/roachprod/vm/vm.go b/pkg/cmd/roachprod/vm/vm.go index 1c9c19dd1b9a..bce49b1b05f0 100644 --- a/pkg/cmd/roachprod/vm/vm.go +++ b/pkg/cmd/roachprod/vm/vm.go @@ -19,7 +19,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" "golang.org/x/sync/errgroup" ) diff --git a/pkg/cmd/roachtest/acceptance.go b/pkg/cmd/roachtest/acceptance.go index 7965d827a557..4945b391dd5e 100644 --- a/pkg/cmd/roachtest/acceptance.go +++ b/pkg/cmd/roachtest/acceptance.go @@ -29,13 +29,12 @@ func registerAcceptance(r *testRegistry) { { name: "bank/zerosum-splits", fn: runBankNodeZeroSum, skip: "https://github.com/cockroachdb/cockroach/issues/33683 (runs into " + - " various errors during its rebalances, see isExpectedRelocateError)", + " various errors during its rebalances, see IsExpectedRelocateError)", }, // {"bank/zerosum-restart", runBankZeroSumRestart}, {name: "build-info", fn: runBuildInfo}, {name: "build-analyze", fn: runBuildAnalyze}, {name: "cli/node-status", fn: runCLINodeStatus}, - {name: "decommission", fn: runDecommissionAcceptance}, {name: "cluster-init", fn: runClusterInit}, {name: "event-log", fn: runEventLog}, {name: "gossip/peerings", fn: runGossipPeerings}, @@ -43,18 +42,24 @@ func registerAcceptance(r *testRegistry) { {name: "gossip/restart-node-one", fn: runGossipRestartNodeOne}, {name: 
"gossip/locality-address", fn: runCheckLocalityIPAddress}, {name: "rapid-restart", fn: runRapidRestart}, + { + name: "many-splits", fn: runManySplits, + minVersion: "v19.2.0", // SQL syntax unsupported on 19.1.x + }, {name: "status-server", fn: runStatusServer}, { name: "version-upgrade", - fn: runVersionUpgrade, - skip: "skipped due to flakiness", + fn: func(ctx context.Context, t *test, c *cluster) { + runVersionUpgrade(ctx, t, c, r.buildVersion) + }, // This test doesn't like running on old versions because it upgrades to // the latest released version and then it tries to "head", where head is // the cockroach binary built from the branch on which the test is // running. If that branch corresponds to an older release, then upgrading // to head after 19.2 fails. minVersion: "v19.2.0", - timeout: 30 * time.Minute}, + timeout: 30 * time.Minute, + }, } tags := []string{"default", "quick"} const numNodes = 4 diff --git a/pkg/cmd/roachtest/activerecord.go b/pkg/cmd/roachtest/activerecord.go new file mode 100644 index 000000000000..12d004b3d93e --- /dev/null +++ b/pkg/cmd/roachtest/activerecord.go @@ -0,0 +1,237 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "regexp" +) + +var activerecordResultRegex = regexp.MustCompile(`^(?P[^\s]+#[^\s]+) = (?P\d+\.\d+ s) = (?P.)$`) +var railsReleaseTagRegex = regexp.MustCompile(`^v(?P\d+)\.(?P\d+)\.(?P\d+)\.?(?P\d*)$`) +var supportedRailsVersion = "5.2.4.3" + +// This test runs pgjdbc's full test suite against a single cockroach node. + +func registerActiveRecord(r *testRegistry) { + runActiveRecord := func( + ctx context.Context, + t *test, + c *cluster, + ) { + if c.isLocal() { + t.Fatal("cannot be run in local mode") + } + node := c.Node(1) + t.Status("setting up cockroach") + c.Put(ctx, cockroach, "./cockroach", c.All()) + c.Start(ctx, t, c.All()) + + version, err := fetchCockroachVersion(ctx, c, node[0]) + if err != nil { + t.Fatal(err) + } + + if err := alterZoneConfigAndClusterSettings(ctx, version, c, node[0]); err != nil { + t.Fatal(err) + } + + t.Status("creating database used by tests") + db, err := c.ConnE(ctx, node[0]) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + if _, err := db.ExecContext( + ctx, `CREATE DATABASE activerecord_unittest;`, + ); err != nil { + t.Fatal(err) + } + + if _, err := db.ExecContext( + ctx, `CREATE DATABASE activerecord_unittest2;`, + ); err != nil { + t.Fatal(err) + } + + t.Status("cloning rails and installing prerequisites") + // Report the latest tag, but do not use it. The newest versions produces output that breaks our xml parser, + // and we want to pin to the working version for now. 
+ latestTag, err := repeatGetLatestTag( + ctx, c, "rails", "rails", railsReleaseTagRegex, + ) + if err != nil { + t.Fatal(err) + } + c.l.Printf("Latest rails release is %s.", latestTag) + c.l.Printf("Supported rails release is %s.", supportedRailsVersion) + + if err := repeatRunE( + ctx, c, node, "update apt-get", `sudo apt-get -qq update`, + ); err != nil { + t.Fatal(err) + } + + if err := repeatRunE( + ctx, + c, + node, + "install dependencies", + `sudo apt-get -qq install ruby-full ruby-dev rubygems build-essential zlib1g-dev libpq-dev libsqlite3-dev`, + ); err != nil { + t.Fatal(err) + } + + if err := repeatRunE( + ctx, + c, + node, + "install ruby 2.7", + `mkdir -p ruby-install && \ + curl -fsSL https://github.com/postmodern/ruby-install/archive/v0.6.1.tar.gz | tar --strip-components=1 -C ruby-install -xz && \ + sudo make -C ruby-install install && \ + sudo ruby-install --system ruby 2.7.1 && \ + sudo gem update --system`, + ); err != nil { + t.Fatal(err) + } + + if err := repeatRunE( + ctx, c, node, "remove old activerecord adapter", `rm -rf /mnt/data1/activerecord-cockroachdb-adapter`, + ); err != nil { + t.Fatal(err) + } + + if err := repeatGitCloneE( + ctx, + t.l, + c, + "https://github.com/cockroachdb/activerecord-cockroachdb-adapter.git", + "/mnt/data1/activerecord-cockroachdb-adapter", + "master", + node, + ); err != nil { + t.Fatal(err) + } + + t.Status("installing bundler") + if err := repeatRunE( + ctx, + c, + node, + "installing bundler", + `cd /mnt/data1/activerecord-cockroachdb-adapter/ && sudo gem install bundler:2.1.4`, + ); err != nil { + t.Fatal(err) + } + + t.Status("installing gems") + if err := repeatRunE( + ctx, + c, + node, + "installing gems", + fmt.Sprintf( + `cd /mnt/data1/activerecord-cockroachdb-adapter/ && `+ + `RAILS_VERSION=%s sudo bundle install`, supportedRailsVersion), + ); err != nil { + t.Fatal(err) + } + + blocklistName, expectedFailures, ignorelistName, ignorelist := activeRecordBlocklists.getLists(version) + if expectedFailures == nil { + t.Fatalf("No activerecord blocklist defined for cockroach version %s", version) + } + status := fmt.Sprintf("Running cockroach version %s, using blocklist %s", version, blocklistName) + if ignorelist != nil { + status = fmt.Sprintf("Running cockroach version %s, using blocklist %s, using ignorelist %s", + version, blocklistName, ignorelistName) + } + c.l.Printf("%s", status) + + t.Status("running activerecord test suite") + // Note that this is expected to return an error, since the test suite + // will fail. And it is safe to swallow it here. + rawResults, _ := c.RunWithBuffer(ctx, t.l, node, + `cd /mnt/data1/activerecord-cockroachdb-adapter/ && `+ + `sudo RUBYOPT="-W0" TESTOPTS="-v" bundle exec rake test`, + ) + + c.l.Printf("Test Results:\n%s", rawResults) + + // Find all the failed and errored tests. + results := newORMTestsResults() + + scanner := bufio.NewScanner(bytes.NewReader(rawResults)) + for scanner.Scan() { + match := activerecordResultRegex.FindStringSubmatch(scanner.Text()) + if match == nil { + continue + } + test, result := match[1], match[3] + pass := result == "." 
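+			// With TESTOPTS="-v", each test prints a line in the shape matched
+			// by activerecordResultRegex above, e.g.:
+			//   FixturesTest#test_create_fixtures = 0.05 s = .
+			// where the trailing character is "." for a pass, "S" for a skip,
+			// and a failure or error otherwise.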
+ skipped := result == "S" + results.allTests = append(results.allTests, test) + + ignoredIssue, expectedIgnored := ignorelist[test] + issue, expectedFailure := expectedFailures[test] + switch { + case expectedIgnored: + results.results[test] = fmt.Sprintf("--- SKIP: %s due to %s (expected)", test, ignoredIssue) + results.ignoredCount++ + case skipped && expectedFailure: + results.results[test] = fmt.Sprintf("--- SKIP: %s (unexpected)", test) + results.unexpectedSkipCount++ + case skipped: + results.results[test] = fmt.Sprintf("--- SKIP: %s (expected)", test) + results.skipCount++ + case pass && !expectedFailure: + results.results[test] = fmt.Sprintf("--- PASS: %s (expected)", test) + results.passExpectedCount++ + case pass && expectedFailure: + results.results[test] = fmt.Sprintf("--- PASS: %s - %s (unexpected)", + test, maybeAddGithubLink(issue), + ) + results.passUnexpectedCount++ + case !pass && expectedFailure: + results.results[test] = fmt.Sprintf("--- FAIL: %s - %s (expected)", + test, maybeAddGithubLink(issue), + ) + results.failExpectedCount++ + results.currentFailures = append(results.currentFailures, test) + case !pass && !expectedFailure: + results.results[test] = fmt.Sprintf("--- FAIL: %s (unexpected)", test) + results.failUnexpectedCount++ + results.currentFailures = append(results.currentFailures, test) + } + results.runTests[test] = struct{}{} + } + + results.summarizeAll( + t, "activerecord" /* ormName */, blocklistName, expectedFailures, version, supportedRailsVersion, + ) + } + + r.Add(testSpec{ + MinVersion: "v20.1.0", + Name: "activerecord", + Owner: OwnerAppDev, + Cluster: makeClusterSpec(1), + Tags: []string{`default`, `orm`}, + Run: func(ctx context.Context, t *test, c *cluster) { + runActiveRecord(ctx, t, c) + }, + }) +} diff --git a/pkg/cmd/roachtest/activerecord_blocklist.go b/pkg/cmd/roachtest/activerecord_blocklist.go new file mode 100644 index 000000000000..5c22436f801a --- /dev/null +++ b/pkg/cmd/roachtest/activerecord_blocklist.go @@ -0,0 +1,39 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +var activeRecordBlocklists = blocklistsForVersion{ + {"v20.1", "activeRecordBlockList20_1", activeRecordBlockList20_1, "activeRecordIgnoreList20_1", activeRecordIgnoreList20_1}, + {"v20.2", "activeRecordBlockList20_2", activeRecordBlockList20_2, "activeRecordIgnoreList20_2", activeRecordIgnoreList20_2}, +} + +// These are lists of known activerecord test errors and failures. +// When the activerecord test suite is run, the results are compared to this list. +// Any passed test that is not on this list is reported as PASS - expected +// Any passed test that is on this list is reported as PASS - unexpected +// Any failed test that is on this list is reported as FAIL - expected +// Any failed test that is not on this list is reported as FAIL - unexpected +// Any test on this list that is not run is reported as FAIL - not run +// +// Please keep these lists alphabetized for easy diffing. +// After a failed run, an updated version of this blocklist should be available +// in the test log. 
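+//
+// An entry maps a test name to the reason or tracking issue it is expected
+// to fail under, e.g. (hypothetical entry):
+//
+//	"ActiveRecord::AdapterTest#test_foo": "41776",
+//
+// where a bare issue number gets rendered as a link by maybeAddGithubLink.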
+var activeRecordBlockList20_2 = blocklist{} + +var activeRecordBlockList20_1 = blocklist{} + +var activeRecordIgnoreList20_2 = blocklist{ + "FixturesTest#test_create_fixtures": "flaky - FK constraint violated sometimes when loading all fixture data", +} + +var activeRecordIgnoreList20_1 = blocklist{ + "FixturesTest#test_create_fixtures": "flaky - FK constraint violated sometimes when loading all fixture data", +} diff --git a/pkg/cmd/roachtest/allocator.go b/pkg/cmd/roachtest/allocator.go index 138d18dfe116..515d03f00773 100644 --- a/pkg/cmd/roachtest/allocator.go +++ b/pkg/cmd/roachtest/allocator.go @@ -15,11 +15,10 @@ import ( gosql "database/sql" "fmt" "math" - "strings" "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) func registerAllocator(r *testRegistry) { @@ -88,11 +87,12 @@ func registerAllocator(r *testRegistry) { }, }) r.Add(testSpec{ - Name: `replicate/wide`, - Owner: OwnerKV, - Timeout: 10 * time.Minute, - Cluster: makeClusterSpec(9, cpu(1)), - Run: runWideReplication, + Name: `replicate/wide`, + Owner: OwnerKV, + Timeout: 10 * time.Minute, + Cluster: makeClusterSpec(9, cpu(1)), + MinVersion: "v19.2.0", + Run: runWideReplication, }) } @@ -255,7 +255,10 @@ func runWideReplication(ctx context.Context, t *test, c *cluster) { t.Fatalf("9-node cluster required") } - args := startArgs("--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms") + args := startArgs( + "--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms", + "--args=--vmodule=replicate_queue=6", + ) c.Put(ctx, cockroach, "./cockroach") c.Start(ctx, t, c.All(), args) @@ -263,14 +266,7 @@ func runWideReplication(ctx context.Context, t *test, c *cluster) { defer db.Close() zones := func() []string { - oldVersion := false rows, err := db.Query(`SELECT target FROM crdb_internal.zones`) - // TODO(solon): Remove this block once we are no longer running roachtest - // against version 19.1 and earlier. - if err != nil && strings.Contains(err.Error(), `column "target" does not exist`) { - oldVersion = true - rows, err = db.Query(`SELECT zone_name FROM crdb_internal.zones`) - } if err != nil { t.Fatal(err) } @@ -281,19 +277,6 @@ func runWideReplication(ctx context.Context, t *test, c *cluster) { if err := rows.Scan(&name); err != nil { t.Fatal(err) } - // TODO(solon): Remove this block once we are no longer running roachtest - // against version 19.1 and earlier. - if oldVersion { - which := "RANGE" - if name[0] == '.' { - name = name[1:] - } else if strings.Count(name, ".") == 0 { - which = "DATABASE" - } else { - which = "TABLE" - } - name = fmt.Sprintf("%s %s", which, name) - } results = append(results, name) } return results @@ -345,6 +328,7 @@ func runWideReplication(ctx context.Context, t *test, c *cluster) { // Stop the cluster and restart 2/3 of the nodes. c.Stop(ctx) + tBeginDown := timeutil.Now() c.Start(ctx, t, c.Range(1, 6), args) waitForUnderReplicated := func(count int) { @@ -393,6 +377,10 @@ FROM crdb_internal.kv_store_status // because the allocator cannot select a replica for removal that is on a // store for which it doesn't have a store descriptor. run(`SET CLUSTER SETTING server.time_until_store_dead = '90s'`) + // Sleep until the node is dead so that when we actually wait for replication, + // we can expect things to move swiftly. 
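+	// server.time_until_store_dead was lowered to 90s above, so this sleeps
+	// for whatever remains of that window; if the window has already elapsed,
+	// the duration is negative and time.Sleep returns immediately.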
+ time.Sleep(90*time.Second - timeutil.Now().Sub(tBeginDown)) + setReplication(5) waitForReplication(5) diff --git a/pkg/cmd/roachtest/alterpk.go b/pkg/cmd/roachtest/alterpk.go index e8193d4a9c64..f05716879206 100644 --- a/pkg/cmd/roachtest/alterpk.go +++ b/pkg/cmd/roachtest/alterpk.go @@ -88,8 +88,7 @@ func registerAlterPK(r *testRegistry) { } // runAlterPKTPCC runs a primary key change while the TPCC workload runs. - runAlterPKTPCC := func(ctx context.Context, t *test, c *cluster) { - const warehouses = 500 + runAlterPKTPCC := func(ctx context.Context, t *test, c *cluster, warehouses int, expensiveChecks bool) { const duration = 10 * time.Minute roachNodes, loadNode := setupTest(ctx, t, c) @@ -151,9 +150,14 @@ func registerAlterPK(r *testRegistry) { m.Wait() // Run the verification checks of the TPCC workload post primary key change. + expensiveChecksArg := "" + if expensiveChecks { + expensiveChecksArg = "--expensive-checks" + } checkCmd := fmt.Sprintf( - "./workload check tpcc --warehouses %d --expensive-checks {pgurl%s}", + "./workload check tpcc --warehouses %d %s {pgurl%s}", warehouses, + expensiveChecksArg, c.Node(roachNodes[0]), ) t.Status("beginning database verification") @@ -170,12 +174,25 @@ func registerAlterPK(r *testRegistry) { Run: runAlterPKBank, }) r.Add(testSpec{ - Name: "alterpk-tpcc", + Name: "alterpk-tpcc-250", + Owner: OwnerSQLSchema, + // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the + // workload driver node. + MinVersion: "v20.1.0", + Cluster: makeClusterSpec(4, cpu(32)), + Run: func(ctx context.Context, t *test, c *cluster) { + runAlterPKTPCC(ctx, t, c, 250 /* warehouses */, true /* expensiveChecks */) + }, + }) + r.Add(testSpec{ + Name: "alterpk-tpcc-500", Owner: OwnerSQLSchema, // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the // workload driver node. MinVersion: "v20.1.0", Cluster: makeClusterSpec(4, cpu(16)), - Run: runAlterPKTPCC, + Run: func(ctx context.Context, t *test, c *cluster) { + runAlterPKTPCC(ctx, t, c, 500 /* warehouses */, false /* expensiveChecks */) + }, }) } diff --git a/pkg/cmd/roachtest/autoupgrade.go b/pkg/cmd/roachtest/autoupgrade.go new file mode 100644 index 000000000000..9ffb596c4772 --- /dev/null +++ b/pkg/cmd/roachtest/autoupgrade.go @@ -0,0 +1,268 @@ +// Copyright 2018 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "context" + "fmt" + "runtime" + "time" + + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/binfetcher" + "github.com/cockroachdb/errors" +) + +// This test verifies that preserve_downgrade_option is respected and that in the +// absence of it the cluster auto-upgrades when this is safe. +// +// NB: if you're interested in mixed-version testing, don't look at this test +// but check out acceptance/version-upgrade. +// +// NOTE: DO NOT USE THIS TEST AS A TEMPLATE FOR MIXED-VERSION TESTING. +// You want to look at versionupgrade.go, which has a test harness you +// can use. 
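+//
+// For reference, the setting under test is driven with plain SQL of this
+// shape (the version literal here is illustrative; the server only accepts
+// the cluster's current version):
+//
+//	SET CLUSTER SETTING cluster.preserve_downgrade_option = '19.1';
+//	RESET CLUSTER SETTING cluster.preserve_downgrade_option;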
+func registerAutoUpgrade(r *testRegistry) {
+	runAutoUpgrade := func(ctx context.Context, t *test, c *cluster, oldVersion string) {
+		nodes := c.spec.NodeCount
+		goos := ifLocal(runtime.GOOS, "linux")
+
+		b, err := binfetcher.Download(ctx, binfetcher.Options{
+			Binary:  "cockroach",
+			Version: "v" + oldVersion,
+			GOOS:    goos,
+			GOARCH:  "amd64",
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		c.Put(ctx, b, "./cockroach", c.Range(1, nodes))
+
+		c.Start(ctx, t, c.Range(1, nodes))
+
+		const stageDuration = 30 * time.Second
+		const timeUntilStoreDead = 90 * time.Second
+		const buff = 10 * time.Second
+
+		sleep := func(ts time.Duration) error {
+			t.WorkerStatus("sleeping")
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-time.After(ts):
+				return nil
+			}
+		}
+
+		db := c.Conn(ctx, 1)
+		defer db.Close()
+
+		if _, err := db.ExecContext(ctx,
+			"SET CLUSTER SETTING server.time_until_store_dead = $1", timeUntilStoreDead.String(),
+		); err != nil {
+			t.Fatal(err)
+		}
+
+		if err := sleep(stageDuration); err != nil {
+			t.Fatal(err)
+		}
+
+		decommissionAndStop := func(node int) error {
+			t.WorkerStatus("decommission")
+			port := fmt.Sprintf("{pgport:%d}", node)
+			if err := c.RunE(ctx, c.Node(node),
+				fmt.Sprintf("./cockroach node decommission %d --insecure --port=%s", node, port)); err != nil {
+				return err
+			}
+			t.WorkerStatus("stop")
+			c.Stop(ctx, c.Node(node))
+			return nil
+		}
+
+		clusterVersion := func() (string, error) {
+			var version string
+			if err := db.QueryRowContext(ctx, `SHOW CLUSTER SETTING version`).Scan(&version); err != nil {
+				return "", errors.Wrap(err, "determining cluster version")
+			}
+			return version, nil
+		}
+
+		// oldVersion was a patch-level version, such as v19.1.4, but cluster version
+		// upgrades only ever deal in <major>.<minor>, which we load from the current
+		// value of the cluster setting. Overwrite oldVersion to prevent confusion.
+		oldVersion, err = clusterVersion()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		checkUpgraded := func() (bool, error) {
+			upgradedVersion, err := clusterVersion()
+			if err != nil {
+				return false, err
+			}
+			return upgradedVersion != oldVersion, nil
+		}
+
+		checkDowngradeOption := func(version string) error {
+			if _, err := db.ExecContext(ctx,
+				"SET CLUSTER SETTING cluster.preserve_downgrade_option = $1;", version,
+			); err == nil {
+				return fmt.Errorf("cluster.preserve_downgrade_option shouldn't be set to any other values besides current cluster version; was able to set it to %s", version)
+			} else if !testutils.IsError(err, "cannot set cluster.preserve_downgrade_option") {
+				return err
+			}
+			return nil
+		}
+
+		// Now perform a rolling restart into the new binary (i.e. the one this roachtest
+		// is testing, i.e. the branch we're running on), except the last node.
+		for i := 1; i < nodes; i++ {
+			t.WorkerStatus("upgrading ", i)
+			if err := c.StopCockroachGracefullyOnNode(ctx, i); err != nil {
+				t.Fatal(err)
+			}
+			c.Put(ctx, cockroach, "./cockroach", c.Node(i))
+			c.Start(ctx, t, c.Node(i), startArgsDontEncrypt)
+			if err := sleep(stageDuration); err != nil {
+				t.Fatal(err)
+			}
+
+			// Check cluster version is not upgraded until all nodes are running the new version.
+			if upgraded, err := checkUpgraded(); err != nil {
+				t.Fatal(err)
+			} else if upgraded {
+				t.Fatal("cluster setting version shouldn't be upgraded before all nodes are running the new version")
+			}
+		}
+
+		// Now stop a previously started node and upgrade the last node.
+		// Check cluster version is not upgraded.
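+		// A stopped-but-not-decommissioned node still counts as a cluster
+		// member, so auto-upgrade must hold off even though every live node
+		// now runs the new binary.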
+ if err := c.StopCockroachGracefullyOnNode(ctx, nodes-1); err != nil { + t.Fatal(err) + } + if err := c.StopCockroachGracefullyOnNode(ctx, nodes); err != nil { + t.Fatal(err) + } + c.Put(ctx, cockroach, "./cockroach", c.Node(nodes)) + c.Start(ctx, t, c.Node(nodes), startArgsDontEncrypt) + if err := sleep(stageDuration); err != nil { + t.Fatal(err) + } + + if upgraded, err := checkUpgraded(); err != nil { + t.Fatal(err) + } else if upgraded { + t.Fatal("cluster setting version shouldn't be upgraded before all non-decommissioned nodes are alive") + } + + // Now decommission and stop n3, to test that the auto upgrade happens + // regardless (a decommissioned node is regarded as not being part of + // the cluster any more). + nodeDecommissioned := nodes - 2 + if err := decommissionAndStop(nodeDecommissioned); err != nil { + t.Fatal(err) + } + if err := sleep(timeUntilStoreDead + buff); err != nil { + t.Fatal(err) + } + + // Check cannot set cluster setting cluster.preserve_downgrade_option to any + // value besides the old cluster version. + if err := checkDowngradeOption("1.9"); err != nil { + t.Fatal(err) + } + if err := checkDowngradeOption("99.9"); err != nil { + t.Fatal(err) + } + + // Set cluster setting cluster.preserve_downgrade_option to be current + // cluster version to prevent upgrade. + if _, err := db.ExecContext(ctx, + "SET CLUSTER SETTING cluster.preserve_downgrade_option = $1;", oldVersion, + ); err != nil { + t.Fatal(err) + } + if err := sleep(stageDuration); err != nil { + t.Fatal(err) + } + + // Restart the previously stopped node. + c.Start(ctx, t, c.Node(nodes-1), startArgsDontEncrypt) + if err := sleep(stageDuration); err != nil { + t.Fatal(err) + } + + t.WorkerStatus("check cluster version has not been upgraded") + if upgraded, err := checkUpgraded(); err != nil { + t.Fatal(err) + } else if upgraded { + t.Fatal("cluster setting version shouldn't be upgraded because cluster.preserve_downgrade_option is set properly") + } + + // Check cannot set cluster setting version until cluster.preserve_downgrade_option + // is cleared. + if _, err := db.ExecContext(ctx, + "SET CLUSTER SETTING version = crdb_internal.node_executable_version();", + ); err == nil { + t.Fatal("should not be able to set cluster setting version before resetting cluster.preserve_downgrade_option") + } else if !testutils.IsError(err, "cluster.preserve_downgrade_option is set to") { + t.Fatal(err) + } + + // Reset cluster.preserve_downgrade_option to enable upgrade. + if _, err := db.ExecContext(ctx, + "RESET CLUSTER SETTING cluster.preserve_downgrade_option;", + ); err != nil { + t.Fatal(err) + } + if err := sleep(stageDuration); err != nil { + t.Fatal(err) + } + + // Check if the cluster version has been upgraded. + t.WorkerStatus("check cluster version has been upgraded") + if upgraded, err := checkUpgraded(); err != nil { + t.Fatal(err) + } else if !upgraded { + t.Fatalf("cluster setting version is not upgraded, still %s", oldVersion) + } + + // Finally, check if the cluster.preserve_downgrade_option has been reset. 
+ t.WorkerStatus("check cluster setting cluster.preserve_downgrade_option has been set to an empty string") + var downgradeVersion string + if err := db.QueryRowContext(ctx, + "SHOW CLUSTER SETTING cluster.preserve_downgrade_option", + ).Scan(&downgradeVersion); err != nil { + t.Fatal(err) + } + if downgradeVersion != "" { + t.Fatalf("cluster setting cluster.preserve_downgrade_option is %s, should be an empty string", downgradeVersion) + } + + // Start n3 again to satisfy the dead node detector. + c.Start(ctx, t, c.Node(nodeDecommissioned)) + } + + r.Add(testSpec{ + Name: `autoupgrade`, + Owner: OwnerKV, + MinVersion: "v19.1.0", + Cluster: makeClusterSpec(5), + Run: func(ctx context.Context, t *test, c *cluster) { + pred, err := PredecessorVersion(r.buildVersion) + if err != nil { + t.Fatal(err) + } + runAutoUpgrade(ctx, t, c, pred) + }, + }) +} diff --git a/pkg/cmd/roachtest/backup.go b/pkg/cmd/roachtest/backup.go index b7b2d9fee9cd..675adc0cca96 100644 --- a/pkg/cmd/roachtest/backup.go +++ b/pkg/cmd/roachtest/backup.go @@ -18,38 +18,46 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) func registerBackup(r *testRegistry) { + importBankData := func(ctx context.Context, rows int, t *test, c *cluster) string { + dest := c.name + + if local { + rows = 100 + dest += fmt.Sprintf("%d", timeutil.Now().UnixNano()) + } + + c.Put(ctx, workload, "./workload") + c.Put(ctx, cockroach, "./cockroach") + + // NB: starting the cluster creates the logs dir as a side effect, + // needed below. + c.Start(ctx, t) + c.Run(ctx, c.All(), `./workload csv-server --port=8081 &> logs/workload-csv-server.log < /dev/null &`) + time.Sleep(time.Second) // wait for csv server to open listener + + importArgs := []string{ + "./workload", "fixtures", "import", "bank", + "--db=bank", "--payload-bytes=10240", "--ranges=0", "--csv-server", "http://localhost:8081", + fmt.Sprintf("--rows=%d", rows), "--seed=1", "{pgurl:1}", + } + c.Run(ctx, c.Node(1), importArgs...) + + return dest + } + backup2TBSpec := makeClusterSpec(10) r.Add(testSpec{ - Name: fmt.Sprintf("backup2TB/%s", backup2TBSpec), + Name: fmt.Sprintf("backup/2TB/%s", backup2TBSpec), Owner: OwnerBulkIO, Cluster: backup2TBSpec, MinVersion: "v2.1.0", Run: func(ctx context.Context, t *test, c *cluster) { rows := 65104166 - dest := c.name - - if local { - rows = 100 - dest += fmt.Sprintf("%d", timeutil.Now().UnixNano()) - } - - c.Put(ctx, workload, "./workload") - c.Put(ctx, cockroach, "./cockroach") - - // NB: starting the cluster creates the logs dir as a side effect, - // needed below. 
- c.Start(ctx, t) - c.Run(ctx, c.All(), `./workload csv-server --port=8081 &> logs/workload-csv-server.log < /dev/null &`) - time.Sleep(time.Second) // wait for csv server to open listener - - c.Run(ctx, c.Node(1), "./workload", "fixtures", "import", "bank", - "--db=bank", "--payload-bytes=10240", "--ranges=0", "--csv-server", "http://localhost:8081", - fmt.Sprintf("--rows=%d", rows), "--seed=1", "{pgurl:1}") - + dest := importBankData(ctx, rows, t, c) m := newMonitor(ctx, c) m.Go(func(ctx context.Context) error { t.Status(`running backup`) @@ -90,11 +98,14 @@ func registerBackup(r *testRegistry) { incDir := backupDir + "/inc" t.Status(`workload initialization`) - cmd := fmt.Sprintf( + cmd := []string{fmt.Sprintf( "./workload init tpcc --warehouses=%d {pgurl:1-%d}", warehouses, c.spec.NodeCount, - ) - c.Run(ctx, c.Node(1), cmd) + )} + if !t.buildVersion.AtLeast(version.MustParse("v20.2.0")) { + cmd = append(cmd, "--deprecated-fk-indexes") + } + c.Run(ctx, c.Node(1), cmd...) m := newMonitor(ctx, c) m.Go(func(ctx context.Context) error { @@ -126,11 +137,11 @@ func registerBackup(r *testRegistry) { return } - t.Status(`full backup`) // Use a time slightly in the past to avoid "cannot specify timestamp in the future" errors. tFull := fmt.Sprint(timeutil.Now().Add(time.Second * -2).UnixNano()) m = newMonitor(ctx, c) m.Go(func(ctx context.Context) error { + t.Status(`full backup`) _, err := conn.ExecContext(ctx, `BACKUP tpcc.* TO $1 AS OF SYSTEM TIME `+tFull, fullDir, @@ -146,10 +157,10 @@ func registerBackup(r *testRegistry) { return } - t.Status(`incremental backup`) tInc := fmt.Sprint(timeutil.Now().Add(time.Second * -2).UnixNano()) m = newMonitor(ctx, c) m.Go(func(ctx context.Context) error { + t.Status(`incremental backup`) _, err := conn.ExecContext(ctx, `BACKUP tpcc.* TO $1 AS OF SYSTEM TIME `+tInc+` INCREMENTAL FROM $2`, incDir, @@ -190,11 +201,16 @@ func registerBackup(r *testRegistry) { } t.Status(`fingerprint`) + // TODO(adityamaru): Pull the fingerprint logic into a utility method + // which can be shared by multiple roachtests. 
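+			// The table list is fetched in sorted order so that the per-table
+			// fingerprints land in the strings.Builder deterministically,
+			// letting two invocations be compared byte-for-byte.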
fingerprint := func(db string, asof string) (string, error) {
 			var b strings.Builder

 			var tables []string
-			rows, err := conn.QueryContext(ctx, fmt.Sprintf("SHOW TABLES FROM %s", db))
+			rows, err := conn.QueryContext(
+				ctx,
+				fmt.Sprintf("SELECT table_name FROM [SHOW TABLES FROM %s] ORDER BY table_name", db),
+			)
 			if err != nil {
 				return "", err
 			}
@@ -259,4 +275,5 @@ func registerBackup(r *testRegistry) {
 			m.Wait()
 		},
 	})
+
 }
diff --git a/pkg/cmd/roachtest/bank.go b/pkg/cmd/roachtest/bank.go
index 9ed317d93b0e..158cfa20b06e 100644
--- a/pkg/cmd/roachtest/bank.go
+++ b/pkg/cmd/roachtest/bank.go
@@ -28,7 +28,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/util/retry"
 	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
-	"github.com/pkg/errors"
+	"github.com/cockroachdb/errors"
 )

 const (
@@ -295,7 +295,7 @@ func (s *bankState) startSplitMonkey(ctx context.Context, d time.Duration, c *cl
 			c.l.Printf("round %d: splitting key %v\n", curRound, key)
 			_, err := client.db.ExecContext(ctx,
 				fmt.Sprintf(`ALTER TABLE bank.accounts SPLIT AT VALUES (%d)`, key))
-			if err != nil && !(pgerror.IsSQLRetryableError(err) || isExpectedRelocateError(err)) {
+			if err != nil && !(pgerror.IsSQLRetryableError(err) || IsExpectedRelocateError(err)) {
 				s.errChan <- err
 			}
 			client.RUnlock()
@@ -316,7 +316,7 @@ func (s *bankState) startSplitMonkey(ctx context.Context, d time.Duration, c *cl
 				curRound, key, nodes[1:])

 			_, err := client.db.ExecContext(ctx, relocateQuery)
-			if err != nil && !(pgerror.IsSQLRetryableError(err) || isExpectedRelocateError(err)) {
+			if err != nil && !(pgerror.IsSQLRetryableError(err) || IsExpectedRelocateError(err)) {
 				s.errChan <- err
 			}
 			for i := 0; i < len(s.clients); i++ {
@@ -327,24 +327,26 @@ func (s *bankState) startSplitMonkey(ctx context.Context, d time.Duration, c *cl
 	}()
 }

-func isExpectedRelocateError(err error) bool {
-	// See:
-	// https://github.com/cockroachdb/cockroach/issues/33732
-	// https://github.com/cockroachdb/cockroach/issues/33708
-	// https://github.cm/cockroachdb/cockroach/issues/34012
-	// https://github.com/cockroachdb/cockroach/issues/33683#issuecomment-454889149
-	// for more failure modes not caught here. We decided to avoid adding
-	// to this catchall and to fix the root causes instead.
-	// We've also seen "breaker open" errors here.
-	whitelist := []string{
+// IsExpectedRelocateError maintains an allowlist of errors related to
+// atomic-replication-changes we want to ignore / retry on for tests.
+// See:
+// https://github.com/cockroachdb/cockroach/issues/33732
+// https://github.com/cockroachdb/cockroach/issues/33708
+// https://github.com/cockroachdb/cockroach/issues/34012
+// https://github.com/cockroachdb/cockroach/issues/33683#issuecomment-454889149
+// for more failure modes not caught here.
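+//
+// Callers swallow matching errors rather than failing the test, e.g. (from
+// startSplitMonkey above):
+//
+//	if err != nil && !(pgerror.IsSQLRetryableError(err) || IsExpectedRelocateError(err)) {
+//		s.errChan <- err
+//	}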
+func IsExpectedRelocateError(err error) bool {
+	allowlist := []string{
 		"descriptor changed",
 		"unable to remove replica .* which is not present",
 		"unable to add replica .* which is already present",
 		"received invalid ChangeReplicasTrigger .* to remove self",
 		"failed to apply snapshot: raft group deleted",
 		"snapshot failed:",
+		"breaker open",
+		"unable to select removal target", // https://github.com/cockroachdb/cockroach/issues/49513
 	}
-	pattern := "(" + strings.Join(whitelist, "|") + ")"
+	pattern := "(" + strings.Join(allowlist, "|") + ")"
 	return testutils.IsError(err, pattern)
 }
diff --git a/pkg/cmd/roachtest/blacklist_test.go b/pkg/cmd/roachtest/blocklist_test.go
similarity index 76%
rename from pkg/cmd/roachtest/blacklist_test.go
rename to pkg/cmd/roachtest/blocklist_test.go
index 1fad6dec5796..2f7cca3fc076 100644
--- a/pkg/cmd/roachtest/blacklist_test.go
+++ b/pkg/cmd/roachtest/blocklist_test.go
@@ -19,27 +19,29 @@ import (
 	"strings"
 	"testing"

+	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
 	"github.com/google/go-github/github"
 	"golang.org/x/oauth2"
 )

 const githubAPITokenEnv = "GITHUB_API_TOKEN"
-const runBlacklistEnv = "RUN_BLACKLIST_TEST"
+const runBlocklistEnv = "RUN_BLOCKLIST_TEST"

-func TestBlacklists(t *testing.T) {
-	if _, ok := os.LookupEnv(runBlacklistEnv); !ok {
-		t.Skipf("Blackist test is only run if %s is set", runBlacklistEnv)
+func TestBlocklists(t *testing.T) {
+	if _, ok := os.LookupEnv(runBlocklistEnv); !ok {
+		skip.IgnoreLintf(t, "Blocklist test is only run if %s is set", runBlocklistEnv)
 	}
-	blacklists := map[string]blacklist{
-		"hibernate":  hibernateBlackList20_1,
-		"pgjdbc":     pgjdbcBlackList20_1,
-		"psycopg":    psycopgBlackList20_1,
-		"django":     djangoBlacklist20_1,
-		"sqlAlchemy": sqlAlchemyBlacklist20_1,
-		"libpq":      libPQBlacklist20_1,
-		"gopg":       gopgBlackList20_1,
-		"pgx":        pgxBlacklist20_1,
+	blocklists := map[string]blocklist{
+		"hibernate":    hibernateBlockList20_2,
+		"pgjdbc":       pgjdbcBlockList20_2,
+		"psycopg":      psycopgBlockList20_2,
+		"django":       djangoBlocklist20_2,
+		"sqlAlchemy":   sqlAlchemyBlocklist20_2,
+		"libpq":        libPQBlocklist20_2,
+		"gopg":         gopgBlockList20_2,
+		"pgx":          pgxBlocklist20_2,
+		"activerecord": activeRecordBlockList20_2,
 	}
 	type reasonCount struct {
 		reason string
@@ -48,7 +50,7 @@ func TestBlacklists(t *testing.T) {
 	}

 	var failureMap = make(map[string]*reasonCount, 200)
-	for suite, bl := range blacklists {
+	for suite, bl := range blocklists {
 		for _, reason := range bl {
 			if _, ok := failureMap[reason]; !ok {
 				failureMap[reason] = &reasonCount{
@@ -106,6 +108,6 @@ func TestBlacklists(t *testing.T) {
 	}

 	if anyClosed {
-		t.Fatal("Some closed issues appear in blacklists")
+		t.Fatal("Some closed issues appear in blocklists")
 	}
 }
diff --git a/pkg/cmd/roachtest/canary.go b/pkg/cmd/roachtest/canary.go
index 8c0e823b7a09..350e0bb9cc5e 100644
--- a/pkg/cmd/roachtest/canary.go
+++ b/pkg/cmd/roachtest/canary.go
@@ -28,35 +28,35 @@ import (

 // TODO(bram): There are more common elements between all the canary tests,
 // factor more of them into here.

-// blacklist is a lists of known test errors and failures.
-type blacklist map[string]string
+// blocklist is a list of known test errors and failures.
+type blocklist map[string]string

-// blacklistForVersion contains both a blacklist of known test errors and
+// blocklistForVersion contains both a blocklist of known test errors and
 // failures but also an optional ignorelist for flaky tests.
 // When the test suite is run, the results are compared to this list.
-// Any passed test that is not on this blacklist is reported as PASS - expected
-// Any passed test that is on this blacklist is reported as PASS - unexpected
-// Any failed test that is on this blacklist is reported as FAIL - expected
-// Any failed test that is not on blackthis list is reported as FAIL - unexpected
-// Any test on this blacklist that is not run is reported as FAIL - not run
+// Any passed test that is not on this blocklist is reported as PASS - expected
+// Any passed test that is on this blocklist is reported as PASS - unexpected
+// Any failed test that is on this blocklist is reported as FAIL - expected
+// Any failed test that is not on this blocklist is reported as FAIL - unexpected
+// Any test on this blocklist that is not run is reported as FAIL - not run
 // Any test in the ignorelist is reported as SKIP if it is run
-type blacklistForVersion struct {
+type blocklistForVersion struct {
 	versionPrefix string
-	blacklistname string
-	blacklist     blacklist
+	blocklistname string
+	blocklist     blocklist
 	ignorelistname string
-	ignorelist     blacklist
+	ignorelist     blocklist
 }

-type blacklistsForVersion []blacklistForVersion
+type blocklistsForVersion []blocklistForVersion

-// getLists returns the appropriate blacklist and ignorelist based on the
+// getLists returns the appropriate blocklist and ignorelist based on the
 // cockroach version. This check only looks to ensure that the prefix
 // matches.
-func (b blacklistsForVersion) getLists(version string) (string, blacklist, string, blacklist) {
+func (b blocklistsForVersion) getLists(version string) (string, blocklist, string, blocklist) {
 	for _, info := range b {
 		if strings.HasPrefix(version, info.versionPrefix) {
-			return info.blacklistname, info.blacklist, info.ignorelistname, info.ignorelist
+			return info.blocklistname, info.blocklist, info.ignorelistname, info.ignorelist
 		}
 	}
 	return "", nil, "", nil
diff --git a/pkg/cmd/roachtest/cancel.go b/pkg/cmd/roachtest/cancel.go
index 6e65ebf0e872..8f841b129510 100644
--- a/pkg/cmd/roachtest/cancel.go
+++ b/pkg/cmd/roachtest/cancel.go
@@ -35,6 +35,7 @@ import (
 func registerCancel(r *testRegistry) {
 	runCancel := func(ctx context.Context, t *test, c *cluster,
 		queries []string, warehouses int, useDistsql bool) {
+		t.Skip("skipping flaky cancel/tpcc test", "test needs to be updated; see https://github.com/cockroachdb/cockroach/issues/42103")
 		c.Put(ctx, cockroach, "./cockroach", c.All())
 		c.Put(ctx, workload, "./workload", c.All())
 		c.Start(ctx, t, c.All())
diff --git a/pkg/cmd/roachtest/cdc.go b/pkg/cmd/roachtest/cdc.go
index 30a7738cdc19..697c99f4d94b 100644
--- a/pkg/cmd/roachtest/cdc.go
+++ b/pkg/cmd/roachtest/cdc.go
@@ -26,7 +26,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
 	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
-	"github.com/cockroachdb/cockroach/pkg/util/version"
 	"github.com/cockroachdb/errors"
 	"github.com/codahale/hdrhistogram"
 )
@@ -43,11 +42,9 @@ type cdcTestArgs struct {
 	tpccWarehouseCount int
 	workloadDuration   string
 	initialScan        bool
-	rangefeed          bool
 	kafkaChaos         bool
 	crdbChaos          bool
 	cloudStorageSink   bool
-	fixturesImport     bool

 	targetInitialScanLatency time.Duration
 	targetSteadyLatency      time.Duration
@@ -55,12 +52,6 @@
 }

 func cdcBasicTest(ctx context.Context, t *test, c *cluster, args cdcTestArgs) {
-	// Skip the poller test on v19.2. After 19.2 is out, we should likely delete
-	// the test entirely.
- if !args.rangefeed && t.buildVersion.Compare(version.MustParse(`v19.1.0-0`)) > 0 { - t.Skip("no poller in >= v19.2.0", "") - } - crdbNodes := c.Range(1, c.spec.NodeCount-1) workloadNode := c.Node(c.spec.NodeCount) kafkaNode := c.Node(c.spec.NodeCount) @@ -70,16 +61,7 @@ func cdcBasicTest(ctx context.Context, t *test, c *cluster, args cdcTestArgs) { db := c.Conn(ctx, 1) defer stopFeeds(db) - if _, err := db.Exec( - `SET CLUSTER SETTING kv.rangefeed.enabled = $1`, args.rangefeed, - ); err != nil { - t.Fatal(err) - } - // The 2.1 branch doesn't have this cluster setting, so ignore the error if - // it's about an unknown cluster setting - if _, err := db.Exec( - `SET CLUSTER SETTING changefeed.push.enabled = $1`, args.rangefeed, - ); err != nil && !strings.Contains(err.Error(), "unknown cluster setting") { + if _, err := db.Exec(`SET CLUSTER SETTING kv.rangefeed.enabled = true`); err != nil { t.Fatal(err) } kafka := kafkaManager{ @@ -87,14 +69,6 @@ func cdcBasicTest(ctx context.Context, t *test, c *cluster, args cdcTestArgs) { nodes: kafkaNode, } - // Workaround for #35947. The optimizer currently plans a bad query for TPCC - // when it has stats, so disable stats for now. - if _, err := db.Exec( - `SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false`, - ); err != nil && !strings.Contains(err.Error(), "unknown cluster setting") { - t.Fatal(err) - } - var sinkURI string if args.cloudStorageSink { ts := timeutil.Now().Format(`20060102150405`) @@ -126,7 +100,7 @@ func cdcBasicTest(ctx context.Context, t *test, c *cluster, args cdcTestArgs) { // value" errors #34025. tpcc.tolerateErrors = true - tpcc.install(ctx, c, args.fixturesImport) + tpcc.install(ctx, c) // TODO(dan,ajwerner): sleeping momentarily before running the workload // mitigates errors like "error in newOrder: missing stock row" from tpcc. time.Sleep(2 * time.Second) @@ -233,6 +207,7 @@ func cdcBasicTest(ctx context.Context, t *test, c *cluster, args cdcTestArgs) { } func runCDCBank(ctx context.Context, t *test, c *cluster) { + // Make the logs dir on every node to work around the `roachprod get logs` // spam. c.Run(ctx, c.All(), `mkdir -p logs`) @@ -371,7 +346,7 @@ func runCDCBank(ctx context.Context, t *test, c *cluster) { } } if failures := v.Failures(); len(failures) > 0 { - return errors.New("validator failures:\n" + strings.Join(failures, "\n")) + return errors.Newf("validator failures:\n%s", strings.Join(failures, "\n")) } return nil }) @@ -382,6 +357,7 @@ func runCDCBank(ctx context.Context, t *test, c *cluster) { // end-to-end (including the schema registry default of requiring backward // compatibility within a topic). func runCDCSchemaRegistry(ctx context.Context, t *test, c *cluster) { + crdbNodes, kafkaNode := c.Node(1), c.Node(1) c.Put(ctx, cockroach, "./cockroach", crdbNodes) c.Start(ctx, t, crdbNodes) @@ -505,77 +481,58 @@ func runCDCSchemaRegistry(ctx context.Context, t *test, c *cluster) { } func registerCDC(r *testRegistry) { - useRangeFeed := true - if r.buildVersion.Compare(version.MustParse(`v19.1.0-0`)) < 0 { - // RangeFeed is not production ready in 2.1, so run the tests with the - // poller. 
- useRangeFeed = false - } - r.Add(testSpec{ - Name: fmt.Sprintf("cdc/tpcc-1000/rangefeed=%t", useRangeFeed), - Owner: `cdc`, - MinVersion: "v2.1.0", - Cluster: makeClusterSpec(4, cpu(16)), + Name: fmt.Sprintf("cdc/tpcc-1000"), + Owner: OwnerCDC, + Cluster: makeClusterSpec(4, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { cdcBasicTest(ctx, t, c, cdcTestArgs{ workloadType: tpccWorkloadType, tpccWarehouseCount: 1000, workloadDuration: "120m", - rangefeed: useRangeFeed, targetInitialScanLatency: 3 * time.Minute, targetSteadyLatency: 10 * time.Minute, }) }, }) r.Add(testSpec{ - Name: fmt.Sprintf("cdc/initial-scan/rangefeed=%t", useRangeFeed), - Owner: `cdc`, - MinVersion: "v2.1.0", - Cluster: makeClusterSpec(4, cpu(16)), + Name: fmt.Sprintf("cdc/initial-scan"), + Owner: OwnerCDC, + Cluster: makeClusterSpec(4, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { cdcBasicTest(ctx, t, c, cdcTestArgs{ workloadType: tpccWorkloadType, tpccWarehouseCount: 100, workloadDuration: "30m", initialScan: true, - rangefeed: useRangeFeed, targetInitialScanLatency: 30 * time.Minute, targetSteadyLatency: time.Minute, }) }, }) r.Add(testSpec{ - Name: "cdc/poller/rangefeed=false", - Owner: `cdc`, - // When testing a 2.1 binary, we use the poller for all the other tests - // and this is close enough to cdc/tpcc-1000 test to be redundant, so - // skip it. - MinVersion: "v19.1.0", - Cluster: makeClusterSpec(4, cpu(16)), + Name: "cdc/poller/rangefeed=false", + Owner: OwnerCDC, + Cluster: makeClusterSpec(4, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { cdcBasicTest(ctx, t, c, cdcTestArgs{ workloadType: tpccWorkloadType, tpccWarehouseCount: 1000, workloadDuration: "30m", - rangefeed: false, targetInitialScanLatency: 30 * time.Minute, targetSteadyLatency: 2 * time.Minute, }) }, }) r.Add(testSpec{ - Name: fmt.Sprintf("cdc/sink-chaos/rangefeed=%t", useRangeFeed), - Owner: `cdc`, - // TODO(dan): Re-enable this test on 2.1 if we decide to backport #36852. - MinVersion: "v19.1.0", - Cluster: makeClusterSpec(4, cpu(16)), + Name: fmt.Sprintf("cdc/sink-chaos"), + Owner: `cdc`, + Cluster: makeClusterSpec(4, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { cdcBasicTest(ctx, t, c, cdcTestArgs{ workloadType: tpccWorkloadType, tpccWarehouseCount: 100, workloadDuration: "30m", - rangefeed: useRangeFeed, kafkaChaos: true, targetInitialScanLatency: 3 * time.Minute, targetSteadyLatency: 5 * time.Minute, @@ -583,18 +540,15 @@ func registerCDC(r *testRegistry) { }, }) r.Add(testSpec{ - Name: fmt.Sprintf("cdc/crdb-chaos/rangefeed=%t", useRangeFeed), - Owner: `cdc`, - Skip: "#37716", - // TODO(dan): Re-enable this test on 2.1 if we decide to backport #36852. 
- MinVersion: "v19.1.0", - Cluster: makeClusterSpec(4, cpu(16)), + Name: fmt.Sprintf("cdc/crdb-chaos"), + Owner: `cdc`, + Skip: "#37716", + Cluster: makeClusterSpec(4, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { cdcBasicTest(ctx, t, c, cdcTestArgs{ workloadType: tpccWorkloadType, tpccWarehouseCount: 100, workloadDuration: "30m", - rangefeed: useRangeFeed, crdbChaos: true, targetInitialScanLatency: 3 * time.Minute, // TODO(dan): It should be okay to drop this as low as 2 to 3 minutes, @@ -606,9 +560,8 @@ func registerCDC(r *testRegistry) { }, }) r.Add(testSpec{ - Name: fmt.Sprintf("cdc/ledger/rangefeed=%t", useRangeFeed), - Owner: `cdc`, - MinVersion: "v2.1.0", + Name: fmt.Sprintf("cdc/ledger"), + Owner: `cdc`, // TODO(mrtracy): This workload is designed to be running on a 20CPU nodes, // but this cannot be allocated without some sort of configuration outside // of this test. Look into it. @@ -618,7 +571,6 @@ func registerCDC(r *testRegistry) { workloadType: ledgerWorkloadType, workloadDuration: "30m", initialScan: true, - rangefeed: useRangeFeed, targetInitialScanLatency: 10 * time.Minute, targetSteadyLatency: time.Minute, targetTxnPerSecond: 575, @@ -626,10 +578,9 @@ func registerCDC(r *testRegistry) { }, }) r.Add(testSpec{ - Name: "cdc/cloud-sink-gcs/rangefeed=true", - Owner: `cdc`, - MinVersion: "v19.1.0", - Cluster: makeClusterSpec(4, cpu(16)), + Name: "cdc/cloud-sink-gcs/rangefeed=true", + Owner: `cdc`, + Cluster: makeClusterSpec(4, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { cdcBasicTest(ctx, t, c, cdcTestArgs{ workloadType: tpccWorkloadType, @@ -640,28 +591,24 @@ func registerCDC(r *testRegistry) { tpccWarehouseCount: 50, workloadDuration: "30m", initialScan: true, - rangefeed: true, cloudStorageSink: true, - fixturesImport: true, targetInitialScanLatency: 30 * time.Minute, targetSteadyLatency: time.Minute, }) }, }) r.Add(testSpec{ - Name: "cdc/bank", - Owner: `cdc`, - MinVersion: "v2.1.0", - Cluster: makeClusterSpec(4), + Name: "cdc/bank", + Owner: `cdc`, + Cluster: makeClusterSpec(4), Run: func(ctx context.Context, t *test, c *cluster) { runCDCBank(ctx, t, c) }, }) r.Add(testSpec{ - Name: "cdc/schemareg", - Owner: `cdc`, - MinVersion: "v19.1.0", - Cluster: makeClusterSpec(1), + Name: "cdc/schemareg", + Owner: `cdc`, + Cluster: makeClusterSpec(1), Run: func(ctx context.Context, t *test, c *cluster) { runCDCSchemaRegistry(ctx, t, c) }, @@ -684,7 +631,14 @@ func (k kafkaManager) install(ctx context.Context) { k.c.status("installing kafka") folder := k.basePath() k.c.Run(ctx, k.nodes, `mkdir -p `+folder) - k.c.Run(ctx, k.nodes, `curl -s https://packages.confluent.io/archive/4.0/confluent-oss-4.0.0-2.11.tar.gz | tar -xz -C `+folder) + k.c.Run( + ctx, + k.nodes, + fmt.Sprintf( + `for i in $(seq 1 5); do curl --retry 3 --retry-delay 1 -o /tmp/confluent.tar.gz https://storage.googleapis.com/cockroach-fixtures/tools/confluent-oss-4.0.0-2.11.tar.gz && break || sleep 15; done && tar xvf /tmp/confluent.tar.gz -C %s`, + folder, + ), + ) if !k.c.isLocal() { k.c.Run(ctx, k.nodes, `mkdir -p logs`) k.c.Run(ctx, k.nodes, `sudo apt-get -q update 2>&1 > logs/apt-get-update.log`) @@ -777,16 +731,11 @@ type tpccWorkload struct { tolerateErrors bool } -func (tw *tpccWorkload) install(ctx context.Context, c *cluster, fixturesImport bool) { - command := `./workload fixtures load` - if fixturesImport { - // For fixtures import, use the version built into the cockroach binary so - // the tpcc workload-versions match on release branches. 
- command = `./cockroach workload fixtures import` - } +func (tw *tpccWorkload) install(ctx context.Context, c *cluster) { + // For fixtures import, use the version built into the cockroach binary so + // the tpcc workload-versions match on release branches. c.Run(ctx, tw.workloadNodes, fmt.Sprintf( - `%s tpcc --warehouses=%d --checks=false {pgurl%s}`, - command, + `./cockroach workload fixtures import tpcc --warehouses=%d --checks=false {pgurl%s}`, tw.tpccWarehouseCount, tw.sqlNodes.randNode(), )) diff --git a/pkg/cmd/roachtest/chaos.go b/pkg/cmd/roachtest/chaos.go index 34cc6e93c5ff..c6cdcd3314ec 100644 --- a/pkg/cmd/roachtest/chaos.go +++ b/pkg/cmd/roachtest/chaos.go @@ -15,7 +15,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // ChaosTimer configures a chaos schedule. diff --git a/pkg/cmd/roachtest/clearrange.go b/pkg/cmd/roachtest/clearrange.go index 270f9f313fa0..e09881b067e1 100644 --- a/pkg/cmd/roachtest/clearrange.go +++ b/pkg/cmd/roachtest/clearrange.go @@ -16,6 +16,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/util/version" ) func registerClearRange(r *testRegistry) { @@ -28,7 +29,7 @@ func registerClearRange(r *testRegistry) { // to <3:30h but it varies. Timeout: 5*time.Hour + 90*time.Minute, MinVersion: "v19.1.0", - Cluster: makeClusterSpec(10), + Cluster: makeClusterSpec(10, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { runClearRange(ctx, t, c, checks) }, @@ -68,6 +69,14 @@ func runClearRange(ctx context.Context, t *test, c *cluster, aggressiveChecks bo t.Status(`restoring tiny table`) defer t.WorkerStatus() + if t.buildVersion.AtLeast(version.MustParse("v19.2.0")) { + conn := c.Conn(ctx, 1) + if _, err := conn.ExecContext(ctx, `SET CLUSTER SETTING kv.bulk_io_write.concurrent_addsstable_requests = $1`, c.spec.NodeCount); err != nil { + t.Fatal(err) + } + conn.Close() + } + // Use a 120s connect timeout to work around the fact that the server will // declare itself ready before it's actually 100% ready. 
See:
 	// https://github.com/cockroachdb/cockroach/issues/34897#issuecomment-465089057
@@ -157,6 +166,7 @@ func runClearRange(ctx context.Context, t *test, c *cluster, aggressiveChecks bo
 			return err
 		}

+		t.WorkerStatus("waiting for ~", curBankRanges, " merges to complete (and for at least ", deadline.Sub(timeutil.Now()), " to pass)")
 		select {
 		case <-after:
 		case <-ctx.Done():
diff --git a/pkg/cmd/roachtest/cli.go b/pkg/cmd/roachtest/cli.go
index 555382f86d1a..e569a3b0bbaa 100644
--- a/pkg/cmd/roachtest/cli.go
+++ b/pkg/cmd/roachtest/cli.go
@@ -28,7 +28,9 @@ func runCLINodeStatus(ctx context.Context, t *test, c *cluster) {
 	lastWords := func(s string) []string {
 		var result []string
-		for _, line := range strings.Split(s, "\n") {
+		s = elideInsecureDeprecationNotice(s)
+		lines := strings.Split(s, "\n")
+		for _, line := range lines {
 			words := strings.Fields(line)
 			if n := len(words); n > 0 {
 				result = append(result, words[n-2]+" "+words[n-1])
diff --git a/pkg/cmd/roachtest/clock_jump_crash.go b/pkg/cmd/roachtest/clock_jump_crash.go
index 0615c9799803..80b02198c636 100644
--- a/pkg/cmd/roachtest/clock_jump_crash.go
+++ b/pkg/cmd/roachtest/clock_jump_crash.go
@@ -49,7 +49,7 @@ func runClockJump(ctx context.Context, t *test, c *cluster, tc clockJumpTestCase
 	// Wait for Cockroach to process the above cluster setting
 	time.Sleep(10 * time.Second)

-	if !isAlive(db) {
+	if !isAlive(db, c.l) {
 		t.Fatal("Node unexpectedly crashed")
 	}

@@ -65,7 +65,7 @@ func runClockJump(ctx context.Context, t *test, c *cluster, tc clockJumpTestCase
 		// seconds before checking whether the node is alive and
 		// restarting it if not.
 		time.Sleep(3 * time.Second)
-		if !isAlive(db) {
+		if !isAlive(db, c.l) {
 			c.Start(ctx, t, c.Node(1))
 		}
 	}()
@@ -76,7 +76,7 @@ func runClockJump(ctx context.Context, t *test, c *cluster, tc clockJumpTestCase
 	time.Sleep(3 * time.Second)

 	t.Status("validating health")
-	aliveAfterOffset = isAlive(db)
+	aliveAfterOffset = isAlive(db, c.l)
 	if aliveAfterOffset != tc.aliveAfterOffset {
 		t.Fatalf("Expected node health %v, got %v", tc.aliveAfterOffset, aliveAfterOffset)
 	}
diff --git a/pkg/cmd/roachtest/clock_monotonic.go b/pkg/cmd/roachtest/clock_monotonic.go
index 7844b19c94fc..47220a0bfcc6 100644
--- a/pkg/cmd/roachtest/clock_monotonic.go
+++ b/pkg/cmd/roachtest/clock_monotonic.go
@@ -48,7 +48,7 @@ func runClockMonotonicity(ctx context.Context, t *test, c *cluster, tc clockMono
 	// Wait for Cockroach to process the above cluster setting
 	time.Sleep(10 * time.Second)

-	if !isAlive(db) {
+	if !isAlive(db, c.l) {
 		t.Fatal("Node unexpectedly crashed")
 	}

@@ -59,7 +59,7 @@ func runClockMonotonicity(ctx context.Context, t *test, c *cluster, tc clockMono
 	// Recover from the injected clock offset after validation completes.
 	defer func() {
-		if !isAlive(db) {
+		if !isAlive(db, c.l) {
 			t.Fatal("Node unexpectedly crashed")
 		}
 		// Stop cockroach node before recovering from clock offset as this clock
@@ -70,7 +70,7 @@ func runClockMonotonicity(ctx context.Context, t *test, c *cluster, tc clockMono
 		offsetInjector.recover(ctx, c.spec.NodeCount)

 		c.Start(ctx, t, c.Node(c.spec.NodeCount))
-		if !isAlive(db) {
+		if !isAlive(db, c.l) {
 			t.Fatal("Node unexpectedly crashed")
 		}
 	}()
@@ -83,7 +83,7 @@ func runClockMonotonicity(ctx context.Context, t *test, c *cluster, tc clockMono

 	t.Status("starting cockroach post offset")
 	c.Start(ctx, t, c.Node(c.spec.NodeCount))

-	if !isAlive(db) {
+	if !isAlive(db, c.l) {
 		t.Fatal("Node unexpectedly crashed")
 	}
diff --git a/pkg/cmd/roachtest/clock_util.go b/pkg/cmd/roachtest/clock_util.go
index fd5887210e99..69d4cf45eb4c 100644
--- a/pkg/cmd/roachtest/clock_util.go
+++ b/pkg/cmd/roachtest/clock_util.go
@@ -17,13 +17,22 @@ import (
 	"time"
 )

-// isAlive returns whether the node queried by db is alive
-func isAlive(db *gosql.DB) bool {
-	_, err := db.Exec("SHOW DATABASES")
-	return err == nil
+// isAlive returns whether the node queried by db is alive.
+func isAlive(db *gosql.DB, l *logger) bool {
+	// The cluster might have just restarted, in which case the first call to db
+	// might return an error. In fact, the first db.Ping() reliably returns an
+	// error (but a db.Exec() only seldom returns an error). So, we're gonna
+	// Ping() twice to allow connections to be re-established.
+	_ = db.Ping()
+	if err := db.Ping(); err != nil {
+		l.Printf("isAlive returned err=%v (%T)", err, err)
+	} else {
+		return true
+	}
+	return false
 }

-// dbUnixEpoch returns the current time in db
+// dbUnixEpoch returns the current time in db.
 func dbUnixEpoch(db *gosql.DB) (float64, error) {
 	var epoch float64
 	if err := db.QueryRow("SELECT now()::DECIMAL").Scan(&epoch); err != nil {
@@ -32,13 +41,13 @@ func dbUnixEpoch(db *gosql.DB) (float64, error) {
 	return epoch, nil
 }

-// offsetInjector is used to inject clock offsets in roachtests
+// offsetInjector is used to inject clock offsets in roachtests.
 type offsetInjector struct {
 	c        *cluster
 	deployed bool
 }

-// deploy installs ntp and downloads / compiles bumptime used to create a clock offset
+// deploy installs ntp and downloads / compiles bumptime used to create a clock offset.
 func (oi *offsetInjector) deploy(ctx context.Context) error {
 	if err := oi.c.RunE(ctx, oi.c.All(), "test -x ./bumptime"); err == nil {
 		oi.deployed = true
@@ -71,7 +80,7 @@ func (oi *offsetInjector) deploy(ctx context.Context) error {
 	return nil
 }

-// offset injects a offset of s into the node with the given nodeID
+// offset injects an offset of s into the node with the given nodeID.
 func (oi *offsetInjector) offset(ctx context.Context, nodeID int, s time.Duration) {
 	if !oi.deployed {
 		oi.c.t.Fatal("Offset injector must be deployed before injecting a clock offset")
@@ -85,7 +94,7 @@ func (oi *offsetInjector) offset(ctx context.Context, nodeID int, s time.Duratio
 }

 // recover force syncs time on the node with the given nodeID to recover
-// from any offsets
+// from any offsets.
 func (oi *offsetInjector) recover(ctx context.Context, nodeID int) {
 	if !oi.deployed {
 		oi.c.t.Fatal("Offset injector must be deployed before recovering from clock offsets")
@@ -106,7 +115,7 @@ func (oi *offsetInjector) recover(ctx context.Context, nodeID int) {
 }

 // newOffsetInjector creates an offsetInjector which can be used to inject
-// and recover from clock offsets
+// and recover from clock offsets.
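// The double-Ping contract of isAlive above, reduced to a self-contained
// sketch (pingTwice is a hypothetical helper; gosql is the usual alias for
// database/sql in these files):
func pingTwice(db *gosql.DB) error {
	// A freshly restarted node tends to fail the first Ping while the pool
	// re-establishes its connections, so the first result is thrown away.
	_ = db.Ping()
	return db.Ping()
}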
func newOffsetInjector(c *cluster) *offsetInjector { return &offsetInjector{c: c} } diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go index ae45b364d5fa..4ec835da5649 100644 --- a/pkg/cmd/roachtest/cluster.go +++ b/pkg/cmd/roachtest/cluster.go @@ -27,6 +27,7 @@ import ( "os/user" "path/filepath" "regexp" + "runtime" "sort" "strconv" "strings" @@ -52,18 +53,21 @@ const ( ) var ( - local bool - cockroach string - cloud = gce - encrypt encryptValue = "false" - instanceType string - workload string - roachprod string - buildTag string - clusterName string - clusterWipe bool - zonesF string - teamCity bool + local bool + cockroach string + libraryFilePaths []string + cloud = gce + encrypt encryptValue = "false" + instanceType string + localSSD bool + workload string + roachprod string + createArgs []string + buildTag string + clusterName string + clusterWipe bool + zonesF string + teamCity bool ) type encryptValue string @@ -115,7 +119,7 @@ func filepathAbs(path string) (string, error) { return path, nil } -func findBinary(binary, defValue string) (string, error) { +func findBinary(binary, defValue string) (abspath string, err error) { if binary == "" { binary = defValue } @@ -124,44 +128,67 @@ func findBinary(binary, defValue string) (string, error) { if fi, err := os.Stat(binary); err == nil && fi.Mode().IsRegular() && (fi.Mode()&0111) != 0 { return filepathAbs(binary) } + return findBinaryOrLibrary("bin", binary) +} + +func findLibrary(libraryName string) (string, error) { + suffix := ".so" + if local { + switch runtime.GOOS { + case "linux": + case "freebsd": + case "openbsd": + case "dragonfly": + case "windows": + suffix = ".dll" + case "darwin": + suffix = ".dylib" + default: + return "", errors.Newf("failed to find suffix for runtime %s", runtime.GOOS) + } + } + return findBinaryOrLibrary("lib", libraryName+suffix) +} +func findBinaryOrLibrary(binOrLib string, name string) (string, error) { // Find the binary to run and translate it to an absolute path. First, look // for the binary in PATH. - path, err := exec.LookPath(binary) + path, err := exec.LookPath(name) if err != nil { - if strings.HasPrefix(binary, "/") { + if strings.HasPrefix(name, "/") { return "", errors.WithStack(err) } - // We're unable to find the binary in PATH and "binary" is a relative path: + + // We're unable to find the name in PATH and "name" is a relative path: // look in the cockroach repo. 
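// The suffix selection in findLibrary above boils down to the following; a
// hedged sketch (libSuffix is hypothetical; non-local runs always target the
// linux docker build, hence ".so"):
libSuffix := func(local bool, goos string) string {
	if !local {
		return ".so" // remote artifacts come from the linux docker build
	}
	switch goos {
	case "darwin":
		return ".dylib"
	case "windows":
		return ".dll"
	default:
		return ".so" // linux and the BSDs keep the default
	}
}
_ = libSuffix(true, runtime.GOOS) // e.g. ".dylib" on a darwin laptop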
 		gopath := os.Getenv("GOPATH")
 		if gopath == "" {
 			gopath = filepath.Join(os.Getenv("HOME"), "go")
 		}

-		var binSuffix string
+		var suffix string
 		if !local {
-			binSuffix = ".docker_amd64"
+			suffix = ".docker_amd64"
 		}
 		dirs := []string{
 			filepath.Join(gopath, "/src/github.com/cockroachdb/cockroach/"),
-			filepath.Join(gopath, "/src/github.com/cockroachdb/cockroach/bin"+binSuffix),
-			filepath.Join(os.ExpandEnv("$PWD"), "bin"+binSuffix),
+			filepath.Join(gopath, "/src/github.com/cockroachdb/cockroach", binOrLib+suffix),
+			filepath.Join(os.ExpandEnv("$PWD"), binOrLib+suffix),
 		}
 		for _, dir := range dirs {
-			path = filepath.Join(dir, binary)
+			path = filepath.Join(dir, name)
 			var err2 error
 			path, err2 = exec.LookPath(path)
 			if err2 == nil {
 				return filepathAbs(path)
 			}
 		}
-		return "", fmt.Errorf("failed to find %q in $PATH or any of %s", binary, dirs)
+		return "", fmt.Errorf("failed to find %q in $PATH or any of %s", name, dirs)
 	}
 	return filepathAbs(path)
 }

-func initBinaries() {
+func initBinariesAndLibraries() {
 	// If we're running against an existing "local" cluster, force the local flag
 	// to true in order to get the "local" test configurations.
 	if clusterName == "local" {
@@ -190,6 +217,16 @@ func initBinaries() {
 		fmt.Fprintf(os.Stderr, "%+v\n", err)
 		os.Exit(1)
 	}
+
+	// In v20.2 or higher, optionally expect certain library files to exist.
+	// Since they may not be found in older versions, do not hard error if they are not found.
+	for _, libraryName := range []string{"libgeos", "libgeos_c"} {
+		if libraryFilePath, err := findLibrary(libraryName); err != nil {
+			fmt.Fprintf(os.Stderr, "error finding library %s, ignoring: %+v\n", libraryName, err)
+		} else {
+			libraryFilePaths = append(libraryFilePaths, libraryFilePath)
+		}
+	}
 }

 type clusterRegistry struct {
@@ -323,11 +360,24 @@ func (r *clusterRegistry) destroyAllClusters(ctx context.Context, l *logger) {
 	}
 }

+// execCmd is like execCmdEx, but doesn't return the command's output.
 func execCmd(ctx context.Context, l *logger, args ...string) error {
-	// NB: It is important that this waitgroup Waits after cancel() below.
-	var wg sync.WaitGroup
-	defer wg.Wait()
+	return execCmdEx(ctx, l, args...).err
+}

+type cmdRes struct {
+	err error
+	// stdout and stderr are the command's output. Note that this is truncated
+	// and only a tail is returned.
+	stdout, stderr string
+}
+
+// execCmdEx runs a command and returns its error and output.
+//
+// Note that the output is truncated; only a tail is returned.
+// Also note that if the command exits with an error code, its output is also
+// included in cmdRes.err.
+func execCmdEx(ctx context.Context, l *logger, args ...string) cmdRes {
 	var cancel func()
 	ctx, cancel = context.WithCancel(ctx)
 	defer cancel()
@@ -336,32 +386,78 @@ func execCmd(ctx context.Context, l *logger, args ...string) error {
 	cmd := exec.CommandContext(ctx, args[0], args[1:]...)

 	debugStdoutBuffer, _ := circbuf.NewBuffer(4096)
-	debugStderrBuffer, _ := circbuf.NewBuffer(1024)
+	debugStderrBuffer, _ := circbuf.NewBuffer(4096)

 	// Do a dance around https://github.com/golang/go/issues/23019.
-	// Briefly put, passing os.Std{out,err} to subprocesses isn't great for
-	// context cancellation as Run() will wait for any subprocesses to finish.
-	// For example, "roachprod run x -- sleep 20" would wait 20 seconds, even
-	// if the context got canceled right away. Work around the problem by passing
-	// pipes to the command on which we set aggressive deadlines once the context
-	// expires.
+ // When the command we run launches a subprocess, that subprocess receives + // a copy of our Command's Stdout/Stderr file descriptor, which effectively + // means that the file descriptors close only when that subcommand returns. + // However, proactively killing the subcommand is not really possible - we + // will only manage to kill the parent process that we launched directly. + // In practice this means that if we try to react to context cancellation, + // the pipes we read the output from will wait for the *subprocess* to + // terminate, leaving us hanging, potentially indefinitely. + // To work around it, use pipes and set a read deadline on our (read) end of + // the pipes when we detect a context cancellation. + // + // See TestExecCmd for a test. + var closePipes func(ctx context.Context) + var wg sync.WaitGroup { - rOut, wOut, err := os.Pipe() + + var wOut, wErr, rOut, rErr *os.File + var cwOnce sync.Once + closePipes = func(ctx context.Context) { + // Idempotently closes the writing end of the pipes. This is called either + // when the process returns or when it was killed due to context + // cancellation. In the former case, close the writing ends of the pipe + // so that the copy goroutines started below return (without missing any + // output). In the context cancellation case, we set a deadline to force + // the goroutines to quit eagerly. This is important since the command + // may have duplicated wOut and wErr to its possible subprocesses, which + // may continue to run for long periods of time, and would otherwise + // block this command. In theory this is possible also when the command + // returns on its own accord, so we set a (more lenient) deadline in the + // first case as well. + // + // NB: there's also the option (at least on *nix) to use a process group, + // but it doesn't look portable: + // https://medium.com/@felixge/killing-a-child-process-and-all-of-its-children-in-go-54079af94773 + cwOnce.Do(func() { + if wOut != nil { + _ = wOut.Close() + } + if wErr != nil { + _ = wErr.Close() + } + dur := 10 * time.Second // wait up to 10s for subprocesses + if ctx.Err() != nil { + dur = 10 * time.Millisecond + } + deadline := timeutil.Now().Add(dur) + if rOut != nil { + _ = rOut.SetReadDeadline(deadline) + } + if rErr != nil { + _ = rErr.SetReadDeadline(deadline) + } + }) + } + defer closePipes(ctx) + + var err error + rOut, wOut, err = os.Pipe() if err != nil { - return err + return cmdRes{err: err} } - defer rOut.Close() - defer wOut.Close() - rErr, wErr, err := os.Pipe() + rErr, wErr, err = os.Pipe() if err != nil { - return err + return cmdRes{err: err} } - defer rErr.Close() - defer wErr.Close() cmd.Stdout = wOut - wg.Add(3) + wg.Add(1) go func() { defer wg.Done() _, _ = io.Copy(l.stdout, io.TeeReader(rOut, debugStdoutBuffer)) @@ -370,50 +466,52 @@ func execCmd(ctx context.Context, l *logger, args ...string) error { if l.stderr == l.stdout { // If l.stderr == l.stdout, we use only one pipe to avoid // duplicating everything. - wg.Done() cmd.Stderr = wOut } else { cmd.Stderr = wErr + wg.Add(1) go func() { defer wg.Done() _, _ = io.Copy(l.stderr, io.TeeReader(rErr, debugStderrBuffer)) }() } + } - go func() { - defer wg.Done() - <-ctx.Done() - // NB: setting a more aggressive deadline here makes TestClusterMonitor flaky. 
- now := timeutil.Now().Add(3 * time.Second) - _ = rOut.SetDeadline(now) - _ = wOut.SetDeadline(now) - _ = rErr.SetDeadline(now) - _ = wErr.SetDeadline(now) - }() + err := cmd.Run() + closePipes(ctx) + wg.Wait() + + stdoutString := debugStdoutBuffer.String() + if debugStdoutBuffer.TotalWritten() > debugStdoutBuffer.Size() { + stdoutString = "<... some data truncated by circular buffer; go to artifacts for details ...>\n" + stdoutString + } + stderrString := debugStderrBuffer.String() + if debugStderrBuffer.TotalWritten() > debugStderrBuffer.Size() { + stderrString = "<... some data truncated by circular buffer; go to artifacts for details ...>\n" + stderrString } - if err := cmd.Run(); err != nil { + if err != nil { // Context errors opaquely appear as "signal killed" when manifested. // We surface this error explicitly. if ctx.Err() != nil { err = errors.CombineErrors(ctx.Err(), err) } - // Synchronize access to ring buffers before using them to create an - // error to return. - cancel() - wg.Wait() if err != nil { err = &withCommandDetails{ cause: err, cmd: strings.Join(args, " "), - stderr: debugStderrBuffer.String(), - stdout: debugStdoutBuffer.String(), + stderr: stderrString, + stdout: stdoutString, } } - return err } - return nil + + return cmdRes{ + err: err, + stdout: stdoutString, + stderr: stderrString, + } } type withCommandDetails struct { @@ -624,6 +722,10 @@ func isSSD(machineType string) bool { if cloud != aws { panic("can only differentiate SSDs based on machine type on AWS") } + if !localSSD { + // Overridden by the user using a cmd arg. + return false + } typeAndSize := strings.Split(machineType, ".") if len(typeAndSize) == 2 { @@ -682,6 +784,16 @@ func (n nodeListOption) randNode() nodeListOption { return nodeListOption{n[rand.Intn(len(n))]} } +// nodeIDsString returns a space separated list of all node IDs comprising this +// list. +func (n nodeListOption) nodeIDsString() string { + result := "" + for _, i := range n { + result += fmt.Sprintf("%s ", strconv.Itoa(i)) + } + return result +} + func (n nodeListOption) String() string { if len(n) == 0 { return "" @@ -801,27 +913,41 @@ func (s *clusterSpec) args() []string { machineTypeArg := machineTypeFlag(machineType) + "=" + machineType args = append(args, machineTypeArg) } - if s.Zones != "" { - switch cloud { - case gce: - if s.Geo { - args = append(args, "--gce-zones="+s.Zones) - } else { - args = append(args, "--gce-zones="+firstZone(s.Zones)) + + if !local { + zones := s.Zones + if zones == "" { + zones = zonesF + } + if zones != "" { + if !s.Geo { + zones = firstZone(zones) } - case azure: - args = append(args, "--azure-locations="+s.Zones) - default: - fmt.Fprintf(os.Stderr, "specifying zones is not yet supported on %s", cloud) - os.Exit(1) + var arg string + switch cloud { + case aws: + arg = "--aws-zones=" + zones + case gce: + arg = "--gce-zones=" + zones + case azure: + arg = "--azure-locations=" + zones + default: + fmt.Fprintf(os.Stderr, "specifying zones is not yet supported on %s", cloud) + os.Exit(1) + } + args = append(args, arg) } } + if s.Geo { args = append(args, "--geo") } if s.Lifetime != 0 { args = append(args, "--lifetime="+s.Lifetime.String()) } + if len(createArgs) > 0 { + args = append(args, createArgs...) + } return args } @@ -1123,14 +1249,7 @@ func (f *clusterFactory) newCluster( sargs := []string{roachprod, "create", c.name, "-n", fmt.Sprint(c.spec.NodeCount)} sargs = append(sargs, cfg.spec.args()...) 
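// A sketch of the truncation contract of execCmdEx above (hypothetical usage;
// the 4096-byte circular buffers keep only the tail of a large output):
res := execCmdEx(ctx, l, "/bin/bash", "-c", "seq 1 1000000")
if res.err == nil && strings.HasPrefix(res.stdout, "<... some data truncated") {
	// The full output went to the logger's artifacts; res.stdout holds only
	// the most recent 4096 bytes, prefixed with the truncation marker.
}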
-	if !local && zonesF != "" && cfg.spec.Zones == "" {
-		if cfg.spec.Geo {
-			sargs = append(sargs, "--gce-zones="+zonesF)
-		} else {
-			sargs = append(sargs, "--gce-zones="+firstZone(zonesF))
-		}
-	}
-	if !cfg.useIOBarrier {
+	if !cfg.useIOBarrier && localSSD {
 		sargs = append(sargs, "--local-ssd-no-ext4-barrier")
 	}

@@ -1144,13 +1263,16 @@ func (f *clusterFactory) newCluster(
 	logPath := filepath.Join(f.artifactsDir, runnerLogsDir, "cluster-create", name+".log")
 	l, err := rootLogger(logPath, teeOpt)
 	if err != nil {
-		log.Fatal(ctx, err)
+		log.Fatalf(ctx, "%v", err)
 	}

 	success := false
 	// Attempt to create a cluster several times, cause them clouds be flaky that
 	// my phone says it's snowing.
 	for i := 0; i < 3; i++ {
+		if i > 0 {
+			l.PrintfCtx(ctx, "Retrying cluster creation (attempt #%d)", i+1)
+		}
 		err = execCmd(ctx, l, sargs...)
 		if err == nil {
 			success = true
@@ -1350,6 +1472,19 @@ func (c *cluster) Range(begin, end int) nodeListOption {
 	return r
 }

+// Nodes returns a node list containing only the given nodes.
+func (c *cluster) Nodes(ns ...int) nodeListOption {
+	r := make(nodeListOption, 0, len(ns))
+	for _, n := range ns {
+		if n < 1 || n > c.spec.NodeCount {
+			c.t.Fatalf("invalid node range: %d (1-%d)", n, c.spec.NodeCount)
+		}
+
+		r = append(r, n)
+	}
+	return r
+}
+
 // All returns a node list containing only the node i.
 func (c *cluster) Node(i int) nodeListOption {
 	return c.Range(i, i)
@@ -1368,12 +1503,60 @@ func (c *cluster) FetchLogs(ctx context.Context) error {

 	// Don't hang forever if we can't fetch the logs.
 	return contextutil.RunWithTimeout(ctx, "fetch logs", 2*time.Minute, func(ctx context.Context) error {
-		path := filepath.Join(c.t.ArtifactsDir(), "logs")
+		path := filepath.Join(c.t.ArtifactsDir(), "logs", "unredacted")
 		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
 			return err
 		}

-		return execCmd(ctx, c.l, roachprod, "get", c.name, "logs" /* src */, path /* dest */)
+		if err := execCmd(ctx, c.l, roachprod, "get", c.name, "logs" /* src */, path /* dest */); err != nil {
+			log.Infof(ctx, "failed to fetch logs: %v", err)
+			if ctx.Err() != nil {
+				return err
+			}
+		}
+
+		if err := c.RunE(ctx, c.All(), "mkdir -p logs/redacted && ./cockroach debug merge-logs --redact logs/*.log > logs/redacted/combined.log"); err != nil {
+			log.Infof(ctx, "failed to redact logs: %v", err)
+			if ctx.Err() != nil {
+				return err
+			}
+		}
+
+		return execCmd(
+			ctx, c.l, roachprod, "get", c.name, "logs/redacted/combined.log" /* src */, filepath.Join(c.t.ArtifactsDir(), "logs/cockroach.log"),
+		)
+	})
+}
+
+// FetchDiskUsage collects a summary of the disk usage on nodes.
+func (c *cluster) FetchDiskUsage(ctx context.Context) error {
+	// TODO(jackson): This is temporary for debugging out-of-disk-space
+	// failures like #44845.
+	if c.spec.NodeCount == 0 || c.isLocal() {
+		// A zero node count can happen during unit tests, and implies there is
+		// nothing to do. Also, don't grab disk usage on local runs.
+		return nil
+	}
+
+	c.l.Printf("fetching disk usage\n")
+	c.status("fetching disk usage")
+
+	// Don't hang forever.
+	return contextutil.RunWithTimeout(ctx, "disk usage", 20*time.Second, func(ctx context.Context) error {
+		const name = "diskusage.txt"
+		path := filepath.Join(c.t.ArtifactsDir(), name)
+		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+			return err
+		}
+		if err := execCmd(
+			ctx, c.l, roachprod, "ssh", c.name, "--",
+			"/bin/bash", "-c", "'du -c /mnt/data1 > "+name+"'",
+		); err != nil {
+			// Don't error out because it might've worked on some nodes. Fetching will
+			// error out below but will get everything it can first.
+			c.l.Printf("during disk usage fetching: %s", err)
+		}
+		return execCmd(ctx, c.l, roachprod, "get", c.name, name /* src */, path /* dest */)
 	})
 }

@@ -1416,14 +1599,24 @@ func (c *cluster) FetchDebugZip(ctx context.Context) error {
 		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
 			return err
 		}
-		// `./cockroach debug zip` is noisy. Suppress the output unless it fails.
-		output, err := execCmdWithBuffer(ctx, c.l, roachprod, "ssh", c.name+":1", "--",
-			"./cockroach", "debug", "zip", "--url", "{pgurl:1}", zipName)
-		if err != nil {
-			c.l.Printf("./cockroach debug zip failed: %s", output)
-			return err
+		// Some nodes might be down, so try to find one that works. We make the
+		// assumption that a down node will refuse the connection, so it won't
+		// waste our time.
+		for i := 1; i <= c.spec.NodeCount; i++ {
+			// `./cockroach debug zip` is noisy. Suppress the output unless it fails.
+			si := strconv.Itoa(i)
+			output, err := execCmdWithBuffer(ctx, c.l, roachprod, "ssh", c.name+":"+si, "--",
+				"./cockroach", "debug", "zip", "--url", "{pgurl:"+si+"}", zipName)
+			if err != nil {
+				c.l.Printf("./cockroach debug zip failed: %s", output)
+				if i < c.spec.NodeCount {
+					continue
+				}
+				return err
+			}
+			return execCmd(ctx, c.l, roachprod, "get", c.name+":"+si, zipName /* src */, path /* dest */)
+		}
-		return execCmd(ctx, c.l, roachprod, "get", c.name+":1", zipName /* src */, path /* dest */)
+		return nil
 	})
 }

@@ -1459,7 +1652,13 @@ func (c *cluster) FailOnDeadNodes(ctx context.Context, t *test) {
 // check since we know that such spurious errors are possibly without any relation
 // to the check having failed.
 func (c *cluster) CheckReplicaDivergenceOnDB(ctx context.Context, db *gosql.DB) error {
+	// NB: we set a statement_timeout since context cancellation won't work here,
+	// see:
+	// https://github.com/cockroachdb/cockroach/pull/34520
+	//
+	// We've seen the consistency checks hang indefinitely in some cases.
 	rows, err := db.QueryContext(ctx, `
+SET statement_timeout = '3m';
 SELECT t.range_id, t.start_key_pretty, t.status, t.detail
 FROM
 crdb_internal.check_consistency(true, '', '') as t
@@ -1471,24 +1670,21 @@ WHERE t.status NOT IN ('RANGE_CONSISTENT', 'RANGE_INDETERMINATE')`)
 		c.l.Printf("consistency check failed with %v; ignoring", err)
 		return nil
 	}
-	var buf bytes.Buffer
+	var finalErr error
 	for rows.Next() {
 		var rangeID int32
 		var prettyKey, status, detail string
-		if err := rows.Scan(&rangeID, &prettyKey, &status, &detail); err != nil {
-			return err
+		if scanErr := rows.Scan(&rangeID, &prettyKey, &status, &detail); scanErr != nil {
+			return scanErr
 		}
-		fmt.Fprintf(&buf, "r%d (%s) is inconsistent: %s %s\n", rangeID, prettyKey, status, detail)
+		finalErr = errors.CombineErrors(finalErr,
+			errors.Newf("r%d (%s) is inconsistent: %s %s\n", rangeID, prettyKey, status, detail))
 	}
 	if err := rows.Err(); err != nil {
-		return err
+		finalErr = errors.CombineErrors(finalErr, err)
 	}

-	msg := buf.String()
-	if msg != "" {
-		return errors.New(msg)
-	}
-	return nil
+	return finalErr
 }

 // FailOnReplicaDivergence fails the test if
@@ -1742,7 +1938,7 @@ func (c *cluster) PutE(ctx context.Context, l *logger, src, dest string, opts ..
 		return errors.Wrap(ctx.Err(), "cluster.Put")
 	}

-	c.status("uploading binary")
+	c.status("uploading file")
 	defer c.status("")

 	err := execCmd(ctx, c.l, roachprod, "put", c.makeNodes(opts...), src, dest)
@@ -1752,6 +1948,33 @@ func (c *cluster) PutE(ctx context.Context, l *logger, src, dest string, opts ..
 	return nil
 }

+// PutLibraries inserts all available library files into all nodes on the cluster
+// at the specified location.
+func (c *cluster) PutLibraries(ctx context.Context, libraryDir string) error {
+	if ctx.Err() != nil {
+		return errors.Wrap(ctx.Err(), "cluster.PutLibraries")
+	}
+
+	c.status("uploading library files")
+	defer c.status("")
+
+	if err := c.RunE(ctx, c.All(), "mkdir", "-p", libraryDir); err != nil {
+		return err
+	}
+	for _, libraryFilePath := range libraryFilePaths {
+		putPath := filepath.Join(libraryDir, filepath.Base(libraryFilePath))
+		if err := c.PutE(
+			ctx,
+			c.l,
+			libraryFilePath,
+			putPath,
+		); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // Get gets files from remote hosts.
 func (c *cluster) Get(ctx context.Context, l *logger, src, dest string, opts ...option) error {
 	if ctx.Err() != nil {
@@ -2081,20 +2304,31 @@ func (c *cluster) RunWithBuffer(
 // and communication from a test driver to nodes in a cluster should use
 // external IPs.
 func (c *cluster) pgURL(ctx context.Context, node nodeListOption, external bool) []string {
-	args := []string{`pgurl`}
+	args := []string{roachprod, "pgurl"}
 	if external {
 		args = append(args, `--external`)
 	}
-	args = append(args, c.makeNodes(node))
-	cmd := exec.CommandContext(ctx, roachprod, args...)
-	output, err := cmd.Output()
-	if err != nil {
-		fmt.Println(strings.Join(cmd.Args, ` `))
-		c.t.Fatal(err)
+	nodes := c.makeNodes(node)
+	args = append(args, nodes)
+	cmd := execCmdEx(ctx, c.l, args...)
+	if cmd.err != nil {
+		c.t.Fatal(errors.Wrapf(cmd.err, "failed to get pgurl for nodes: %s", nodes))
+	}
+	urls := strings.Split(strings.TrimSpace(cmd.stdout), " ")
+	if len(urls) != len(node) {
+		c.t.Fatalf(
+			"pgurl for nodes %v got urls %v from stdout:\n%s\nstderr:\n%s",
+			node, urls, cmd.stdout, cmd.stderr,
+		)
 	}
-	urls := strings.Split(strings.TrimSpace(string(output)), " ")
 	for i := range urls {
 		urls[i] = strings.Trim(urls[i], "'")
+		if urls[i] == "" {
+			c.t.Fatalf(
+				"pgurl for nodes %v empty: %v from\nstdout:\n%s\nstderr:\n%s",
+				node, urls, cmd.stdout, cmd.stderr,
+			)
+		}
 	}
 	return urls
 }
@@ -2349,44 +2583,27 @@ func (m *monitor) ResetDeaths() {
 	atomic.StoreInt32(&m.expDeaths, 0)
 }

-var errGoexit = errors.New("Goexit() was called")
+var errTestFatal = errors.New("t.Fatal() was called")

 func (m *monitor) Go(fn func(context.Context) error) {
 	m.g.Go(func() (err error) {
-		var returned bool
 		defer func() {
-			if returned {
-				return
-			}
-			if r := recover(); r != errGoexit && r != nil {
-				// Pass any regular panics through.
-				panic(r)
-			} else {
-				// If the invoked method called runtime.Goexit (such as it
-				// happens when it calls t.Fatal), exit with a sentinel error
-				// here so that the wrapped errgroup cancels itself.
-				//
-				// Note that the trick here is that we panicked explicitly below,
-				// which somehow "overrides" the Goexit which is supposed to be
-				// un-recoverable, but we do need to recover to return an error.
-				err = errGoexit
+			if r := recover(); r != nil {
+				if r != errTestFatal {
+					// Pass any regular panics through.
+					panic(r)
+				}
+				// t.{Skip,Fatal} perform a panic(errTestFatal).
If we've caught the + // errTestFatal sentinel we transform the panic into an error return so + // that the wrapped errgroup cancels itself. + err = errTestFatal } }() if impl, ok := m.t.(*test); ok { // Automatically clear the worker status message when the goroutine exits. defer impl.WorkerStatus() } - defer func() { - if !returned { - if r := recover(); r != nil { - panic(r) - } - panic(errGoexit) - } - }() - err = fn(m.ctx) - returned = true - return err + return fn(m.ctx) }) } @@ -2482,7 +2699,7 @@ func (m *monitor) wait(args ...string) error { cmd.Stdout = io.MultiWriter(pipeW, monL.stdout) cmd.Stderr = monL.stderr if err := cmd.Run(); err != nil { - if err != context.Canceled && !strings.Contains(err.Error(), "killed") { + if !errors.Is(err, context.Canceled) && !strings.Contains(err.Error(), "killed") { // The expected reason for an error is that the monitor was killed due // to the context being canceled. Any other error is an actual error. setMonitorCmdErr(err) @@ -2522,6 +2739,7 @@ func (m *monitor) wait(args ...string) error { } func waitForFullReplication(t *test, db *gosql.DB) { + t.l.Printf("waiting for up-replication...\n") tStart := timeutil.Now() for ok := false; !ok; time.Sleep(time.Second) { if err := db.QueryRow( diff --git a/pkg/cmd/roachtest/cluster_init.go b/pkg/cmd/roachtest/cluster_init.go index 66b709f8bc76..dad8194ee5d2 100644 --- a/pkg/cmd/roachtest/cluster_init.go +++ b/pkg/cmd/roachtest/cluster_init.go @@ -38,42 +38,8 @@ func runClusterInit(ctx context.Context, t *test, c *cluster) { t.Fatal("no address for first node") } - // Legacy-style init where we start node 1 without a join flag and then point - // the other nodes at it. - func() { - var g errgroup.Group - g.Go(func() error { - return c.RunE(ctx, c.Node(1), - `mkdir -p {log-dir} && `+ - `./cockroach start --insecure --background --store={store-dir} `+ - `--log-dir={log-dir} --cache=10% --max-sql-memory=10% `+ - `--listen-addr=:{pgport:1} --http-port=$[{pgport:1}+1] `+ - `> {log-dir}/cockroach.stdout 2> {log-dir}/cockroach.stderr`) - }) - for i := 2; i <= c.spec.NodeCount; i++ { - i := i - g.Go(func() error { - return c.RunE(ctx, c.Node(i), - fmt.Sprintf( - `mkdir -p {log-dir} && `+ - `./cockroach start --insecure --background --store={store-dir} `+ - `--log-dir={log-dir} --cache=10%% --max-sql-memory=10%% `+ - `--listen-addr=:{pgport:%[1]d} --http-port=$[{pgport:%[1]d}+1] `+ - `--join=`+addrs[0]+ - `> {log-dir}/cockroach.stdout 2> {log-dir}/cockroach.stderr`, i)) - }) - } - if err := g.Wait(); err != nil { - t.Fatal(err) - } - - db := c.Conn(ctx, 1) - defer db.Close() - waitForFullReplication(t, db) - }() - - // New-style init where we start all nodes with the same join flags and then - // issue an "init" command to one of the nodes. + // We start all nodes with the same join flags and then issue an "init" + // command to one of the nodes. 
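// The errTestFatal scheme above keeps the usual monitor idiom intact; a
// sketch of that idiom (doWork is hypothetical):
m := newMonitor(ctx, c)
m.Go(func(ctx context.Context) error {
	if err := doWork(ctx); err != nil {
		t.Fatal(err) // panics with errTestFatal; Go() converts it to an error
	}
	return nil
})
m.Wait()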
 	for _, initNode := range []int{1, 2} {
 		c.Wipe(ctx)
diff --git a/pkg/cmd/roachtest/cluster_test.go b/pkg/cmd/roachtest/cluster_test.go
index 60c293d60d20..7a708bac1645 100644
--- a/pkg/cmd/roachtest/cluster_test.go
+++ b/pkg/cmd/roachtest/cluster_test.go
@@ -15,11 +15,11 @@ import (
 	"fmt"
 	"os"
 	"regexp"
-	"runtime"
 	"testing"
 	"time"

 	"github.com/cockroachdb/cockroach/pkg/testutils"
+	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
 	"github.com/cockroachdb/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -66,12 +66,62 @@ func (t testWrapper) logger() *logger {
 	return nil
 }

+func TestExecCmd(t *testing.T) {
+	cfg := &loggerConfig{stdout: os.Stdout, stderr: os.Stderr}
+	logger, err := cfg.newLogger("" /* path */)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Run(`success`, func(t *testing.T) {
+		res := execCmdEx(context.Background(), logger, "/bin/bash", "-c", "echo guacamole")
+		require.NoError(t, res.err)
+		require.Contains(t, res.stdout, "guacamole")
+	})
+
+	t.Run(`error`, func(t *testing.T) {
+		res := execCmdEx(context.Background(), logger, "/bin/bash", "-c", "echo burrito; false")
+		require.Error(t, res.err)
+		require.Contains(t, res.stdout, "burrito")
+	})
+
+	t.Run(`returns-on-cancel`, func(t *testing.T) {
+		ctx, cancel := context.WithCancel(context.Background())
+		go func() {
+			time.Sleep(10 * time.Millisecond)
+			cancel()
+		}()
+		tBegin := timeutil.Now()
+		require.Error(t, execCmd(ctx, logger, "/bin/bash", "-c", "sleep 100"))
+		if max, act := 99*time.Second, timeutil.Since(tBegin); max < act {
+			t.Fatalf("took %s despite cancellation", act)
+		}
+	})
+
+	t.Run(`returns-on-cancel-subprocess`, func(t *testing.T) {
+		// The tricky version of the preceding test. The difference is that the
+		// process spawns a stalling subprocess and then waits for it. See
+		// execCmdEx for a detailed discussion of how this is made to work.
+		ctx, cancel := context.WithCancel(context.Background())
+		go func() {
+			time.Sleep(10 * time.Millisecond)
+			cancel()
+		}()
+		tBegin := timeutil.Now()
+		require.Error(t, execCmd(ctx, logger, "/bin/bash", "-c", "sleep 100& wait"))
+		if max, act := 99*time.Second, timeutil.Since(tBegin); max < act {
+			t.Fatalf("took %s despite cancellation", act)
+		}
+	})
+}
+
 func TestClusterMonitor(t *testing.T) {
 	cfg := &loggerConfig{stdout: os.Stdout, stderr: os.Stderr}
 	logger, err := cfg.newLogger("" /* path */)
 	if err != nil {
 		t.Fatal(err)
 	}
+
 	t.Run(`success`, func(t *testing.T) {
 		c := &cluster{t: testWrapper{t}, l: logger}
 		m := newMonitor(context.Background(), c)
@@ -192,10 +242,9 @@ func TestClusterMonitor(t *testing.T) {
 		// In reality t.Fatal adds text that is returned when the test fails,
 		// so the failing goroutine will be referenced (not like in the expected
 		// error below, where all you see is the other one being canceled).
- runtime.Goexit() - return errors.New("unreachable") + panic(errTestFatal) }) - expectedErr := regexp.QuoteMeta(`Goexit() was called`) + expectedErr := regexp.QuoteMeta(`t.Fatal() was called`) if err := m.wait("sleep", "100"); !testutils.IsError(err, expectedErr) { t.Logf("error details: %+v", err) t.Error(err) diff --git a/pkg/cmd/roachtest/copy.go b/pkg/cmd/roachtest/copy.go index 565b71e1dc26..6963f19abce3 100644 --- a/pkg/cmd/roachtest/copy.go +++ b/pkg/cmd/roachtest/copy.go @@ -17,8 +17,8 @@ import ( "strings" "github.com/cockroachdb/cockroach-go/crdb" + "github.com/cockroachdb/errors" _ "github.com/lib/pq" - "github.com/pkg/errors" ) func registerCopy(r *testRegistry) { @@ -45,6 +45,13 @@ func registerCopy(r *testRegistry) { db := c.Conn(ctx, 1) defer db.Close() + // Disable load-based splitting so that we can more accurately + // predict an upper-bound on the number of ranges that the cluster + // will end up with. + if err := disableLoadBasedSplitting(ctx, db); err != nil { + return errors.Wrap(err, "disabling load-based splitting") + } + t.Status("importing Bank fixture") c.Run(ctx, c.Node(1), fmt.Sprintf( "./workload fixtures load bank --rows=%d --payload-bytes=%d {pgurl:1}", diff --git a/pkg/cmd/roachtest/decommission.go b/pkg/cmd/roachtest/decommission.go index 849fa31ad693..31ab6c44060d 100644 --- a/pkg/cmd/roachtest/decommission.go +++ b/pkg/cmd/roachtest/decommission.go @@ -14,6 +14,7 @@ import ( "context" "encoding/csv" "fmt" + "math/rand" "reflect" "regexp" "strconv" @@ -23,20 +24,72 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" "github.com/kr/pretty" _ "github.com/lib/pq" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) +func registerDecommission(r *testRegistry) { + { + numNodes := 4 + duration := time.Hour + + r.Add(testSpec{ + Name: fmt.Sprintf("decommission/nodes=%d/duration=%s", numNodes, duration), + Owner: OwnerKV, + MinVersion: "v20.2.0", + Cluster: makeClusterSpec(4), + Run: func(ctx context.Context, t *test, c *cluster) { + if local { + duration = 5 * time.Minute + t.l.Printf("running with duration=%s in local mode\n", duration) + } + runDecommission(ctx, t, c, numNodes, duration) + }, + }) + } + { + numNodes := 6 + r.Add(testSpec{ + Name: "decommission/randomized", + Owner: OwnerKV, + MinVersion: "v20.2.0", + Timeout: 10 * time.Minute, + Cluster: makeClusterSpec(numNodes), + Run: func(ctx context.Context, t *test, c *cluster) { + runDecommissionRandomized(ctx, t, c) + }, + }) + } + { + numNodes := 4 + r.Add(testSpec{ + Name: "decommission/mixed-versions", + Owner: OwnerKV, + MinVersion: "v20.2.0", + Cluster: makeClusterSpec(numNodes), + Run: func(ctx context.Context, t *test, c *cluster) { + runDecommissionMixedVersions(ctx, t, c, r.buildVersion) + }, + }) + } +} + +// runDecommission decommissions and wipes nodes in a cluster repeatedly, +// alternating between the node being shut down gracefully before and after the +// decommissioning operation, while some light load is running against the +// cluster (to manually verify that the qps don't dip too much). +// // TODO(tschottdorf): verify that the logs don't contain the messages // that would spam the log before #23605. I wonder if we should really // start grepping the logs. 
An alternative is to introduce a metric // that would have signaled this and check that instead. -func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { - ctx := context.Background() - +func runDecommission(ctx context.Context, t *test, c *cluster, nodes int, duration time.Duration) { const defaultReplicationFactor = 3 + if defaultReplicationFactor > nodes { + t.Fatal("improper configuration: replication factor greater than number of nodes in the test") + } // The number of nodes we're going to cycle through. Since we're sometimes // killing the nodes and then removing them, this means having to be careful // with loss of quorum. So only ever touch a fixed minority of nodes and @@ -45,18 +98,19 @@ func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { // at some point. numDecom := (defaultReplicationFactor - 1) / 2 - c.Put(ctx, workload, "./workload", c.Node(nodes)) + // node1 is kept pinned (i.e. not decommissioned/restarted), and is the node + // through which we run the workload and other queries. + pinnedNode := 1 c.Put(ctx, cockroach, "./cockroach", c.All()) + c.Put(ctx, workload, "./workload", c.Node(pinnedNode)) - for i := 1; i <= numDecom; i++ { + for i := 1; i <= nodes; i++ { c.Start(ctx, t, c.Node(i), startArgs(fmt.Sprintf("-a=--attrs=node%d", i))) } + c.Run(ctx, c.Node(pinnedNode), `./workload init kv --drop`) - c.Start(ctx, t, c.Range(numDecom+1, nodes)) - c.Run(ctx, c.Node(nodes), `./workload init kv --drop`) - - waitReplicatedAwayFrom := func(downNodeID string) error { - db := c.Conn(ctx, nodes) + waitReplicatedAwayFrom := func(downNodeID int) error { + db := c.Conn(ctx, pinnedNode) defer func() { _ = db.Close() }() @@ -87,52 +141,47 @@ func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { return nil } - waitUpReplicated := func(targetNodeID string) error { - db := c.Conn(ctx, nodes) + waitUpReplicated := func(targetNode, targetNodeID int) error { + db := c.Conn(ctx, pinnedNode) defer func() { _ = db.Close() }() - for ok := false; !ok; { + var count int + for { + // Check to see that there are no ranges where the target node is + // not part of the replica set. stmtReplicaCount := fmt.Sprintf( - `SELECT count(*) = 0 FROM crdb_internal.ranges WHERE array_position(replicas, %s) IS NULL and database_name = 'kv';`, targetNodeID) - t.Status(stmtReplicaCount) - if err := db.QueryRow(stmtReplicaCount).Scan(&ok); err != nil { + `SELECT count(*) FROM crdb_internal.ranges WHERE array_position(replicas, %d) IS NULL and database_name = 'kv';`, targetNodeID) + if err := db.QueryRow(stmtReplicaCount).Scan(&count); err != nil { return err } + t.Status(fmt.Sprintf("node%d missing %d replica(s)", targetNode, count)) + if count == 0 { + break + } time.Sleep(time.Second) } return nil } - if err := waitReplicatedAwayFrom("0" /* no down node */); err != nil { + if err := waitReplicatedAwayFrom(0 /* no down node */); err != nil { t.Fatal(err) } - loadDuration := " --duration=" + duration.String() - workloads := []string{ // TODO(tschottdorf): in remote mode, the ui shows that we consistently write // at 330 qps (despite asking for 500 below). Locally we get 500qps (and a lot // more without rate limiting). Check what's up with that. 
- "./workload run kv --max-rate 500 --tolerate-errors" + loadDuration + " {pgurl:1-%d}", + fmt.Sprintf("./workload run kv --max-rate 500 --tolerate-errors --duration=%s {pgurl:1-%d}", duration.String(), nodes), } - run := func(stmtStr string) { - db := c.Conn(ctx, nodes) + run := func(stmt string) { + db := c.Conn(ctx, pinnedNode) defer db.Close() - stmt := fmt.Sprintf(stmtStr, "", "=") - // We are removing the EXPERIMENTAL keyword in 2.1. For compatibility - // with 2.0 clusters we still need to try with it if the - // syntax without EXPERIMENTAL fails. - // TODO(knz): Remove this in 2.2. + t.Status(stmt) _, err := db.ExecContext(ctx, stmt) - if err != nil && strings.Contains(err.Error(), "syntax error") { - stmt = fmt.Sprintf(stmtStr, "EXPERIMENTAL", "") - t.Status(stmt) - _, err = db.ExecContext(ctx, stmt) - } if err != nil { t.Fatal(err) } @@ -143,20 +192,19 @@ func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { m, ctx = errgroup.WithContext(ctx) for _, cmd := range workloads { cmd := cmd // copy is important for goroutine - - cmd = fmt.Sprintf(cmd, nodes) m.Go(func() error { - return c.RunE(ctx, c.Node(nodes), cmd) + return c.RunE(ctx, c.Node(pinnedNode), cmd) }) } m.Go(func() error { - nodeID := func(node int) (string, error) { + getNodeID := func(node int) (int, error) { dbNode := c.Conn(ctx, node) defer dbNode.Close() - var nodeID string + + var nodeID int if err := dbNode.QueryRow(`SELECT node_id FROM crdb_internal.node_runtime_info LIMIT 1`).Scan(&nodeID); err != nil { - return "", err + return 0, err } return nodeID, nil } @@ -167,21 +215,32 @@ func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { return c.RunE(ctx, c.Node(node), "./cockroach quit --insecure --host=:"+port) } - decom := func(id string) error { - port := fmt.Sprintf("{pgport:%d}", nodes) // always use last node - t.Status("decommissioning node", id) - return c.RunE(ctx, c.Node(nodes), "./cockroach node decommission --insecure --wait=live --host=:"+port+" "+id) + decom := func(id int) error { + port := fmt.Sprintf("{pgport:%d}", pinnedNode) // always use the pinned node + t.Status(fmt.Sprintf("decommissioning node %d", id)) + return c.RunE(ctx, c.Node(pinnedNode), fmt.Sprintf("./cockroach node decommission --insecure --wait=all --host=:%s %d", port, id)) } - for tBegin, whileDown, node := timeutil.Now(), true, 1; timeutil.Since(tBegin) <= duration; whileDown, node = !whileDown, (node%numDecom)+1 { + tBegin, whileDown := timeutil.Now(), true + node := nodes + for timeutil.Since(tBegin) <= duration { + // Alternate between the node being shut down gracefully before and + // after the decommissioning operation. + whileDown = !whileDown + // Cycle through the last numDecom nodes. 
+ node = nodes - (node % numDecom) + if node == pinnedNode { + t.Fatalf("programming error: not expecting to decommission/wipe node%d", pinnedNode) + } + t.Status(fmt.Sprintf("decommissioning %d (down=%t)", node, whileDown)) - id, err := nodeID(node) + nodeID, err := getNodeID(node) if err != nil { return err } - run(fmt.Sprintf(`ALTER RANGE default %%[1]s CONFIGURE ZONE %%[2]s 'constraints: {"+node%d"}'`, node)) - if err := waitUpReplicated(id); err != nil { + run(fmt.Sprintf(`ALTER RANGE default CONFIGURE ZONE = 'constraints: {"+node%d"}'`, node)) + if err := waitUpReplicated(node, nodeID); err != nil { return err } @@ -191,13 +250,13 @@ func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { } } - run(fmt.Sprintf(`ALTER RANGE default %%[1]s CONFIGURE ZONE %%[2]s 'constraints: {"-node%d"}'`, node)) + run(fmt.Sprintf(`ALTER RANGE default CONFIGURE ZONE = 'constraints: {"-node%d"}'`, node)) - if err := decom(id); err != nil { + if err := decom(nodeID); err != nil { return err } - if err := waitReplicatedAwayFrom(id); err != nil { + if err := waitReplicatedAwayFrom(nodeID); err != nil { return err } @@ -207,15 +266,18 @@ func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { } } + // Wipe the node and re-add to cluster with a new node ID. if err := c.RunE(ctx, c.Node(node), "rm -rf {store-dir}"); err != nil { return err } - db := c.Conn(ctx, 1) + db := c.Conn(ctx, pinnedNode) defer db.Close() - c.Start(ctx, t, c.Node(node), startArgs(fmt.Sprintf("-a=--join %s --attrs=node%d", - c.InternalAddr(ctx, c.Node(nodes))[0], node))) + sArgs := startArgs(fmt.Sprintf("-a=--join %s --attrs=node%d", c.InternalAddr(ctx, c.Node(pinnedNode))[0], node)) + if err := c.StartE(ctx, c.Node(node), sArgs); err != nil { + return err + } } // TODO(tschottdorf): run some ui sanity checks about decommissioned nodes // having disappeared. Verify that the workloads don't dip their qps or @@ -227,468 +289,471 @@ func runDecommission(t *test, c *cluster, nodes int, duration time.Duration) { } } -func registerDecommission(r *testRegistry) { - const numNodes = 4 - duration := time.Hour - - r.Add(testSpec{ - Name: fmt.Sprintf("decommission/nodes=%d/duration=%s", numNodes, duration), - Owner: OwnerKV, - Cluster: makeClusterSpec(numNodes), - Run: func(ctx context.Context, t *test, c *cluster) { - if local { - duration = 3 * time.Minute - t.l.Printf("running with duration=%s in local mode\n", duration) - } - runDecommission(t, c, numNodes, duration) - }, - }) -} - -func execCLI( - ctx context.Context, t *test, c *cluster, runNode int, extraArgs ...string, -) (string, error) { - args := []string{"./cockroach"} - args = append(args, extraArgs...) - args = append(args, "--insecure") - args = append(args, fmt.Sprintf("--port={pgport:%d}", runNode)) - buf, err := c.RunWithBuffer(ctx, t.l, c.Node(runNode), args...) - t.l.Printf("%s\n", buf) - return string(buf), err -} - -func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) { +// runDecommissionRandomized tests a bunch of node +// decommissioning/recommissioning procedures, all the while checking for +// replica movement and appropriate membership status detection behavior. We go +// through partial decommissioning of random nodes, ensuring we're able to undo +// those operations. We then fully decommission nodes, verifying it's an +// irreversible operation. 
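// To make the cycling arithmetic in runDecommission concrete: with the
// default replication factor of 3, numDecom is (3-1)/2 = 1, so the 4-node
// configuration only ever touches the last node. A worked sketch, not part
// of the patch:
nodes, numDecom, node := 4, 1, 4
for i := 0; i < 3; i++ {
	node = nodes - (node % numDecom) // 4 - (4 % 1) == 4 on every iteration
	fmt.Println(node)                // the pinned node 1 is never selected
}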
+func runDecommissionRandomized(ctx context.Context, t *test, c *cluster) {
 	args := startArgs("--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms")
 	c.Put(ctx, cockroach, "./cockroach")
 	c.Start(ctx, t, args)

-	decommission := func(
-		ctx context.Context,
-		runNode int,
-		targetNodes nodeListOption,
-		verbs ...string,
-	) (string, error) {
-		args := []string{"node"}
-		args = append(args, verbs...)
-		for _, target := range targetNodes {
-			args = append(args, strconv.Itoa(target))
-		}
-		return execCLI(ctx, t, c, runNode, args...)
+	h := newDecommTestHelper(t, c)
+
+	firstNodeID := h.nodeIDs[0]
+	retryOpts := retry.Options{
+		InitialBackoff: time.Second,
+		MaxBackoff:     5 * time.Second,
+		Multiplier:     2,
 	}

-	getCsvNumCols := func(csvStr string) (cols int, err error) {
-		reader := csv.NewReader(strings.NewReader(csvStr))
-		records, err := reader.Read()
+	// Partially decommission then recommission a random node, from another
+	// random node. Run a couple of status checks while doing so.
+	{
+		targetNode, runNode := h.getRandNode(), h.getRandNode()
+		t.l.Printf("partially decommissioning n%d from n%d\n", targetNode, runNode)
+		o, err := h.decommission(ctx, c.Node(targetNode), runNode,
+			"--wait=none", "--format=csv")
 		if err != nil {
-			return 0, errors.Errorf("error reading csv input: \n %v\n errors:%s", csvStr, err)
+			t.Fatalf("decommission failed: %v", err)
 		}
-		return len(records), nil
-	}

-	matchCSV := func(csvStr string, matchColRow [][]string) (err error) {
-		defer func() {
+		exp := [][]string{
+			decommissionHeader,
+			{strconv.Itoa(targetNode), "true", `\d+`, "true", "decommissioning", "false"},
+		}
+		if err := h.matchCSV(o, exp); err != nil {
+			t.Fatal(err)
+		}
+
+		// Check that `node status` reflects an ongoing decommissioning status
+		// for the target node.
+		{
+			runNode = h.getRandNode()
+			t.l.Printf("checking that `node status` (from n%d) shows n%d as decommissioning\n",
+				runNode, targetNode)
+			o, err := execCLI(ctx, t, c, runNode, "node", "status", "--format=csv", "--decommission")
 			if err != nil {
-				err = errors.Errorf("csv input:\n%v\nexpected:\n%s\nerrors:%s",
-					csvStr, pretty.Sprint(matchColRow), err)
+				t.Fatalf("node-status failed: %v", err)
 			}
-		}()

-		reader := csv.NewReader(strings.NewReader(csvStr))
-		reader.FieldsPerRecord = -1
-		records, err := reader.ReadAll()
-		if err != nil {
-			return err
+			numCols := h.getCsvNumCols(o)
+			exp := h.expectCell(targetNode-1, /* node IDs are 1-indexed */
+				statusHeaderMembershipColumnIdx, `decommissioning`, c.spec.NodeCount, numCols)
+			if err := h.matchCSV(o, exp); err != nil {
+				t.Fatal(err)
+			}
 		}

-		lr, lm := len(records), len(matchColRow)
-		if lr < lm {
-			return errors.Errorf("csv has %d rows, but expected at least %d", lr, lm)
+		// Recommission the target node, cancelling the in-flight decommissioning
+		// process.
+		{
+			runNode = h.getRandNode()
+			t.l.Printf("recommissioning n%d (from n%d)\n", targetNode, runNode)
+			if _, err := h.recommission(ctx, c.Node(targetNode), runNode); err != nil {
+				t.Fatalf("recommission failed: %v", err)
+			}
 		}

-		// Compare only the last len(matchColRow) records. That is, if we want to
-		// match 4 rows and we have 100 records, we only really compare
-		// records[96:], that is, the last four rows.
-		records = records[lr-lm:]
-
-		for i := range records {
-			if lr, lm := len(records[i]), len(matchColRow[i]); lr != lm {
-				return errors.Errorf("row #%d: csv has %d columns, but expected %d", i+1, lr, lm)
+		// Check that `node status` now reflects an 'active' status for the
+		// target node.
+ // Check that `node status` now reflects an 'active' status for the
+ // target node.
+ {
+ runNode = h.getRandNode()
+ t.l.Printf("checking that `node status` (from n%d) shows n%d as active\n",
+ runNode, targetNode)
+ o, err := execCLI(ctx, t, c, runNode, "node", "status", "--format=csv", "--decommission")
+ if err != nil {
+ t.Fatalf("node-status failed: %v", err)
 }
- for j := range records[i] {
- pat, str := matchColRow[i][j], records[i][j]
- re := regexp.MustCompile(pat)
- if !re.MatchString(str) {
- err = errors.Errorf("%v\nrow #%d, col #%d: found %q which does not match %q",
- err, i+1, j+1, str, pat)
- }
+
+ numCols := h.getCsvNumCols(o)
+ exp := h.expectCell(targetNode-1, /* node IDs are 1-indexed */
+ statusHeaderMembershipColumnIdx, `active`, c.spec.NodeCount, numCols)
+ if err := h.matchCSV(o, exp); err != nil {
+ t.Fatal(err)
 }
 }
- return err
 }

- decommissionHeader := []string{
- "id", "is_live", "replicas", "is_decommissioning", "is_draining",
- }
- decommissionFooter := []string{
- "No more data reported on target nodes. " +
- "Please verify cluster health before removing the nodes.",
- }
-
- // Different output here to be backwards compatible with earlier
- // versions of cockroach (versions pre commit 888813c, which
- // extends the node status command to include locality information).
- statusHeaderWithLocality := []string{
- "id", "address", "sql_address", "build", "started_at", "updated_at", "locality", "is_available", "is_live",
- }
- statusHeaderNoLocality := []string{
- "id", "address", "sql_address", "build", "started_at", "updated_at", "is_available", "is_live",
- }
- statusHeaderNoLocalityNoSQLAddress := []string{
- "id", "address", "build", "started_at", "updated_at", "is_available", "is_live",
- }
- getStatusCsvOutput := func(ids []string, numCols int) [][]string {
- var res [][]string
- switch numCols {
- case len(statusHeaderNoLocality):
- res = append(res, statusHeaderNoLocality)
- case len(statusHeaderWithLocality):
- res = append(res, statusHeaderWithLocality)
- case len(statusHeaderNoLocalityNoSQLAddress):
- res = append(res, statusHeaderNoLocalityNoSQLAddress)
- default:
- t.Fatalf(
- "Expected status output numCols to be either %d, %d or %d, found %d",
- len(statusHeaderNoLocalityNoSQLAddress),
- len(statusHeaderNoLocality),
- len(statusHeaderWithLocality),
- numCols,
- )
+ // Check to see that operators aren't able to decommission into
+ // unavailability. We'll undo the attempted decommissioning event by
+ // recommissioning the targeted nodes.
+ {
+ // Attempt to decommission all the nodes.
+ {
+ runNode := h.getRandNode()
+ t.l.Printf("attempting to decommission all nodes from n%d\n", runNode)
+ o, err := h.decommission(ctx, c.All(), runNode,
+ "--wait=none", "--format=csv")
+ if err != nil {
+ t.Fatalf("decommission failed: %v", err)
+ }
+
+ exp := [][]string{decommissionHeader}
+ for i := 1; i <= c.spec.NodeCount; i++ {
+ rowRegex := []string{strconv.Itoa(i), "true", `\d+`, "true", "decommissioning", "false"}
+ exp = append(exp, rowRegex)
+ }
+ if err := h.matchCSV(o, exp); err != nil {
+ t.Fatal(err)
+ }
 }
- for _, id := range ids {
- build := []string{id}
- for i := 0; i < numCols-1; i++ {
- build = append(build, `.*`)
+
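The exp matrices used throughout are per-cell regular expressions, matched against the CSV that the CLI emits (the matchCSV helper appears later in this diff). A toy, self-contained illustration of the idea, with a hypothetical two-row sample:

package main

import (
	"encoding/csv"
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Hypothetical output shaped like `cockroach node decommission --format=csv`.
	out := "id,is_live,replicas,is_decommissioning,membership,is_draining\n" +
		"2,true,12,true,decommissioning,false\n"
	exp := [][]string{
		{"id", "is_live", "replicas", "is_decommissioning", "membership", "is_draining"},
		{"2", "true", `\d+`, "true", "decommissioning", "false"},
	}

	records, err := csv.NewReader(strings.NewReader(out)).ReadAll()
	if err != nil {
		panic(err)
	}
	ok := true
	for i, row := range records {
		for j, cell := range row {
			// Unlike matchCSV, this sketch anchors each pattern for strictness.
			if !regexp.MustCompile("^(?:" + exp[i][j] + ")$").MatchString(cell) {
				ok = false
				fmt.Printf("row %d, col %d: %q does not match %q\n", i+1, j+1, cell, exp[i][j])
			}
		}
	}
	fmt.Println("matched:", ok)
}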
+ // Check that `node status` reflects an ongoing decommissioning status for
+ // all nodes.
+ {
+ runNode := h.getRandNode()
+ t.l.Printf("checking that `node status` (from n%d) shows all nodes as decommissioning\n",
+ runNode)
+ o, err := execCLI(ctx, t, c, runNode, "node", "status", "--format=csv", "--decommission")
+ if err != nil {
+ t.Fatalf("node-status failed: %v", err)
+ }
+
+ numCols := h.getCsvNumCols(o)
+ var colRegex []string
+ for i := 1; i <= c.spec.NodeCount; i++ {
+ colRegex = append(colRegex, `decommissioning`)
+ }
+ exp := h.expectColumn(statusHeaderMembershipColumnIdx, colRegex, c.spec.NodeCount, numCols)
+ if err := h.matchCSV(o, exp); err != nil {
+ t.Fatal(err)
 }
 }
- res = append(res, build)
- }
- return res
- }

- waitLiveDeprecated := "--wait=live is deprecated and is treated as --wait=all"
+ // Check that we can still do stuff; creating a database should be good
+ // enough.
+ {
+ runNode := h.getRandNode()
+ t.l.Printf("checking that we're able to create a database (from n%d)\n", runNode)
+ db := c.Conn(ctx, runNode)
+ defer db.Close()

- t.l.Printf("decommissioning first node from the second, polling the status manually\n")
- retryOpts := retry.Options{
- InitialBackoff: time.Second,
- MaxBackoff: 5 * time.Second,
- Multiplier: 1,
- }
- if err := retry.WithMaxAttempts(ctx, retryOpts, 20, func() error {
- o, err := decommission(ctx, 2, c.Node(1),
- "decommission", "--wait", "none", "--format", "csv")
- if err != nil {
- t.Fatalf("decommission failed: %v", err)
+ if _, err := db.Exec(`create database still_working;`); err != nil {
+ t.Fatal(err)
+ }
 }

- exp := [][]string{
- decommissionHeader,
- {"1", "true", "0", "true", "false"},
- decommissionFooter,
+ // Cancel in-flight decommissioning process of all nodes.
+ {
+ runNode := h.getRandNode()
+ t.l.Printf("recommissioning all nodes (from n%d)\n", runNode)
+ if _, err := h.recommission(ctx, c.All(), runNode); err != nil {
+ t.Fatalf("recommission failed: %v", err)
+ }
 }
- return matchCSV(o, exp)
- }); err != nil {
- t.Fatal(err)
- }

+ // Check that `node status` now reflects an 'active' status for all
+ // nodes.
+ {
+ runNode := h.getRandNode()
+ t.l.Printf("checking that `node status` (from n%d) shows all nodes as active\n",
+ runNode)
+ o, err := execCLI(ctx, t, c, runNode, "node", "status", "--format=csv", "--decommission")
+ if err != nil {
+ t.Fatalf("node-status failed: %v", err)
+ }

- // Check that even though the node is decommissioned, we still see it (since
- // it remains live) in `node ls`.
- {
- o, err := execCLI(ctx, t, c, 2, "node", "ls", "--format", "csv")
- if err != nil {
- t.Fatalf("node-ls failed: %v", err)
- }
- exp := [][]string{
- {"id"},
- {"1"},
- {"2"},
- {"3"},
- {"4"},
- }
- if err := matchCSV(o, exp); err != nil {
- t.Fatal(err)
+ numCols := h.getCsvNumCols(o)
+ var colRegex []string
+ for i := 1; i <= c.spec.NodeCount; i++ {
+ colRegex = append(colRegex, `active`)
+ }
+ exp := h.expectColumn(statusHeaderMembershipColumnIdx, colRegex, c.spec.NodeCount, numCols)
+ if err := h.matchCSV(o, exp); err != nil {
+ t.Fatal(err)
+ }
 }
 }
- // Ditto `node status`.
+
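The expectColumn and expectCell calls above just build a rows-by-columns matrix of patterns that is the wildcard `.*` everywhere except the cells under test; the real helpers are defined at the bottom of this file. A condensed sketch of the same construction:

package main

import "fmt"

// expectColumn mirrors the helper defined later in this diff: every cell is
// `.*` except the chosen column, which takes the supplied per-row patterns.
func expectColumn(column int, columnRegex []string, numRows, numCols int) [][]string {
	res := make([][]string, numRows)
	for r := 0; r < numRows; r++ {
		row := make([]string, numCols)
		for c := 0; c < numCols; c++ {
			if c == column {
				row[c] = columnRegex[r]
			} else {
				row[c] = `.*`
			}
		}
		res[r] = row
	}
	return res
}

func main() {
	// Expect "active" in (0-indexed) column 2 of every row of a 3-node,
	// 4-column status table.
	for _, row := range expectColumn(2, []string{"active", "active", "active"}, 3, 4) {
		fmt.Println(row)
	}
}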
+ // Fully decommission two random nodes, from a random node, randomly
+ // choosing between using --wait={all,none}. We pin these two nodes so we
+ // don't re-use them in the block after, as they will have been fully
+ // decommissioned and are, by definition, non-operational.
+ decommissionedNodeA := h.getRandNode()
+ decommissionedNodeB := h.getRandNodeOtherThan(decommissionedNodeA)
 {
- o, err := execCLI(ctx, t, c, 2, "node", "status", "--format", "csv")
- if err != nil {
- t.Fatalf("node-status failed: %v", err)
+ targetNodeA, targetNodeB := decommissionedNodeA, decommissionedNodeB
+ if targetNodeB < targetNodeA {
+ targetNodeB, targetNodeA = targetNodeA, targetNodeB
 }
- numCols, err := getCsvNumCols(o)
- if err != nil {
- t.Fatal(err)
- }
- exp := getStatusCsvOutput([]string{`1`, `2`, `3`, `4`}, numCols)
- if err := matchCSV(o, exp); err != nil {
- t.Fatal(err)
+
+ runNode := h.getRandNode()
+ waitStrategy := "all" // Blocking decommission.
+ if i := rand.Intn(2); i == 0 {
+ waitStrategy = "none" // Polling decommission.
 }

- t.l.Printf("recommissioning first node (from third node)\n")
- if _, err := decommission(ctx, 3, c.Node(1), "recommission"); err != nil {
- t.Fatalf("recommission failed: %v", err)
- }
+ t.l.Printf("fully decommissioning [n%d,n%d] from n%d, using --wait=%s\n",
+ targetNodeA, targetNodeB, runNode, waitStrategy)

- t.l.Printf("decommissioning second node from third, using --wait=all\n")
- {
- o, err := decommission(ctx, 3, c.Node(2),
- "decommission", "--wait", "all", "--format", "csv")
- if err != nil {
- t.Fatalf("decommission failed: %v", err)
+ // When using --wait=none, we poll the decommission status.
+ maxAttempts := 50
+ if waitStrategy == "all" {
+ // --wait=all is a one-shot attempt at decommissioning that polls
+ // internally.
+ maxAttempts = 1
 }

- exp := [][]string{
- decommissionHeader,
- {"2", "true", "0", "true", "false"},
- decommissionFooter,
- }
- if err := matchCSV(o, exp); err != nil {
+ // Decommission two nodes.
+ if err := retry.WithMaxAttempts(ctx, retryOpts, maxAttempts, func() error {
+ o, err := h.decommission(ctx, c.Nodes(targetNodeA, targetNodeB), runNode,
+ fmt.Sprintf("--wait=%s", waitStrategy), "--format=csv")
+ if err != nil {
+ t.Fatalf("decommission failed: %v", err)
+ }
+
+ exp := [][]string{
+ decommissionHeader,
+ {strconv.Itoa(targetNodeA), "true", "0", "true", "decommissioned", "false"},
+ {strconv.Itoa(targetNodeB), "true", "0", "true", "decommissioned", "false"},
+ decommissionFooter,
+ }
+ return h.matchCSV(o, exp)
+ }); err != nil {
 t.Fatal(err)
 }
- }
-
- t.l.Printf("recommissioning second node from itself\n")
- if _, err := decommission(ctx, 2, c.Node(2), "recommission"); err != nil {
- t.Fatalf("recommission failed: %v", err)
- }

- // TODO(knz): quit --decommission is deprecated in 20.1. Remove
- // this part of the roachtest in 20.2.
- t.l.Printf("decommissioning third node via `quit --decommission`\n")
- func() {
- // This should not take longer than five minutes, and if it does, it's
- // likely stuck forever and we want to see the output.
- timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
- defer cancel()
- if _, err := execCLI(timeoutCtx, t, c, 3, "quit", "--decommission"); err != nil {
- if timeoutCtx.Err() != nil {
- t.Fatalf("quit --decommission failed: %s", err)
+ // Check that even though two nodes are decommissioned, we still see
+ // them (since they remain live) in `node ls`.
+ { + runNode = h.getRandNode() + t.l.Printf("checking that `node ls` (from n%d) shows all nodes\n", runNode) + o, err := execCLI(ctx, t, c, runNode, "node", "ls", "--format=csv") + if err != nil { + t.Fatalf("node-ls failed: %v", err) + } + exp := [][]string{{"id"}} + for i := 1; i <= c.spec.NodeCount; i++ { + exp = append(exp, []string{strconv.Itoa(i)}) + } + if err := h.matchCSV(o, exp); err != nil { + t.Fatal(err) } - // TODO(tschottdorf): grep the process output for the string announcing success? - t.l.Errorf("WARNING: ignoring error on quit --decommission: %s\n", err) } - }() - // Now that the third node is down and decommissioned, decommissioning it - // again should be a no-op. We do it from node one but as always it doesn't - // matter. - t.l.Printf("checking that other nodes see node three as successfully decommissioned\n") - { - o, err := decommission(ctx, 2, c.Node(3), - "decommission", "--format", "csv") // wait=all is implied - if err != nil { - t.Fatalf("decommission failed: %v", err) + // Ditto for `node status`. + { + runNode = h.getRandNode() + t.l.Printf("checking that `node status` (from n%d) shows all nodes\n", runNode) + o, err := execCLI(ctx, t, c, runNode, "node", "status", "--format=csv") + if err != nil { + t.Fatalf("node-status failed: %v", err) + } + + numCols := h.getCsvNumCols(o) + colRegex := []string{} + for i := 1; i <= c.spec.NodeCount; i++ { + colRegex = append(colRegex, strconv.Itoa(i)) + } + exp := h.expectIDsInStatusOut(colRegex, numCols) + if err := h.matchCSV(o, exp); err != nil { + t.Fatal(err) + } } - exp := [][]string{ - decommissionHeader, - // Expect the same as usual, except this time the node should be draining - // because it shut down cleanly (thanks to `quit --decommission`). It turns - // out that while it will always manage to mark itself as draining during a - // graceful shutdown, gossip may not yet have told this node. It's rare, - // but seems to happen (#41249). - {"3", "true", "0", "true", "true|false"}, - decommissionFooter, + // Attempt to recommission the fully decommissioned nodes (expecting it + // to fail). + { + runNode = h.getRandNode() + t.l.Printf("expected to fail: recommissioning [n%d,n%d] (from n%d)\n", + targetNodeA, targetNodeB, runNode) + if _, err := h.recommission(ctx, c.Nodes(targetNodeA, targetNodeB), runNode); err == nil { + t.Fatal("expected recommission to fail") + } } - if err := matchCSV(o, exp); err != nil { - t.Fatal(err) + + // Decommissioning the same nodes again should be a no-op. We do it from + // a random node. + { + runNode = h.getRandNode() + t.l.Printf("checking that decommissioning [n%d,n%d] (from n%d) is a no-op\n", + targetNodeA, targetNodeB, runNode) + o, err := h.decommission(ctx, c.Nodes(targetNodeA, targetNodeB), runNode, + "--wait=all", "--format=csv") + if err != nil { + t.Fatalf("decommission failed: %v", err) + } + + exp := [][]string{ + decommissionHeader, + {strconv.Itoa(targetNodeA), "true", "0", "true", "decommissioned", "false"}, + {strconv.Itoa(targetNodeB), "true", "0", "true", "decommissioned", "false"}, + decommissionFooter, + } + if err := h.matchCSV(o, exp); err != nil { + t.Fatal(err) + } } - // Bring the node back up. It's still decommissioned, so it won't be of much use. - c.Stop(ctx, c.Node(3)) - c.Start(ctx, t, c.Node(3), args) + // We restart the nodes and attempt to recommission (should still fail). 
+ {
+ runNode = h.getRandNode()
+ t.l.Printf("expected to fail: restarting [n%d,n%d] and attempting to recommission through n%d\n",
+ targetNodeA, targetNodeB, runNode)
+ c.Stop(ctx, c.Nodes(targetNodeA, targetNodeB))
+ c.Start(ctx, t, c.Nodes(targetNodeA, targetNodeB), args)

- // Recommission. Welcome back!
- if _, err = decommission(ctx, 2, c.Node(3), "recommission"); err != nil {
- t.Fatalf("recommission failed: %v", err)
+ if _, err := h.recommission(ctx, c.Nodes(targetNodeA, targetNodeB), runNode); err == nil {
+ t.Fatalf("expected recommission to fail")
+ }
 }
 }

- // Kill the first node and verify that we can decommission it while it's down,
- // bringing it back up to verify that its replicas still get removed.
- t.l.Printf("intentionally killing first node\n")
- c.Stop(ctx, c.Node(1))
- t.l.Printf("decommission first node, starting with it down but restarting it for verification\n")
+ // Decommission a downed node (randomly selected), randomly choosing between
+ // bringing the node back to life or leaving it permanently dead.
+ //
+ // TODO(irfansharif): We could merge this "deadness" check into the
+ // previous block, when fully decommissioning multiple nodes, to reduce the
+ // total number of nodes needed in the cluster.
 {
- o, err := decommission(ctx, 2, c.Node(1),
- "decommission", "--wait", "live")
- if err != nil {
- t.Fatalf("decommission failed: %v", err)
- }
- hasDeprecation := false
- for _, s := range strings.Split(o, "\n") {
- if s == waitLiveDeprecated {
- hasDeprecation = true
- break
- }
+ restartDownedNode := false
+ if i := rand.Intn(2); i == 0 {
+ restartDownedNode = true
 }
- if !hasDeprecation {
- t.Fatal("missing deprecate message for --wait=live")
+
+ if !restartDownedNode {
+ // We want to test decommissioning a truly dead node. Make sure we
+ // don't waste too much time waiting for the node to be recognized
+ // as dead. Note that we don't want to set this number too low or
+ // everything will seem dead to the allocator at all times, so
+ // nothing will ever happen.
+ func() {
+ db := c.Conn(ctx, 1)
+ defer db.Close()
+ const stmt = "SET CLUSTER SETTING server.time_until_store_dead = '1m15s'"
+ if _, err := db.ExecContext(ctx, stmt); err != nil {
+ t.Fatal(err)
+ }
+ }()
 }
- c.Start(ctx, t, c.Node(1), args)
- // Run a second time to wait until the replicas have all been GC'ed.
- // Note that we specify "all" because even though the first node is
- // now running, it may not be live by the time the command runs.
- o, err = decommission(ctx, 2, c.Node(1),
- "decommission", "--wait", "all", "--format", "csv")
- if err != nil {
+
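server.time_until_store_dead controls how long the cluster waits before declaring a silent store dead (the production default is on the order of minutes); the test shortens it so replicas move off the downed node within the test's window. A sketch of tweaking and reading the setting from any SQL session, assuming a hypothetical insecure local node and the lib/pq driver:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical connection string for a local insecure node, as in the test.
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Shorten the store-death timeout, as the test does.
	if _, err := db.Exec(
		"SET CLUSTER SETTING server.time_until_store_dead = '1m15s'",
	); err != nil {
		log.Fatal(err)
	}

	// Read it back.
	var v string
	if err := db.QueryRow(
		"SHOW CLUSTER SETTING server.time_until_store_dead",
	).Scan(&v); err != nil {
		log.Fatal(err)
	}
	log.Printf("server.time_until_store_dead = %s", v)
}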
+ // We also have to exclude the first node since we're going to wipe it
+ // below. Roachprod attempts to initialize a cluster when starting a
+ // "fresh" first node (without an existing bootstrap marker on disk,
+ // which we happen to also be wiping away).
+ targetNode := h.getRandNodeOtherThan(decommissionedNodeA, decommissionedNodeB, firstNodeID)
+ t.l.Printf("intentionally killing n%d to later decommission it when down\n", targetNode)
+ c.Stop(ctx, c.Node(targetNode))
+
+ runNode := h.getRandNodeOtherThan(targetNode)
+ t.l.Printf("decommissioning n%d (from n%d) in absentia\n", targetNode, runNode)
+ if _, err := h.decommission(ctx, c.Node(targetNode), runNode,
+ "--wait=all", "--format=csv"); err != nil {
 t.Fatalf("decommission failed: %v", err)
 }

- exp := [][]string{
- decommissionHeader,
- {"1", "true|false", "0", "true", "false"},
- decommissionFooter,
- }
- if err := matchCSV(o, exp); err != nil {
- t.Fatal(err)
- }
- }
+ if restartDownedNode {
+ t.l.Printf("restarting n%d for verification\n", targetNode)

- // Now we want to test decommissioning a truly dead node. Make sure we don't
- // waste too much time waiting for the node to be recognized as dead. Note that
- // we don't want to set this number too low or everything will seem dead to the
- // allocator at all times, so nothing will ever happen.
- func() {
- db := c.Conn(ctx, 2)
- defer db.Close()
- const stmt = "SET CLUSTER SETTING server.time_until_store_dead = '1m15s'"
- if _, err := db.ExecContext(ctx, stmt); err != nil {
- t.Fatal(err)
+ // Bring targetNode back up to verify that its replicas still get
+ // removed.
+ c.Start(ctx, t, c.Node(targetNode), args)
 }
- }()

- t.l.Printf("intentionally killing first node\n")
- c.Stop(ctx, c.Node(1))
- // It is being decommissioned in absentia, meaning that its replicas are
- // being removed due to deadness. We can't see that reflected in the output
- // since the current mechanism gets its replica counts from what the node
- // reports about itself, so our assertion here is somewhat weak.
- t.l.Printf("decommission first node in absentia using --wait=live\n")
- {
- o, err := decommission(ctx, 3, c.Node(1),
- "decommission", "--wait", "live", "--format", "csv")
+ // Run decommission a second time to wait until the replicas have
+ // all been GC'ed. Note that we specify "all" because even though
+ // the target node is now running, it may not be live by the time
+ // the command runs.
+ o, err := h.decommission(ctx, c.Node(targetNode), runNode,
+ "--wait=all", "--format=csv")
 if err != nil {
 t.Fatalf("decommission failed: %v", err)
 }
- // Note we don't check precisely zero replicas (which the node would write
- // itself, but it's dead). We do check that the node isn't live, though, which
- // is essentially what `--wait=live` waits for.
- // Note that the target node may still be "live" when it's marked as
- // decommissioned, as its replica count may drop to zero faster than
- // liveness times out.
 exp := [][]string{
 decommissionHeader,
- {"1", `true|false`, "0", `true`, `false`},
+ {strconv.Itoa(targetNode), "true|false", "0", "true", "decommissioned", "false"},
 decommissionFooter,
 }
- if err := matchCSV(o, exp); err != nil {
+ if err := h.matchCSV(o, exp); err != nil {
 t.Fatal(err)
 }
- hasDeprecation := false
- for _, s := range strings.Split(o, "\n") {
- if s == waitLiveDeprecated {
- hasDeprecation = true
- break
+
+ if !restartDownedNode {
+ // Check that (at least after a bit) the node disappears from `node
+ // ls` because it is decommissioned and not live.
+ if err := retry.WithMaxAttempts(ctx, retryOpts, 50, func() error { + runNode := h.getRandNodeOtherThan(targetNode) + o, err := execCLI(ctx, t, c, runNode, "node", "ls", "--format=csv") + if err != nil { + t.Fatalf("node-ls failed: %v", err) + } + + var exp [][]string + for i := 1; i <= c.spec.NodeCount; i++ { + exp = append(exp, []string{fmt.Sprintf("[^%d]", targetNode)}) + } + + return h.matchCSV(o, exp) + }); err != nil { + t.Fatal(err) } - } - if !hasDeprecation { - t.Fatal("missing deprecate message for --wait=live") - } - } - // Check that (at least after a bit) the node disappears from `node ls` - // because it is decommissioned and not live. - for { - o, err := execCLI(ctx, t, c, 2, "node", "ls", "--format", "csv") - if err != nil { - t.Fatalf("node-ls failed: %v", err) - } + // Ditto for `node status` + if err := retry.WithMaxAttempts(ctx, retryOpts, 50, func() error { + runNode := h.getRandNodeOtherThan(targetNode) + o, err := execCLI(ctx, t, c, runNode, "node", "status", "--format=csv") + if err != nil { + t.Fatalf("node-status failed: %v", err) + } - exp := [][]string{ - {"id"}, - {"2"}, - {"3"}, - {"4"}, + numCols := h.getCsvNumCols(o) + var expC []string + // We're checking for n-1 rows, where n is the node count. + for i := 1; i < c.spec.NodeCount; i++ { + expC = append(expC, fmt.Sprintf("[^%d].*", targetNode)) + } + exp := h.expectIDsInStatusOut(expC, numCols) + return h.matchCSV(o, exp) + }); err != nil { + t.Fatal(err) + } } - if err := matchCSV(o, exp); err != nil { - time.Sleep(time.Second) - continue - } - break - } - for { - o, err := execCLI(ctx, t, c, 2, "node", "status", "--format", "csv") - if err != nil { - t.Fatalf("node-status failed: %v", err) - } - numCols, err := getCsvNumCols(o) - if err != nil { - t.Fatal(err) - } - exp := getStatusCsvOutput([]string{`2`, `3`, `4`}, numCols) - if err := matchCSV(o, exp); err != nil { - time.Sleep(time.Second) - continue - } - break - } + { + t.l.Printf("wiping n%d and adding it back to the cluster as a new node\n", targetNode) - // Wipe data of node 1 and start it as a new node. - // It will join the cluster with a node id of 5. - // This is done to verify that node status works when a new node is started - // with an address belonging to an old decommissioned node. 
- { - c.Wipe(ctx, c.Node(1)) - c.Start(ctx, t, c.Node(1), startArgs(fmt.Sprintf("-a=--join %s", - c.InternalAddr(ctx, c.Node(2))[0]))) - } + c.Stop(ctx, c.Node(targetNode)) + c.Wipe(ctx, c.Node(targetNode)) - if err := retry.WithMaxAttempts(ctx, retryOpts, 20, func() error { - o, err := execCLI(ctx, t, c, 2, "node", "status", "--format", "csv") - if err != nil { - t.Fatalf("node-status failed: %v", err) + joinNode := targetNode%c.spec.NodeCount + 1 + joinAddr := c.InternalAddr(ctx, c.Node(joinNode))[0] + c.Start(ctx, t, c.Node(targetNode), startArgs( + fmt.Sprintf("-a=--join %s", joinAddr), + )) } - numCols, err := getCsvNumCols(o) - if err != nil { + + if err := retry.WithMaxAttempts(ctx, retryOpts, 50, func() error { + o, err := execCLI(ctx, t, c, h.getRandNode(), "node", "status", "--format=csv") + if err != nil { + t.Fatalf("node-status failed: %v", err) + } + numCols := h.getCsvNumCols(o) + var expC []string + for i := 1; i <= c.spec.NodeCount; i++ { + expC = append(expC, fmt.Sprintf("[^%d].*", targetNode)) + } + exp := h.expectIDsInStatusOut(expC, numCols) + return h.matchCSV(o, exp) + }); err != nil { t.Fatal(err) } - exp := getStatusCsvOutput([]string{`2`, `3`, `4`, `5`}, numCols) - return matchCSV(o, exp) - }); err != nil { - t.Fatal(err) } + // We'll verify the set of events, in order, we expect to get posted to + // system.eventlog. if err := retry.ForDuration(time.Minute, func() error { // Verify the event log has recorded exactly one decommissioned or - // recommissioned event for each commissioning operation. - // - // Spurious errors appear to be possible since we might be trying to - // send RPCs to the (relatively recently) down node: - // - // pq: rpc error: code = Unavailable desc = grpc: the connection is - // unavailable - // - // Seen in https://teamcity.cockroachdb.com/viewLog.html?buildId=344802. - db := c.Conn(ctx, 2) + // recommissioned event for each membership operation. + db := c.Conn(ctx, 1) defer db.Close() rows, err := db.Query(` -SELECT "eventType", "targetID" FROM system.eventlog -WHERE "eventType" IN ($1, $2) ORDER BY timestamp`, - "node_decommissioned", "node_recommissioned", + SELECT "eventType" FROM system.eventlog WHERE "eventType" IN ($1, $2, $3) ORDER BY timestamp + `, "node_decommissioned", "node_decommissioning", "node_recommissioned", ) if err != nil { t.l.Printf("retrying: %v\n", err) @@ -702,49 +767,288 @@ WHERE "eventType" IN ($1, $2) ORDER BY timestamp`, } expMatrix := [][]string{ - {"node_decommissioned", "1"}, - {"node_recommissioned", "1"}, - {"node_decommissioned", "2"}, - {"node_recommissioned", "2"}, - {"node_decommissioned", "3"}, - {"node_recommissioned", "3"}, - {"node_decommissioned", "1"}, + // Partial decommission attempt of a single node. + {"node_decommissioning"}, + {"node_recommissioned"}, + + // Cluster wide decommissioning attempt. + {"node_decommissioning"}, + {"node_decommissioning"}, + {"node_decommissioning"}, + {"node_decommissioning"}, + {"node_decommissioning"}, + {"node_decommissioning"}, + + // Cluster wide recommissioning, to undo previous decommissioning attempt. + {"node_recommissioned"}, + {"node_recommissioned"}, + {"node_recommissioned"}, + {"node_recommissioned"}, + {"node_recommissioned"}, + {"node_recommissioned"}, + + // Full decommission of two nodes. + {"node_decommissioning"}, + {"node_decommissioning"}, + {"node_decommissioned"}, + {"node_decommissioned"}, + + // Full decommission of a single node. 
+ {"node_decommissioning"}, + {"node_decommissioned"}, } if !reflect.DeepEqual(matrix, expMatrix) { - t.Fatalf("unexpected diff(matrix, expMatrix):\n%s", pretty.Diff(matrix, expMatrix)) + t.Fatalf("unexpected diff(matrix, expMatrix):\n%s\n%s\nvs.\n%s", pretty.Diff(matrix, expMatrix), matrix, expMatrix) } return nil }); err != nil { t.Fatal(err) } +} - // Last, verify that the operator can't shoot themselves in the foot by - // accidentally decommissioning all nodes. - // - // Specify wait=none because the command would block forever (the replicas have - // nowhere to go). - if _, err := decommission(ctx, 2, c.All(), "decommission", "--wait", "none"); err != nil { - t.Fatalf("decommission failed: %v", err) +// Header from the output of `cockroach node decommission`. +var decommissionHeader = []string{ + "id", "is_live", "replicas", "is_decommissioning", "membership", "is_draining", +} + +// Footer from the output of `cockroach node decommission`, after successful +// decommission. +var decommissionFooter = []string{ + "No more data reported on target nodes. " + + "Please verify cluster health before removing the nodes.", +} + +// Header from the output of `cockroach node status`. +var statusHeader = []string{ + "id", "address", "sql_address", "build", "started_at", "updated_at", "locality", "is_available", "is_live", +} + +// Header from the output of `cockroach node status --decommission`. +var statusHeaderWithDecommission = []string{ + "id", "address", "sql_address", "build", "started_at", "updated_at", "locality", "is_available", "is_live", + "gossiped_replicas", "is_decommissioning", "membership", "is_draining", +} + +// Index of `membership` column in statusHeaderWithDecommission +const statusHeaderMembershipColumnIdx = 11 + +type decommTestHelper struct { + t *test + c *cluster + nodeIDs []int +} + +func newDecommTestHelper(t *test, c *cluster) *decommTestHelper { + var nodeIDs []int + for i := 1; i <= c.spec.NodeCount; i++ { + nodeIDs = append(nodeIDs, i) + } + return &decommTestHelper{ + t: t, + c: c, + nodeIDs: nodeIDs, } +} - // Check that we can still do stuff. Creating a database should be good enough. - db := c.Conn(ctx, 2) - defer db.Close() +// decommission decommissions the given targetNodes, running the process +// through the specified runNode. +func (h *decommTestHelper) decommission( + ctx context.Context, targetNodes nodeListOption, runNode int, verbs ...string, +) (string, error) { + args := []string{"node", "decommission"} + args = append(args, verbs...) - if _, err := db.Exec(`CREATE DATABASE still_working;`); err != nil { - t.Fatal(err) + if len(targetNodes) == 1 && targetNodes[0] == runNode { + args = append(args, "--self") + } else { + for _, target := range targetNodes { + args = append(args, strconv.Itoa(target)) + } + } + return execCLI(ctx, h.t, h.c, runNode, args...) +} + +// recommission recommissions the given targetNodes, running the process +// through the specified runNode. +func (h *decommTestHelper) recommission( + ctx context.Context, targetNodes nodeListOption, runNode int, verbs ...string, +) (string, error) { + args := []string{"node", "recommission"} + args = append(args, verbs...) + + if len(targetNodes) == 1 && targetNodes[0] == runNode { + args = append(args, "--self") + } else { + for _, target := range targetNodes { + args = append(args, strconv.Itoa(target)) + } + } + return execCLI(ctx, h.t, h.c, runNode, args...) +} + +func elideInsecureDeprecationNotice(csvStr string) string { + // v20.1 introduces a deprecation notice for --insecure. 
Skip over it. + // TODO(knz): Remove this when --insecure is dropped. + // See: https://github.com/cockroachdb/cockroach/issues/53404 + lines := strings.SplitN(csvStr, "\n", 3) + if len(lines) > 0 && strings.HasPrefix(lines[0], "Flag --insecure has been deprecated") { + csvStr = lines[2] + } + return csvStr +} + +// getCsvNumCols returns the number of columns in the given csv string. +func (h *decommTestHelper) getCsvNumCols(csvStr string) (cols int) { + csvStr = elideInsecureDeprecationNotice(csvStr) + reader := csv.NewReader(strings.NewReader(csvStr)) + records, err := reader.Read() + if err != nil { + h.t.Fatal(errors.Errorf("error reading csv input: \n %v\n errors:%s", csvStr, err)) + } + return len(records) +} + +// matchCSV matches a multi-line csv string with the provided regex +// (matchColRow[i][j] will be matched against the i-th line, j-th column). +func (h *decommTestHelper) matchCSV(csvStr string, matchColRow [][]string) (err error) { + defer func() { + if err != nil { + err = errors.Errorf("csv input:\n%v\nexpected:\n%s\nerrors:%s", + csvStr, pretty.Sprint(matchColRow), err) + } + }() + + csvStr = elideInsecureDeprecationNotice(csvStr) + reader := csv.NewReader(strings.NewReader(csvStr)) + reader.FieldsPerRecord = -1 + records, err := reader.ReadAll() + if err != nil { + return err + } + + lr, lm := len(records), len(matchColRow) + if lr < lm { + return errors.Errorf("csv has %d rows, but expected at least %d", lr, lm) + } + + // Compare only the last len(matchColRow) records. That is, if we want to + // match 4 rows and we have 100 records, we only really compare + // records[96:], that is, the last four rows. + records = records[lr-lm:] + + for i := range records { + if lr, lm := len(records[i]), len(matchColRow[i]); lr != lm { + return errors.Errorf("row #%d: csv has %d columns, but expected %d", i+1, lr, lm) + } + for j := range records[i] { + pat, str := matchColRow[i][j], records[i][j] + re := regexp.MustCompile(pat) + if !re.MatchString(str) { + err = errors.Errorf("%v\nrow #%d, col #%d: found %q which does not match %q", + err, i+1, j+1, str, pat) + } + } } + return err +} - // Recommission all nodes. - if _, err := decommission(ctx, 2, c.All(), "recommission"); err != nil { - t.Fatalf("recommission failed: %v", err) +// expectColumn constructs a matching regex for a given column (identified +// by its column index). +func (h *decommTestHelper) expectColumn( + column int, columnRegex []string, numRows, numCols int, +) [][]string { + var res [][]string + for r := 0; r < numRows; r++ { + build := []string{} + for c := 0; c < numCols; c++ { + if c == column { + build = append(build, columnRegex[r]) + } else { + build = append(build, `.*`) + } + } + res = append(res, build) } + return res +} + +// expectCell constructs a matching regex for a given cell (identified by +// its row and column indexes). +func (h *decommTestHelper) expectCell( + row, column int, regex string, numRows, numCols int, +) [][]string { + var res [][]string + for r := 0; r < numRows; r++ { + build := []string{} + for c := 0; c < numCols; c++ { + if r == row && c == column { + build = append(build, regex) + } else { + build = append(build, `.*`) + } + } + res = append(res, build) + } + return res +} + +// expectIDsInStatusOut constructs a matching regex for output of `cockroach +// node status`. It matches against the `id` column in the output generated +// with and without the `--decommission` flag. 
+func (h *decommTestHelper) expectIDsInStatusOut(ids []string, numCols int) [][]string { + var res [][]string + switch numCols { + case len(statusHeader): + res = append(res, statusHeader) + case len(statusHeaderWithDecommission): + res = append(res, statusHeaderWithDecommission) + default: + h.t.Fatalf( + "Expected status output numCols to be one of %d or %d, found %d", + len(statusHeader), + len(statusHeaderWithDecommission), + numCols, + ) + } + for _, id := range ids { + build := []string{id} + for i := 0; i < numCols-1; i++ { + build = append(build, `.*`) + } + res = append(res, build) + } + return res +} + +func (h *decommTestHelper) getRandNode() int { + return h.nodeIDs[rand.Intn(len(h.nodeIDs))] +} - // To verify that all nodes are actually accepting replicas again, decommission - // the first nodes (blocking until it's done). This proves that the other nodes - // absorb the first one's replicas. - if _, err := decommission(ctx, 2, c.Node(1), "decommission"); err != nil { - t.Fatalf("decommission failed: %v", err) +func (h *decommTestHelper) getRandNodeOtherThan(ids ...int) int { + for { + cur := h.nodeIDs[rand.Intn(len(h.nodeIDs))] + inBlockList := false + for _, id := range ids { + if cur == id { + inBlockList = true + } + } + if inBlockList { + continue + } + return cur } } + +func execCLI( + ctx context.Context, t *test, c *cluster, runNode int, extraArgs ...string, +) (string, error) { + args := []string{"./cockroach"} + args = append(args, extraArgs...) + args = append(args, "--insecure") + args = append(args, fmt.Sprintf("--port={pgport:%d}", runNode)) + buf, err := c.RunWithBuffer(ctx, t.l, c.Node(runNode), args...) + t.l.Printf("%s\n", buf) + return string(buf), err +} diff --git a/pkg/cmd/roachtest/disk_full.go b/pkg/cmd/roachtest/disk_full.go deleted file mode 100644 index 76c9a129a016..000000000000 --- a/pkg/cmd/roachtest/disk_full.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package main - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/cockroachdb/cockroach/pkg/util/timeutil" -) - -func registerDiskFull(r *testRegistry) { - r.Add(testSpec{ - Name: "disk-full", - Owner: OwnerKV, - MinVersion: `v2.1.0`, - Skip: "https://github.com/cockroachdb/cockroach/issues/35328#issuecomment-478540195", - Cluster: makeClusterSpec(5), - Run: func(ctx context.Context, t *test, c *cluster) { - if c.isLocal() { - t.spec.Skip = "you probably don't want to fill your local disk" - return - } - - nodes := c.spec.NodeCount - 1 - c.Put(ctx, cockroach, "./cockroach", c.Range(1, nodes)) - c.Put(ctx, workload, "./workload", c.Node(nodes+1)) - c.Start(ctx, t, c.Range(1, nodes)) - - t.Status("running workload") - m := newMonitor(ctx, c, c.Range(1, nodes)) - m.Go(func(ctx context.Context) error { - cmd := fmt.Sprintf( - "./workload run kv --tolerate-errors --init --read-percent=0"+ - " --concurrency=10 --duration=2m {pgurl:1-%d}", - nodes) - c.Run(ctx, c.Node(nodes+1), cmd) - return nil - }) - m.Go(func(ctx context.Context) error { - time.Sleep(30 * time.Second) - const n = 1 - m.ExpectDeath() - t.l.Printf("filling disk on %d\n", n) - // The 100% ballast size will cause the disk to fill up and the ballast - // command to exit with an error. The "|| true" is used to ignore that - // error. - c.Run(ctx, c.Node(n), "./cockroach debug ballast {store-dir}/ballast --size=100% || true") - - // Restart cockroach in a loop for 30s. - for start := timeutil.Now(); timeutil.Since(start) < 30*time.Second; { - if t.Failed() { - return nil - } - t.l.Printf("starting %d when disk is full\n", n) - // We expect cockroach to die during startup, though it might get far - // enough along that the monitor detects the death. - m.ExpectDeath() - if err := c.StartE(ctx, c.Node(n)); err == nil { - t.Fatalf("node successfully started unexpectedly") - } else if strings.Contains(GetStderr(err), "a panic has occurred") { - t.Fatal(err) - } - } - - // Clear the disk full condition and restart cockroach again. - t.l.Printf("clearing full disk on %d\n", n) - c.Run(ctx, c.Node(n), "rm -f {store-dir}/ballast") - // Clear any death expectations that did not occur. 
- m.ResetDeaths() - return c.StartE(ctx, c.Node(n)) - }) - m.Wait() - }, - }) -} diff --git a/pkg/cmd/roachtest/disk_stall.go b/pkg/cmd/roachtest/disk_stall.go index 1129951a2ec7..2d5a65b95b57 100644 --- a/pkg/cmd/roachtest/disk_stall.go +++ b/pkg/cmd/roachtest/disk_stall.go @@ -14,6 +14,7 @@ import ( "context" "fmt" "math/rand" + "runtime" "strings" "time" @@ -32,8 +33,8 @@ func registerDiskStalledDetection(r *testRegistry) { "disk-stalled/log=%t,data=%t", affectsLogDir, affectsDataDir, ), - Owner: OwnerKV, - MinVersion: "v19.1.0", + Owner: OwnerStorage, + MinVersion: "v19.2.0", Cluster: makeClusterSpec(1), Run: func(ctx context.Context, t *test, c *cluster) { runDiskStalledDetection(ctx, t, c, affectsLogDir, affectsDataDir) @@ -46,6 +47,10 @@ func registerDiskStalledDetection(r *testRegistry) { func runDiskStalledDetection( ctx context.Context, t *test, c *cluster, affectsLogDir bool, affectsDataDir bool, ) { + if local && runtime.GOOS != "linux" { + t.Fatalf("must run on linux os, found %s", runtime.GOOS) + } + n := c.Node(1) c.Put(ctx, cockroach, "./cockroach") @@ -100,7 +105,7 @@ func runDiskStalledDetection( out, err := c.RunWithBuffer(ctx, l, n, fmt.Sprintf("timeout --signal 9 %ds env COCKROACH_ENGINE_MAX_SYNC_DURATION_FATAL=true "+ "COCKROACH_ENGINE_MAX_SYNC_DURATION=%s COCKROACH_LOG_MAX_SYNC_DURATION=%s "+ - "./cockroach start --insecure --logtostderr=INFO --store {store-dir}/%s --log-dir {store-dir}/%s", + "./cockroach start-single-node --insecure --logtostderr=INFO --store {store-dir}/%s --log-dir {store-dir}/%s", int(dur.Seconds()), maxDataSync, maxLogSync, dataDir, logDir, ), ) diff --git a/pkg/cmd/roachtest/django.go b/pkg/cmd/roachtest/django.go index 67895192558a..8471e2427f13 100644 --- a/pkg/cmd/roachtest/django.go +++ b/pkg/cmd/roachtest/django.go @@ -16,9 +16,11 @@ import ( "regexp" ) -var djangoReleaseTagRegex = regexp.MustCompile(`^(?P\d+)\.(?P\d+)\.(?P\d+)$`) +var djangoReleaseTagRegex = regexp.MustCompile(`^(?P\d+)\.(?P\d+)(\.(?P\d+))?$`) var djangoCockroachDBReleaseTagRegex = regexp.MustCompile(`^(?P\d+)\.(?P\d+)$`) +var djangoSupportedTag = "cockroach-3.1.x" + func registerDjango(r *testRegistry) { runDjango := func( ctx context.Context, @@ -59,15 +61,15 @@ func registerDjango(r *testRegistry) { c, node, "install dependencies", - `sudo apt-get -qq install make python3.6 libpq-dev python3.6-dev gcc python3-setuptools python-setuptools build-essential`, + `sudo apt-get -qq install make python3.7 libpq-dev python3.7-dev gcc python3-setuptools python-setuptools build-essential`, ); err != nil { t.Fatal(err) } if err := repeatRunE( - ctx, c, node, "set python3.6 as default", ` + ctx, c, node, "set python3.7 as default", ` sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.5 1 - sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.6 2 + sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 2 sudo update-alternatives --config python3`, ); err != nil { t.Fatal(err) @@ -75,7 +77,7 @@ func registerDjango(r *testRegistry) { if err := repeatRunE( ctx, c, node, "install pip", - `curl https://bootstrap.pypa.io/get-pip.py | sudo -H python3.6`, + `curl https://bootstrap.pypa.io/get-pip.py | sudo -H python3.7`, ); err != nil { t.Fatal(err) } @@ -103,14 +105,15 @@ func registerDjango(r *testRegistry) { t.Fatal(err) } c.l.Printf("Latest Django release is %s.", djangoLatestTag) + c.l.Printf("Supported Django release is %s.", djangoSupportedTag) if err := repeatGitCloneE( ctx, t.l, c, - 
"https://github.com/django/django/", + "https://github.com/timgraham/django/", "/mnt/data1/django", - djangoLatestTag, + djangoSupportedTag, node, ); err != nil { t.Fatal(err) @@ -130,7 +133,7 @@ func registerDjango(r *testRegistry) { c, "https://github.com/cockroachdb/django-cockroachdb", "/mnt/data1/django/tests/django-cockroachdb", - djangoCockroachDBLatestTag, + "master", node, ); err != nil { t.Fatal(err) @@ -165,21 +168,21 @@ func registerDjango(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailureList, ignoredlistName, ignoredlist := djangoBlacklists.getLists(version) + blocklistName, expectedFailureList, ignoredlistName, ignoredlist := djangoBlocklists.getLists(version) if expectedFailureList == nil { - t.Fatalf("No django blacklist defined for cockroach version %s", version) + t.Fatalf("No django blocklist defined for cockroach version %s", version) } if ignoredlist == nil { t.Fatalf("No django ignorelist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignoredlist %s", - version, blacklistName, ignoredlistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignoredlist %s", + version, blocklistName, ignoredlistName) // TODO (rohany): move this to a file backed buffer if the output becomes // too large. var fullTestResults []byte for _, testName := range enabledDjangoTests { - t.Status("Running django test app", testName) + t.Status("Running django test app ", testName) // Running the test suite is expected to error out, so swallow the error. rawResults, _ := c.RunWithBuffer( ctx, t.l, node, fmt.Sprintf(djangoRunTestCmd, testName)) @@ -197,13 +200,12 @@ func registerDjango(r *testRegistry) { results := newORMTestsResults() results.parsePythonUnitTestOutput(fullTestResults, expectedFailureList, ignoredlist) results.summarizeAll( - t, "django" /* ormName */, blacklistName, - expectedFailureList, version, djangoLatestTag, + t, "django" /* ormName */, blocklistName, expectedFailureList, version, djangoSupportedTag, ) } r.Add(testSpec{ - MinVersion: "v19.2.0", + MinVersion: "v20.1.0", Name: "django", Owner: OwnerAppDev, Cluster: makeClusterSpec(1, cpu(16)), @@ -217,22 +219,43 @@ func registerDjango(r *testRegistry) { // Test results are only in stderr, so stdout is redirected and printed later. 
const djangoRunTestCmd = ` cd /mnt/data1/django/tests && -python3 runtests.py %[1]s --settings cockroach_settings --parallel 1 -v 2 > %[1]s.stdout +RUNNING_COCKROACH_BACKEND_TESTS=1 python3 runtests.py %[1]s --settings cockroach_settings --parallel 1 -v 2 > %[1]s.stdout ` const cockroachDjangoSettings = ` +from django.test.runner import DiscoverRunner + + DATABASES = { 'default': { 'ENGINE': 'django_cockroachdb', - 'NAME' : 'django_tests', - 'USER' : 'root', - 'PASSWORD' : '', + 'NAME': 'django_tests', + 'USER': 'root', + 'PASSWORD': '', 'HOST': 'localhost', - 'PORT' : 26257, + 'PORT': 26257, + }, + 'other': { + 'ENGINE': 'django_cockroachdb', + 'NAME': 'django_tests2', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': 'localhost', + 'PORT': 26257, }, } SECRET_KEY = 'django_tests_secret_key' PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.MD5PasswordHasher', ] +TEST_RUNNER = '.cockroach_settings.NonDescribingDiscoverRunner' + +class NonDescribingDiscoverRunner(DiscoverRunner): + def get_test_runner_kwargs(self): + return { + 'failfast': self.failfast, + 'resultclass': self.get_resultclass(), + 'verbosity': self.verbosity, + 'descriptions': False, + } ` diff --git a/pkg/cmd/roachtest/django_blacklist.go b/pkg/cmd/roachtest/django_blacklist.go deleted file mode 100644 index 6d7fc17a2952..000000000000 --- a/pkg/cmd/roachtest/django_blacklist.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2019 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package main - -// As of now, we only run a subset of the test apps within the django -// testing suite. The full set we run is below, and should be kept -// in alphabetical order. As more progress is made with adding compatibility, -// more test apps should be added here to prevent against regression. 
-var enabledDjangoTests = []string{ - "app_loading", - "apps", - "admin_changelist", - "admin_custom_urls", - "admin_docs", - "admin_filters", - "admin_inlines", - "admin_ordering", - "admin_utils", - "admin_views", - "aggregation", - "aggregation_regress", - "annotations", - "auth_tests", - "backends", - "base", - "bash_completion", - "basic", - "bulk_create", - "cache", - "check_framework", - "conditional_processing", - "constraints", - "contenttypes_tests", - "custom_columns", - "custom_lookups", - "custom_managers", - "custom_methods", - "custom_migration_operations", - "custom_pk", - "datatypes", - "dates", - "datetimes", - "db_functions", - "db_typecasts", - "db_utils", - "defer", - "defer_regress", - "delete", - "delete_regress", - "distinct_on_fields", - "empty", - "expressions", - "expressions_case", - "expressions_window", - "extra_regress", - "field_defaults", - "field_subclassing", - "file_storage", - "file_uploads", - "filtered_relation", - "fixtures", - "fixtures_model_package", - "fixtures_regress", - "force_insert_update", - "foreign_object", - "forms_tests", - "from_db_value", - "generic_inline_admin", - "generic_relations", - "generic_relations_regress", - "generic_views", - "get_earliest_or_latest", - "get_object_or_404", - "get_or_create", - "i18n", - "indexes", - "inline_formsets", - "inspectdb", - "introspection", - "invalid_models_tests", - "known_related_objects", - "lookup", - "m2m_and_m2o", - "m2m_intermediary", - "m2m_multiple", - "m2m_recursive", - "m2m_regress", - "m2m_signals", - "m2m_through", - "m2m_through_regress", - "m2o_recursive", - "managers_regress", - "many_to_many", - "many_to_one", - "many_to_one_null", - "max_lengths", - "migrate_signals", - "migrations", - "migration_test_data_persistence", - "modeladmin", - "model_fields", - "model_forms", - "model_formsets", - "model_formsets_regress", - "model_indexes", - "model_inheritance", - "model_inheritance_regress", - "model_meta", - "model_options", - "model_package", - "model_regress", - "multiple_database", - "mutually_referential", - "nested_foreign_keys", - "null_fk", - "null_fk_ordering", - "null_queries", - "one_to_one", - "ordering", - "order_with_respect_to", - "or_lookups", - "pagination", - "prefetch_related", - "properties", - "proxy_model_inheritance", - "proxy_models", - "queries", - "queryset_pickle", - "raw_query", - "reserved_names", - "reverse_lookup", - "save_delete_hooks", - "schema", - "select_for_update", - "select_related", - "select_related_onetoone", - "select_related_regress", - "serializers", - "servers", - "signals", - "sitemaps_tests", - "sites_framework", - "sites_tests", - "string_lookup", - "swappable_models", - "syndication_tests", - "test_client", - "test_client_regress", - "test_utils", - "timezones", - "transaction_hooks", - "transactions", - "unmanaged_models", - "update", - "update_only_fields", - "validation", - "view_tests", -} - -var djangoBlacklists = blacklistsForVersion{ - {"v19.2", "djangoBlacklist19_2", djangoBlacklist19_2, "djangoIgnoreList19_2", djangoIgnoreList19_2}, - {"v20.1", "djangoBlacklist20_1", djangoBlacklist20_1, "djangoIgnoreList20_1", djangoIgnoreList20_1}, -} - -// Maintain that this list is alphabetized. 
-var djangoBlacklist20_1 = blacklist{ - "admin_changelist.tests.SeleniumTests.test_add_row_selection": "unknown", - "admin_custom_urls.tests.AdminCustomUrlsTest.test_post_url_continue": "unknown", - "admin_docs.test_views.TestFieldType.test_field_name": "unknown", - "admin_filters.tests.ListFiltersTests.test_two_characters_long_field": "unknown", - "admin_inlines.tests.SeleniumTests.test_delete_stackeds": "unknown", - "admin_utils.tests.UtilsTests.test_values_from_lookup_field": "unknown", - "admin_views.tests.GroupAdminTest.test_group_permission_performance": "unknown", - "admin_views.tests.SecureViewTests.test_secure_view_shows_login_if_not_logged_in": "unknown", - "admin_views.tests.SecureViewTests.test_staff_member_required_decorator_works_with_argument": "unknown", - "app_loading.tests.GetModelsTest.test_get_models_only_returns_installed_models": "unknown", - "apps.tests.AppConfigTests.test_repr": "unknown", - "conditional_processing.tests.ConditionalGet.test_weak_if_none_match": "unknown", - "db_typecasts.tests.DBTypeCasts.test_typeCasts": "unknown", - "field_subclassing.tests.TestDbType.test_db_parameters_respects_db_type": "unknown", - "model_meta.tests.GetFieldsTests.test_get_fields_is_immutable": "unknown", -} - -var djangoBlacklist19_2 = blacklist{ - // Blacklist generated from running the tests above. - "admin_custom_urls.tests.AdminCustomUrlsTest.test_post_url_continue": "unknown", - "admin_docs.test_views.TestFieldType.test_field_name": "unknown", - "admin_filters.tests.ListFiltersTests.test_two_characters_long_field": "unknown", - "admin_inlines.tests.SeleniumTests.test_delete_stackeds": "unknown", - "admin_ordering.tests.TestRelatedFieldsAdminOrdering.test_no_admin_fallback_to_model_ordering": "unknown", - "admin_utils.tests.UtilsTests.test_values_from_lookup_field": "unknown", - "admin_views.tests.AdminViewBasicTest.test_date_hierarchy_timezone_dst": "unknown", - "app_loading.tests.GetModelsTest.test_get_models_only_returns_installed_models": "unknown", - "conditional_processing.tests.ConditionalGet.test_without_conditions": "unknown", - "db_typecasts.tests.DBTypeCasts.test_typeCasts": "unknown", - "field_subclassing.tests.TestDbType.test_db_parameters_respects_db_type": "unknown", - "model_meta.tests.GetFieldsTests.test_get_fields_is_immutable": "unknown", - // TODO (rohany): The postgres_tests suite within Django is not in a automatically - // runnable state right now. 
- //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_and_empty_result": "41334", - //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_and_general": "41334", - //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_and_on_only_false_values": "41334", - //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_and_on_only_true_values": "41334", - //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_or_empty_result": "41334", - //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_or_general": "41334", - //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_or_on_only_false_values": "41334", - //"postgres_tests.test_aggregates.TestGeneralAggregate.test_bit_or_on_only_true_values": "41334", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_corr_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_corr_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_covar_pop_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_covar_pop_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_covar_pop_sample": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_covar_pop_sample_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_avgx_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_avgx_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_avgx_with_related_obj_and_number_as_argument": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_avgy_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_avgy_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_count_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_count_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_intercept_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_intercept_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_r2_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_r2_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_slope_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_slope_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_sxx_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_sxx_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_sxy_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_sxy_general": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_syy_empty_result": "41274", - //"postgres_tests.test_aggregates.TestStatisticsAggregate.test_regr_syy_general": "41274", - //"postgres_tests.test_array.TestOtherTypesExactQuerying.test_exact_decimals": "23468", -} - -var djangoIgnoreList20_1 = blacklist{} - -var djangoIgnoreList19_2 = blacklist{} diff --git a/pkg/cmd/roachtest/django_blocklist.go b/pkg/cmd/roachtest/django_blocklist.go new file mode 100644 index 000000000000..e181e363a110 --- /dev/null +++ 
b/pkg/cmd/roachtest/django_blocklist.go @@ -0,0 +1,200 @@ +// Copyright 2019 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +// As of now, we only run a subset of the test apps within the django +// testing suite. The full set we run is below, and should be kept +// in alphabetical order. As more progress is made with adding compatibility, +// more test apps should be added here to prevent against regression. +var enabledDjangoTests = []string{ + "admin_changelist", + "admin_custom_urls", + "admin_docs", + "admin_filters", + "admin_inlines", + "admin_ordering", + "admin_utils", + "admin_views", + "aggregation", + "aggregation_regress", + "annotations", + "auth_tests", + "backends", + "basic", + "bulk_create", + "cache", + "check_framework", + "conditional_processing", + "constraints", + "contenttypes_tests", + "custom_columns", + "custom_lookups", + "custom_managers", + "custom_methods", + "custom_migration_operations", + "custom_pk", + "datatypes", + "dates", + "datetimes", + "db_functions", + "db_typecasts", + "db_utils", + "defer", + "defer_regress", + "delete", + "delete_regress", + "distinct_on_fields", + "empty", + "expressions", + "expressions_case", + "expressions_window", + "extra_regress", + "field_defaults", + "field_subclassing", + "file_storage", + "file_uploads", + "filtered_relation", + "fixtures", + "fixtures_model_package", + "fixtures_regress", + "force_insert_update", + "foreign_object", + "forms_tests", + "from_db_value", + "generic_inline_admin", + "generic_relations", + "generic_relations_regress", + "generic_views", + "get_earliest_or_latest", + "get_object_or_404", + "get_or_create", + "i18n", + "indexes", + "inline_formsets", + "inspectdb", + "introspection", + "invalid_models_tests", + "known_related_objects", + "lookup", + "m2m_and_m2o", + "m2m_intermediary", + "m2m_multiple", + "m2m_recursive", + "m2m_regress", + "m2m_signals", + "m2m_through", + "m2m_through_regress", + "m2o_recursive", + "managers_regress", + "many_to_many", + "many_to_one", + "many_to_one_null", + "max_lengths", + "migrate_signals", + "migrations", + "migration_test_data_persistence", + "modeladmin", + "model_fields", + "model_forms", + "model_formsets", + "model_formsets_regress", + "model_indexes", + "model_inheritance", + "model_inheritance_regress", + "model_meta", + "model_options", + "model_package", + "model_regress", + "multiple_database", + "mutually_referential", + "nested_foreign_keys", + "null_fk", + "null_fk_ordering", + "null_queries", + "one_to_one", + "ordering", + "order_with_respect_to", + "or_lookups", + "pagination", + "prefetch_related", + "properties", + "proxy_model_inheritance", + "proxy_models", + "queries", + "queryset_pickle", + "raw_query", + "reserved_names", + "reverse_lookup", + "save_delete_hooks", + "schema", + "select_for_update", + "select_related", + "select_related_onetoone", + "select_related_regress", + "serializers", + "servers", + "signals", + "sitemaps_tests", + "sites_framework", + "sites_tests", + "string_lookup", + "swappable_models", + "syndication_tests", + "test_client", + "test_client_regress", + "test_utils", + "timezones", + "transaction_hooks", + "transactions", + "unmanaged_models", + "update", + 
"update_only_fields", + "validation", + "view_tests", +} + +var djangoBlocklists = blocklistsForVersion{ + {"v20.1", "djangoBlocklist20_1", djangoBlocklist20_1, "djangoIgnoreList20_1", djangoIgnoreList20_1}, + {"v20.2", "djangoBlocklist20_2", djangoBlocklist20_2, "djangoIgnoreList20_2", djangoIgnoreList20_2}, +} + +// Maintain that this list is alphabetized. +var djangoBlocklist20_2 = blocklist{} + +var djangoBlocklist20_1 = blocklist{ + "inspectdb.tests.InspectDBTestCase.test_json_field": "unknown", +} + +var djangoIgnoreList20_2 = djangoIgnoreList20_1 + +var djangoIgnoreList20_1 = blocklist{ + "expressions.tests.BasicExpressionsTests.test_boolean_expression_combined": "unknown", + "inspectdb.tests.InspectDBTestCase.test_attribute_name_not_python_keyword": "unknown", + "inspectdb.tests.InspectDBTestCase.test_digits_column_name_introspection": "unknown", + "inspectdb.tests.InspectDBTestCase.test_field_types": "unknown", + "inspectdb.tests.InspectDBTestCase.test_managed_models": "unknown", + "inspectdb.tests.InspectDBTestCase.test_number_field_types": "unknown", + "inspectdb.tests.InspectDBTestCase.test_special_column_name_introspection": "unknown", + "inspectdb.tests.InspectDBTestCase.test_stealth_table_name_filter_option": "unknown", + "inspectdb.tests.InspectDBTestCase.test_table_name_introspection": "unknown", + "inspectdb.tests.InspectDBTestCase.test_table_option": "unknown", + "inspectdb.tests.InspectDBTestCase.test_unique_together_meta": "unknown", + "introspection.tests.IntrospectionTests.test_get_constraints_indexes_orders": "unknown", + "introspection.tests.IntrospectionTests.test_get_table_description_types": "unknown", + "schema.tests.SchemaTests.test_add_field_temp_default": "unknown", + "schema.tests.SchemaTests.test_alter": "unknown", + "schema.tests.SchemaTests.test_alter_field_fk_keeps_index": "unknown", + "schema.tests.SchemaTests.test_alter_field_fk_to_o2o": "unknown", + "schema.tests.SchemaTests.test_alter_numeric_field_keep_null_status": "unknown", + "schema.tests.SchemaTests.test_alter_smallint_pk_to_smallautofield_pk": "unknown", + "schema.tests.SchemaTests.test_db_table": "unknown", + "schema.tests.SchemaTests.test_foreign_key_index_long_names_regression": "unknown", +} diff --git a/pkg/cmd/roachtest/drop.go b/pkg/cmd/roachtest/drop.go index 4178784f7ac2..6f8b24c7538c 100644 --- a/pkg/cmd/roachtest/drop.go +++ b/pkg/cmd/roachtest/drop.go @@ -88,7 +88,7 @@ func registerDrop(r *testRegistry) { t.l.Printf("Node %d space used: %s\n", j, humanizeutil.IBytes(int64(size))) - // Return if the size of the directory is less than 100mb + // Return if the size of the directory is less than expected. 
if size < initDiskSpace { t.Fatalf("Node %d space used: %s less than %s", j, humanizeutil.IBytes(int64(size)), humanizeutil.IBytes(int64(initDiskSpace))) @@ -160,9 +160,7 @@ func registerDrop(r *testRegistry) { warehouses := 100 numNodes := 9 - - // 1GB - initDiskSpace := int(1e9) + initDiskSpace := 256 << 20 // 256 MB r.Add(testSpec{ Name: fmt.Sprintf("drop/tpcc/w=%d,nodes=%d", warehouses, numNodes), @@ -175,9 +173,7 @@ func registerDrop(r *testRegistry) { if local { numNodes = 4 warehouses = 1 - - // 100 MB - initDiskSpace = 1e8 + initDiskSpace = 5 << 20 // 5 MB fmt.Printf("running with w=%d,nodes=%d in local mode\n", warehouses, numNodes) } runDrop(ctx, t, c, warehouses, numNodes, initDiskSpace) diff --git a/pkg/cmd/roachtest/election.go b/pkg/cmd/roachtest/election.go index e243e89d1383..79d92e839ee0 100644 --- a/pkg/cmd/roachtest/election.go +++ b/pkg/cmd/roachtest/election.go @@ -14,6 +14,7 @@ import ( "context" "time" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) @@ -21,9 +22,10 @@ func registerElectionAfterRestart(r *testRegistry) { r.Add(testSpec{ Name: "election-after-restart", Owner: OwnerKV, - Skip: "https://github.com/cockroachdb/cockroach/issues/35047", + Skip: "https://github.com/cockroachdb/cockroach/issues/54246", Cluster: makeClusterSpec(3), Run: func(ctx context.Context, t *test, c *cluster) { + skip.UnderRace(t, "race builds make this test exceed its timeout") t.Status("starting up") c.Put(ctx, cockroach, "./cockroach") c.Start(ctx, t) diff --git a/pkg/cmd/roachtest/encryption.go b/pkg/cmd/roachtest/encryption.go index f431f545a234..2ccf9662bb92 100644 --- a/pkg/cmd/roachtest/encryption.go +++ b/pkg/cmd/roachtest/encryption.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) func registerEncryption(r *testRegistry) { diff --git a/pkg/cmd/roachtest/engine_switch.go b/pkg/cmd/roachtest/engine_switch.go index a5579c95531a..7e0486a05818 100644 --- a/pkg/cmd/roachtest/engine_switch.go +++ b/pkg/cmd/roachtest/engine_switch.go @@ -16,19 +16,20 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/util/version" + "github.com/cockroachdb/errors" _ "github.com/lib/pq" - "github.com/pkg/errors" "golang.org/x/exp/rand" ) func registerEngineSwitch(r *testRegistry) { - runEngineSwitch := func(ctx context.Context, t *test, c *cluster) { + runEngineSwitch := func(ctx context.Context, t *test, c *cluster, additionalArgs ...string) { roachNodes := c.Range(1, c.spec.NodeCount-1) loadNode := c.Node(c.spec.NodeCount) c.Put(ctx, workload, "./workload", loadNode) c.Put(ctx, cockroach, "./cockroach", roachNodes) - pebbleArgs := startArgs("--args=--storage-engine=pebble") - rocksdbArgs := startArgs("--args=--storage-engine=rocksdb") + pebbleArgs := startArgs(append(additionalArgs, "--args=--storage-engine=pebble")...) + rocksdbArgs := startArgs(append(additionalArgs, "--args=--storage-engine=rocksdb")...) 
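+ // additionalArgs lets callers layer extra start flags onto both engine configurations; the encrypted variant registered below passes --encrypt=true.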
c.Start(ctx, t, roachNodes, rocksdbArgs) stageDuration := 1 * time.Minute if local { @@ -39,9 +40,14 @@ func registerEngineSwitch(r *testRegistry) { loadDuration := " --duration=" + (time.Duration(numIters) * stageDuration).String() + var deprecatedWorkloadsStr string + if !t.buildVersion.AtLeast(version.MustParse("v20.2.0")) { + deprecatedWorkloadsStr += " --deprecated-fk-indexes" + } + workloads := []string{ // Currently tpcc is the only one with CheckConsistency. We can add more later. - "./workload run tpcc --tolerate-errors --wait=false --drop --init --warehouses=1 " + loadDuration + " {pgurl:1-%d}", + "./workload run tpcc --tolerate-errors --wait=false --drop --init" + deprecatedWorkloadsStr + " --warehouses=1 " + loadDuration + " {pgurl:1-%d}", } checkWorkloads := []string{ "./workload check tpcc --warehouses=1 --expensive-checks=true {pgurl:1}", @@ -134,10 +140,21 @@ func registerEngineSwitch(r *testRegistry) { r.Add(testSpec{ Name: fmt.Sprintf("engine/switch/nodes=%d", n), Owner: OwnerStorage, + Skip: "rocksdb removed in 21.1", MinVersion: "v20.1.0", Cluster: makeClusterSpec(n + 1), Run: func(ctx context.Context, t *test, c *cluster) { runEngineSwitch(ctx, t, c) }, }) + r.Add(testSpec{ + Name: fmt.Sprintf("engine/switch/encrypted/nodes=%d", n), + Owner: OwnerStorage, + Skip: "rocksdb removed in 21.1", + MinVersion: "v20.1.0", + Cluster: makeClusterSpec(n + 1), + Run: func(ctx context.Context, t *test, c *cluster) { + runEngineSwitch(ctx, t, c, "--encrypt=true") + }, + }) } diff --git a/pkg/cmd/roachtest/fixtures/1/checkpoint-v1.1.tgz b/pkg/cmd/roachtest/fixtures/1/checkpoint-v1.1.tgz new file mode 100644 index 000000000000..ff202183ee00 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/1/checkpoint-v1.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/1/checkpoint-v19.1.tgz b/pkg/cmd/roachtest/fixtures/1/checkpoint-v19.1.tgz new file mode 100644 index 000000000000..766be4e8c809 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/1/checkpoint-v19.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/1/checkpoint-v19.2.tgz b/pkg/cmd/roachtest/fixtures/1/checkpoint-v19.2.tgz new file mode 100644 index 000000000000..16442fbfb34f Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/1/checkpoint-v19.2.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/1/checkpoint-v2.0.tgz b/pkg/cmd/roachtest/fixtures/1/checkpoint-v2.0.tgz new file mode 100644 index 000000000000..c3b17bbbf8c8 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/1/checkpoint-v2.0.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/1/checkpoint-v2.1.tgz b/pkg/cmd/roachtest/fixtures/1/checkpoint-v2.1.tgz new file mode 100644 index 000000000000..a35feaa562c6 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/1/checkpoint-v2.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/1/checkpoint-v20.1.tgz b/pkg/cmd/roachtest/fixtures/1/checkpoint-v20.1.tgz new file mode 100644 index 000000000000..cc1d595ab9db Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/1/checkpoint-v20.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/2/checkpoint-v1.1.tgz b/pkg/cmd/roachtest/fixtures/2/checkpoint-v1.1.tgz new file mode 100644 index 000000000000..da3dfd243b74 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/2/checkpoint-v1.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/2/checkpoint-v19.1.tgz b/pkg/cmd/roachtest/fixtures/2/checkpoint-v19.1.tgz new file mode 100644 index 000000000000..0745a621008b Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/2/checkpoint-v19.1.tgz differ diff --git 
a/pkg/cmd/roachtest/fixtures/2/checkpoint-v19.2.tgz b/pkg/cmd/roachtest/fixtures/2/checkpoint-v19.2.tgz new file mode 100644 index 000000000000..62a76a0f6cf4 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/2/checkpoint-v19.2.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/2/checkpoint-v2.0.tgz b/pkg/cmd/roachtest/fixtures/2/checkpoint-v2.0.tgz new file mode 100644 index 000000000000..30f6e07cfdb7 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/2/checkpoint-v2.0.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/2/checkpoint-v2.1.tgz b/pkg/cmd/roachtest/fixtures/2/checkpoint-v2.1.tgz new file mode 100644 index 000000000000..3a312367dd3d Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/2/checkpoint-v2.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/2/checkpoint-v20.1.tgz b/pkg/cmd/roachtest/fixtures/2/checkpoint-v20.1.tgz new file mode 100644 index 000000000000..47852a58fa1b Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/2/checkpoint-v20.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/3/checkpoint-v1.1.tgz b/pkg/cmd/roachtest/fixtures/3/checkpoint-v1.1.tgz new file mode 100644 index 000000000000..5cb5010e3bb3 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/3/checkpoint-v1.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/3/checkpoint-v19.1.tgz b/pkg/cmd/roachtest/fixtures/3/checkpoint-v19.1.tgz new file mode 100644 index 000000000000..69bec442db40 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/3/checkpoint-v19.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/3/checkpoint-v19.2.tgz b/pkg/cmd/roachtest/fixtures/3/checkpoint-v19.2.tgz new file mode 100644 index 000000000000..39b1d84101b4 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/3/checkpoint-v19.2.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/3/checkpoint-v2.0.tgz b/pkg/cmd/roachtest/fixtures/3/checkpoint-v2.0.tgz new file mode 100644 index 000000000000..d39e2577db9d Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/3/checkpoint-v2.0.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/3/checkpoint-v2.1.tgz b/pkg/cmd/roachtest/fixtures/3/checkpoint-v2.1.tgz new file mode 100644 index 000000000000..1c81fe44212a Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/3/checkpoint-v2.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/3/checkpoint-v20.1.tgz b/pkg/cmd/roachtest/fixtures/3/checkpoint-v20.1.tgz new file mode 100644 index 000000000000..5af45095deb5 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/3/checkpoint-v20.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/4/checkpoint-v1.1.tgz b/pkg/cmd/roachtest/fixtures/4/checkpoint-v1.1.tgz new file mode 100644 index 000000000000..1cd3f76589d0 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/4/checkpoint-v1.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/4/checkpoint-v19.1.tgz b/pkg/cmd/roachtest/fixtures/4/checkpoint-v19.1.tgz new file mode 100644 index 000000000000..b28891bf90a8 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/4/checkpoint-v19.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/4/checkpoint-v19.2.tgz b/pkg/cmd/roachtest/fixtures/4/checkpoint-v19.2.tgz new file mode 100644 index 000000000000..c06e9b146b2b Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/4/checkpoint-v19.2.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/4/checkpoint-v2.0.tgz b/pkg/cmd/roachtest/fixtures/4/checkpoint-v2.0.tgz new file mode 100644 index 000000000000..10c3a8e58f86 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/4/checkpoint-v2.0.tgz differ diff --git 
a/pkg/cmd/roachtest/fixtures/4/checkpoint-v2.1.tgz b/pkg/cmd/roachtest/fixtures/4/checkpoint-v2.1.tgz new file mode 100644 index 000000000000..e480058ad5db Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/4/checkpoint-v2.1.tgz differ diff --git a/pkg/cmd/roachtest/fixtures/4/checkpoint-v20.1.tgz b/pkg/cmd/roachtest/fixtures/4/checkpoint-v20.1.tgz new file mode 100644 index 000000000000..e4926dc70208 Binary files /dev/null and b/pkg/cmd/roachtest/fixtures/4/checkpoint-v20.1.tgz differ diff --git a/pkg/cmd/roachtest/follower_reads.go b/pkg/cmd/roachtest/follower_reads.go index 2a630bdb4ee5..d825db19437f 100644 --- a/pkg/cmd/roachtest/follower_reads.go +++ b/pkg/cmd/roachtest/follower_reads.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ts/tspb" "github.com/cockroachdb/cockroach/pkg/util/httputil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/sync/errgroup" ) @@ -112,7 +112,7 @@ func runFollowerReadsTest(ctx context.Context, t *test, c *cluster) { } panic("data is empty") } - verifySelect := func(ctx context.Context, node, k int, expectError bool, expectedVal int64) func() error { + verifySelect := func(ctx context.Context, node, k int, expectedVal int64) func() error { return func() error { nodeDB := conns[node-1] r := nodeDB.QueryRowContext(ctx, "SELECT v FROM test.test AS OF SYSTEM "+ @@ -123,15 +123,9 @@ func runFollowerReadsTest(ctx context.Context, t *test, c *cluster) { if ctx.Err() != nil { return nil } - if expectError { - return nil - } return err } - if expectError { - return errors.Errorf("failed to get expected error on node %d", node) - } if got != expectedVal { return errors.Errorf("Didn't get expected val on node %d: %v != %v", node, got, expectedVal) @@ -143,7 +137,7 @@ func runFollowerReadsTest(ctx context.Context, t *test, c *cluster) { return func() error { for ctx.Err() == nil { k, v := chooseKV() - err := verifySelect(ctx, node, k, false, v)() + err := verifySelect(ctx, node, k, v)() if err != nil && ctx.Err() == nil { return err } @@ -159,16 +153,6 @@ func runFollowerReadsTest(ctx context.Context, t *test, c *cluster) { if err := g.Wait(); err != nil { t.Fatalf("failed to insert data: %v", err) } - // Verify error on immediate read. - g, gCtx = errgroup.WithContext(ctx) - for i := 1; i <= c.spec.NodeCount; i++ { - // Expect an error performing a historical read at first because the table - // won't have been created yet. - g.Go(verifySelect(gCtx, i, 0, true, 0)) - } - if err := g.Wait(); err != nil { - t.Fatalf("unexpected error performing historical reads: %v", err) - } // Wait for follower_timestamp() historical reads to have data. followerReadDuration, err := computeFollowerReadDuration(ctx, db) if err != nil { @@ -190,7 +174,7 @@ func runFollowerReadsTest(ctx context.Context, t *test, c *cluster) { g, gCtx = errgroup.WithContext(ctx) k, v := chooseKV() for i := 1; i <= c.spec.NodeCount; i++ { - g.Go(verifySelect(gCtx, i, k, false, v)) + g.Go(verifySelect(gCtx, i, k, v)) } if err := g.Wait(); err != nil { t.Fatalf("error verifying node values: %v", err) diff --git a/pkg/cmd/roachtest/gopg.go b/pkg/cmd/roachtest/gopg.go index 50ff74304821..66c830fae692 100644 --- a/pkg/cmd/roachtest/gopg.go +++ b/pkg/cmd/roachtest/gopg.go @@ -14,15 +14,17 @@ import ( "bufio" "bytes" "context" - "errors" "fmt" "regexp" "strconv" "strings" + + "github.com/cockroachdb/errors" ) // Currently, we're running a version like 'v9.0.1'. 
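// The named capture groups identify the numeric components of the release tag.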
var gopgReleaseTagRegex = regexp.MustCompile(`^v(?P<major>\d+)(?:\.(?P<minor>\d+)(?:\.(?P<point>\d+))?)?$`)
+var gopgSupportedTag = "v10.0.1"
// This test runs gopg's full test suite against a single cockroach node.
func registerGopg(r *testRegistry) {
@@ -54,11 +56,12 @@
}
t.Status("cloning gopg and installing prerequisites")
- latestTag, err := repeatGetLatestTag(ctx, c, "go-pg", "pg", gopgReleaseTagRegex)
+ gopgLatestTag, err := repeatGetLatestTag(ctx, c, "go-pg", "pg", gopgReleaseTagRegex)
if err != nil {
t.Fatal(err)
}
- c.l.Printf("Latest gopg release is %s.", latestTag)
+ c.l.Printf("Latest gopg release is %s.", gopgLatestTag)
+ c.l.Printf("Supported gopg release is %s.", gopgSupportedTag)
installLatestGolang(ctx, t, c, node)
@@ -85,27 +88,27 @@
c,
"https://github.com/go-pg/pg.git",
destPath,
- latestTag,
+ gopgSupportedTag,
node,
); err != nil {
t.Fatal(err)
}
- blacklistName, expectedFailures, ignorelistName, ignorelist := gopgBlacklists.getLists(version)
+ blocklistName, expectedFailures, ignorelistName, ignorelist := gopgBlocklists.getLists(version)
if expectedFailures == nil {
- t.Fatalf("No gopg blacklist defined for cockroach version %s", version)
+ t.Fatalf("No gopg blocklist defined for cockroach version %s", version)
}
if ignorelist == nil {
t.Fatalf("No gopg ignorelist defined for cockroach version %s", version)
}
- c.l.Printf("Running cockroach version %s, using blacklist %s, using ignorelist %s",
- version, blacklistName, ignorelistName)
+ c.l.Printf("Running cockroach version %s, using blocklist %s, using ignorelist %s",
+ version, blocklistName, ignorelistName)
_ = c.RunE(ctx, node, fmt.Sprintf("mkdir -p %s", resultsDirPath))
t.Status("running gopg test suite")
// go test provides colorful output which - when redirected - interferes
- // with matching of the blacklisted tests, so we will strip off all color
+ // with matching of the blocklisted tests, so we will strip off all color
// code escape sequences.
const removeColorCodes = `sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]//g"`
// Note that this is expected to return an error, since the test suite
@@ -123,7 +126,7 @@
// gopg test suite consists of multiple tests, some of them being full
// test suites in themselves. Those are run with the TestGinkgo test harness.
// First, we parse the result of running TestGinkgo.
- if err := gormParseTestGinkgoOutput(
+ if err := gopgParseTestGinkgoOutput(
results, rawResults, expectedFailures, ignorelist,
); err != nil {
t.Fatal(err)
@@ -144,7 +147,7 @@
results.parseJUnitXML(t, expectedFailures, ignorelist, xmlResults)
results.summarizeFailed(
- t, "gopg", blacklistName, expectedFailures, version, latestTag,
+ t, "gopg", blocklistName, expectedFailures, version, gopgSupportedTag,
0, /* notRunCount */
)
}
@@ -161,11 +164,11 @@
})
}
-// gormParseTestGinkgoOutput parses the summary of failures of running internal
+// gopgParseTestGinkgoOutput parses the summary of failures of running internal
// test suites from gopg ORM tests. TestGinkgo is a test harness that runs
// several test suites described by gopg.
-func gormParseTestGinkgoOutput( - r *ormTestsResults, rawResults []byte, expectedFailures, ignorelist blacklist, +func gopgParseTestGinkgoOutput( + r *ormTestsResults, rawResults []byte, expectedFailures, ignorelist blocklist, ) (err error) { var ( totalRunCount, totalTestCount int @@ -264,7 +267,7 @@ func gormParseTestGinkgoOutput( } } - // Blacklist contains both the expected failures for "global" tests as well + // Blocklist contains both the expected failures for "global" tests as well // as TestGinkgo's tests. We need to figure the number of the latter ones. testGinkgoExpectedFailures := 0 for failure := range expectedFailures { diff --git a/pkg/cmd/roachtest/gopg_blacklist.go b/pkg/cmd/roachtest/gopg_blocklist.go similarity index 68% rename from pkg/cmd/roachtest/gopg_blacklist.go rename to pkg/cmd/roachtest/gopg_blocklist.go index a09bbf3be334..b9dcd6fff8ee 100644 --- a/pkg/cmd/roachtest/gopg_blacklist.go +++ b/pkg/cmd/roachtest/gopg_blocklist.go @@ -10,9 +10,10 @@ package main -var gopgBlacklists = blacklistsForVersion{ - {"v19.2", "gopgBlackList19_2", gopgBlackList19_2, "gopgIgnoreList19_2", gopgIgnoreList19_2}, - {"v20.1", "gopgBlackList20_1", gopgBlackList20_1, "gopgIgnoreList20_1", gopgIgnoreList20_1}, +var gopgBlocklists = blocklistsForVersion{ + {"v19.2", "gopgBlockList19_2", gopgBlockList19_2, "gopgIgnoreList19_2", gopgIgnoreList19_2}, + {"v20.1", "gopgBlockList20_1", gopgBlockList20_1, "gopgIgnoreList20_1", gopgIgnoreList20_1}, + {"v20.2", "gopgBlockList20_2", gopgBlockList20_2, "gopgIgnoreList20_2", gopgIgnoreList20_2}, } // These are lists of known gopg test errors and failures. @@ -21,12 +22,18 @@ var gopgBlacklists = blacklistsForVersion{ // Any failed test that is not on this list is reported as FAIL - unexpected. // // Please keep these lists alphabetized for easy diffing. -// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. 
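+// Each entry maps a failing test to the GitHub issue that tracks it.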
-var gopgBlackList20_1 = blacklist{ - "pg | CopyFrom/CopyTo | copies corrupted data to a table": "41608", - "pg | CopyFrom/CopyTo | copies data from a table and to a table": "41608", +var gopgBlockList20_2 = gopgBlockList20_1 + +var gopgBlockList20_1 = blocklist{ + "pg | BeforeQuery and AfterQuery CopyFrom | is called for CopyFrom with model": "41608", + "pg | BeforeQuery and AfterQuery CopyFrom | is called for CopyFrom without model": "41608", + "pg | BeforeQuery and AfterQuery CopyTo | is called for CopyTo with model": "41608", + "pg | BeforeQuery and AfterQuery CopyTo | is called for CopyTo without model": "41608", + "pg | CopyFrom/CopyTo | copies corrupted data to a table": "41608", + "pg | CopyFrom/CopyTo | copies data from a table and to a table": "41608", "pg | CountEstimate | works": "17511", "pg | CountEstimate | works when there are no results": "17511", "pg | CountEstimate | works with GROUP": "17511", @@ -36,15 +43,24 @@ var gopgBlackList20_1 = blacklist{ "pg | Listener | reconnects on receive error": "41522", "pg | Listener | returns an error on timeout": "41522", "pg | Listener | supports concurrent Listen and Receive": "41522", - "v9.ExampleDB_Model_postgresArrayStructTag": "32552", - "v9.TestBigColumn": "41608", - "v9.TestConversion": "32552", - "v9.TestGinkgo": "41522", - "v9.TestGocheck": "17511", - "v9.TestUnixSocket": "31113", + "v10.ExampleDB_Model_postgresArrayStructTag": "32552", + "v10.TestBigColumn": "41608", + "v10.TestConversion": "32552", + "v10.TestGinkgo": "41522", + "v10.TestGocheck": "17511", + "v10.TestReadColumnValue": "26925", + "v10.TestUnixSocket": "31113", } -var gopgBlackList19_2 = blacklist{ +var gopgBlockList19_2 = blocklist{ + "pg | BeforeQuery and AfterQuery | CopyFrom is called for CopyFrom with model": "5807", + "pg | BeforeQuery and AfterQuery | CopyFrom is called for CopyFrom without model": "5807", + "pg | BeforeQuery and AfterQuery | CopyTo is called for CopyTo with model": "5807", + "pg | BeforeQuery and AfterQuery | CopyTo is called for CopyTo without model": "5807", + "pg | BeforeQuery and AfterQuery | Model is called for Model": "5807", + "pg | BeforeQuery and AfterQuery | Query/Exec is called for Exec": "5807", + "pg | BeforeQuery and AfterQuery | Query/Exec is called for Query": "5807", + "pg | BeforeQuery and AfterQuery | model params is called for Model": "5807", "pg | CopyFrom/CopyTo | copies corrupted data to a table": "5807", "pg | CopyFrom/CopyTo | copies data from a table and to a table": "5807", "pg | CountEstimate | works": "17511", @@ -82,17 +98,20 @@ var gopgBlackList19_2 = blacklist{ "pg | soft delete with time column | nil model Deleted allows to select deleted model": "5807", "pg | soft delete with time column | nil model ForceDelete deletes the model": "5807", "pg | soft delete with time column | nil model soft deletes the model": "5807", - "v9.ExampleDB_Model_postgresArrayStructTag": "5807", - "v9.TestBigColumn": "5807", - "v9.TestConversion": "32552", - "v9.TestGinkgo": "5807", - "v9.TestGocheck": "5807", - "v9.TestUnixSocket": "31113", + "v10.ExampleDB_Model_postgresArrayStructTag": "32552", + "v10.TestBigColumn": "41608", + "v10.TestConversion": "32552", + "v10.TestGinkgo": "41522", + "v10.TestGocheck": "17511", + "v10.TestReadColumnValue": "26925", + "v10.TestUnixSocket": "31113", } +var gopgIgnoreList20_2 = gopgIgnoreList20_1 + var gopgIgnoreList20_1 = gopgIgnoreList19_2 -var gopgIgnoreList19_2 = blacklist{ +var gopgIgnoreList19_2 = blocklist{ // These "fetching" tests assume a particular order when ORDER BY clause 
is // omitted from the query by the ORM itself. "pg | ORM slice model | fetches Book relations": "41690", @@ -101,8 +120,11 @@ var gopgIgnoreList19_2 = blacklist{ "pg | ORM struct model | fetches Author relations": "41690", "pg | ORM struct model | fetches Book relations": "41690", "pg | ORM struct model | fetches Genre relations": "41690", - // This test assumes different transaction isolation level (READ COMMITTED). - "pg | Tx | supports CopyFrom and CopyIn": "41690", + // Different error message for context cancellation timeout. + "pg | OnConnect | does not panic on timeout": "41690", + // These tests assume different transaction isolation level (READ COMMITTED). + "pg | Tx | supports CopyFrom and CopyIn": "41690", + "pg | Tx | supports CopyFrom and CopyIn with errors": "41690", // These tests sometimes failed and we haven't diagnosed it "pg | DB race | SelectOrInsert with OnConflict is race free": "unknown", "pg | DB race | SelectOrInsert without OnConflict is race free": "unknown", diff --git a/pkg/cmd/roachtest/gossip.go b/pkg/cmd/roachtest/gossip.go index aaca4c4f6d5c..988e36754f31 100644 --- a/pkg/cmd/roachtest/gossip.go +++ b/pkg/cmd/roachtest/gossip.go @@ -27,7 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/httputil" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) func registerGossip(r *testRegistry) { @@ -271,10 +271,15 @@ func runGossipPeerings(ctx context.Context, t *test, c *cluster) { t.l.Printf("%d: restarting node %d\n", i, node[0]) c.Stop(ctx, node) c.Start(ctx, t, node) + // Sleep a bit to avoid hitting: + // https://github.com/cockroachdb/cockroach/issues/48005 + time.Sleep(3 * time.Second) } } func runGossipRestart(ctx context.Context, t *test, c *cluster) { + t.Skip("skipping flaky acceptance/gossip/restart", "https://github.com/cockroachdb/cockroach/issues/48423") + c.Put(ctx, cockroach, "./cockroach") c.Start(ctx, t) @@ -397,6 +402,7 @@ SELECT count(replicas) `./cockroach start --insecure --background --store={store-dir} `+ `--log-dir={log-dir} --cache=10% --max-sql-memory=10% `+ `--listen-addr=:$[{pgport:1}+10000] --http-port=$[{pgport:1}+1] `+ + `--join={pghost:1}:{pgport:1}`+ `> {log-dir}/cockroach.stdout 2> {log-dir}/cockroach.stderr`) if err != nil { t.Fatal(err) @@ -484,9 +490,8 @@ func runCheckLocalityIPAddress(ctx context.Context, t *test, c *cluster) { if !strings.Contains(advertiseAddress, "localhost") { t.Fatal("Expected connect address to contain localhost") } - } else if c.ExternalAddr(ctx, c.Node(nodeID))[0] != advertiseAddress { - t.Fatalf("Connection address is %s but expected %s", - advertiseAddress, c.ExternalAddr(ctx, c.Node(nodeID))[0]) + } else if exp := c.ExternalAddr(ctx, c.Node(nodeID))[0]; exp != advertiseAddress { + t.Fatalf("Connection address is %s but expected %s", advertiseAddress, exp) } } } diff --git a/pkg/cmd/roachtest/hibernate.go b/pkg/cmd/roachtest/hibernate.go index 51a9bb3a6774..c25bb553fd88 100644 --- a/pkg/cmd/roachtest/hibernate.go +++ b/pkg/cmd/roachtest/hibernate.go @@ -10,11 +10,13 @@ package main -import "context" +import ( + "context" + "regexp" +) -// TODO(rafi): Once our fork is merged into the main repo, go back to using -// latest release tag. 
-//var hibernateReleaseTagRegex = regexp.MustCompile(`^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<point>\d+)$`)
+var hibernateReleaseTagRegex = regexp.MustCompile(`^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<point>\d+)$`)
+var supportedHibernateTag = "5.4.20"
// This test runs hibernate-core's full test suite against a single cockroach
// node.
@@ -42,19 +44,15 @@
t.Fatal(err)
}
- // TODO(rafi): Once our fork is merged into the main repo, go back to
- // fetching the latest tag. For now, always use the
- // `HHH-13724-cockroachdb-dialects` branch, where we are building the
- // dialect.
- latestTag := "HHH-13724-cockroachdb-dialects"
- //t.Status("cloning hibernate and installing prerequisites")
- //latestTag, err := repeatGetLatestTag(
- // ctx, c, "hibernate", "hibernate-orm", hibernateReleaseTagRegex,
- //)
- //if err != nil {
- // t.Fatal(err)
- //}
- //c.l.Printf("Latest Hibernate release is %s.", latestTag)
+ t.Status("cloning hibernate and installing prerequisites")
+ latestTag, err := repeatGetLatestTag(
+ ctx, c, "hibernate", "hibernate-orm", hibernateReleaseTagRegex,
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.l.Printf("Latest Hibernate release is %s.", latestTag)
+ c.l.Printf("Supported Hibernate release is %s.", supportedHibernateTag)
if err := repeatRunE(
ctx, c, node, "update apt-get", `sudo apt-get -qq update`,
@@ -78,16 +76,13 @@
t.Fatal(err)
}
- // TODO(rafi): Switch back to using the main hibernate/hibernate-orm repo
- // once the CockroachDB dialect is merged into it. For now, we are using
- // a fork so we can make incremental progress on building the dialect.
if err := repeatGitCloneE(
ctx,
t.l,
c,
- "https://github.com/cockroachdb/hibernate-orm.git",
+ "https://github.com/hibernate/hibernate-orm.git",
"/mnt/data1/hibernate",
- latestTag,
+ supportedHibernateTag,
node,
); err != nil {
t.Fatal(err)
}
@@ -109,11 +104,11 @@
t.Fatal(err)
}
- blacklistName, expectedFailures, _, _ := hibernateBlacklists.getLists(version)
+ blocklistName, expectedFailures, _, _ := hibernateBlocklists.getLists(version)
if expectedFailures == nil {
- t.Fatalf("No hibernate blacklist defined for cockroach version %s", version)
+ t.Fatalf("No hibernate blocklist defined for cockroach version %s", version)
}
- c.l.Printf("Running cockroach version %s, using blacklist %s", version, blacklistName)
+ c.l.Printf("Running cockroach version %s, using blocklist %s", version, blocklistName)
t.Status("running hibernate test suite, will take at least 3 hours")
// When testing, it is helpful to run only a subset of the tests. To do so
@@ -123,8 +118,8 @@
// Also note that this is expected to return an error, since the test suite
// will fail. And it is safe to swallow it here.
_ = c.RunE(ctx, node, - `cd /mnt/data1/hibernate/hibernate-core/ && `+ - `HIBERNATE_CONNECTION_LEAK_DETECTION=true ./../gradlew test -Pdb=cockroachdb`, + `cd /mnt/data1/hibernate/ && `+ + `HIBERNATE_CONNECTION_LEAK_DETECTION=true ./gradlew test -Pdb=cockroachdb`, ) t.Status("collecting the test results") @@ -161,7 +156,7 @@ func registerHibernate(r *testRegistry) { t.l, node, "get list of test files", - `ls /mnt/data1/hibernate/hibernate-core/target/test-results/test/*.xml`, + `ls /mnt/data1/hibernate/*/target/test-results/test/*.xml`, ) if err != nil { t.Fatal(err) @@ -172,7 +167,7 @@ func registerHibernate(r *testRegistry) { parseAndSummarizeJavaORMTestsResults( ctx, t, c, node, "hibernate" /* ormName */, output, - blacklistName, expectedFailures, nil /* ignorelist */, version, latestTag, + blocklistName, expectedFailures, nil /* ignorelist */, version, supportedHibernateTag, ) } diff --git a/pkg/cmd/roachtest/hibernate_blacklist.go b/pkg/cmd/roachtest/hibernate_blocklist.go similarity index 98% rename from pkg/cmd/roachtest/hibernate_blacklist.go rename to pkg/cmd/roachtest/hibernate_blocklist.go index 7bd37061d6ec..210704957f94 100644 --- a/pkg/cmd/roachtest/hibernate_blacklist.go +++ b/pkg/cmd/roachtest/hibernate_blocklist.go @@ -10,119 +10,36 @@ package main -var hibernateBlacklists = blacklistsForVersion{ - {"v2.0", "hibernateBlackList2_0", hibernateBlackList2_0, "", nil}, - {"v2.1", "hibernateBlackList2_1", hibernateBlackList2_1, "", nil}, - {"v2.2", "hibernateBlackList19_1", hibernateBlackList19_1, "", nil}, - {"v19.1", "hibernateBlackList19_1", hibernateBlackList19_1, "", nil}, - {"v19.2", "hibernateBlackList19_2", hibernateBlackList19_2, "", nil}, - {"v20.1", "hibernateBlackList20_1", hibernateBlackList20_1, "", nil}, +var hibernateBlocklists = blocklistsForVersion{ + {"v2.0", "hibernateBlockList2_0", hibernateBlockList2_0, "", nil}, + {"v2.1", "hibernateBlockList2_1", hibernateBlockList2_1, "", nil}, + {"v2.2", "hibernateBlockList19_1", hibernateBlockList19_1, "", nil}, + {"v19.1", "hibernateBlockList19_1", hibernateBlockList19_1, "", nil}, + {"v19.2", "hibernateBlockList19_2", hibernateBlockList19_2, "", nil}, + {"v20.1", "hibernateBlockList20_1", hibernateBlockList20_1, "", nil}, + {"v20.2", "hibernateBlockList20_2", hibernateBlockList20_2, "", nil}, } // Please keep these lists alphabetized for easy diffing. -// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. 
-var hibernateBlackList20_1 = blacklist{} +var hibernateBlockList20_2 = blocklist{} -var hibernateBlackList19_2 = blacklist{} +var hibernateBlockList20_1 = blocklist{ + "org.hibernate.test.typedescriptor.CharInNativeQueryTest.testNativeQuery": "48563", +} -var hibernateBlackList19_1 = blacklist{ - "org.hibernate.jpa.test.criteria.QueryBuilderTest.testDateTimeFunctions": "31708", - "org.hibernate.jpa.test.indetifier.AssignedInitialValueTableGeneratorConfiguredTest.testTheFirstGeneratedIdIsEqualToTableGeneratorInitialValuePlusOne": "6583", - "org.hibernate.jpa.test.indetifier.AssignedInitialValueTableGeneratorConfiguredTest.testTheGeneratedIdValuesAreCorrect": "6583", - "org.hibernate.jpa.test.indetifier.DefaultInitialValueTableGeneratorConfiguredTest.testTheFirstGeneratedIdIsEqualToTableGeneratorInitialValuePlusOne": "6583", - "org.hibernate.jpa.test.indetifier.DefaultInitialValueTableGeneratorConfiguredTest.testTheGeneratedIdValuesAreCorrect": "6583", - "org.hibernate.jpa.test.lock.LockTest.testContendedPessimisticLock": "6583", - "org.hibernate.jpa.test.lock.LockTest.testLockWriteOnUnversioned": "6583", - "org.hibernate.jpa.test.lock.PessimisticWriteWithOptionalOuterJoinBreaksRefreshTest.pessimisticReadWithOptionalOuterJoinBreaksRefreshTest": "6583", - "org.hibernate.jpa.test.lock.PessimisticWriteWithOptionalOuterJoinBreaksRefreshTest.pessimisticWriteWithOptionalOuterJoinBreaksRefreshTest": "6583", - "org.hibernate.jpa.test.lock.QueryLockingTest.testEntityLockModeStateAfterQueryLocking": "6583", - "org.hibernate.jpa.test.query.ConstructorResultNativeQueryTest.testConstructorResultNativeQuery": "unknown", - "org.hibernate.jpa.test.query.ConstructorResultNativeQueryTest.testConstructorResultNativeQuerySpecifyingType": "unknown", - "org.hibernate.jpa.test.query.ConstructorResultNativeQueryTest.testMultipleConstructorResultNativeQuery": "unknown", - "org.hibernate.jpa.test.query.NativeQueryOrdinalParametersTest.testCteNativeQueryOrdinalParameter": "38636", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testAlwaysTransactionalOperations": "6583", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testMergeWhenExtended": "6583", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testRollbackClearPC": "6583", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testRollbackExceptionOnOptimisticLockException": "6583", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testSetRollbackOnlyAndFlush": "6583", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testTransactionAndContains": "6583", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testTransactionCommitDoesNotFlush": "6583", - "org.hibernate.jpa.test.transaction.FlushAndTransactionTest.testTransactionalOperationsWhenExtended": "6583", - "org.hibernate.query.GroupByAliasTest.testCompoundIdAlias": "unknown", - "org.hibernate.query.GroupByAliasTest.testMultiIdAlias": "unknown", - "org.hibernate.query.GroupByAliasTest.testSingleIdAlias": "unknown", - "org.hibernate.test.annotations.derivedidentities.e1.b.specjmapid.IdMapManyToOneSpecjTest.testComplexIdClass": "6583", - "org.hibernate.test.annotations.derivedidentities.e1.b.specjmapid.lazy.CompositeKeyDeleteTest.testRemove": "6583", - "org.hibernate.test.annotations.derivedidentities.e1.b2.IdClassGeneratedValueManyToOneTest.testComplexIdClass": "6583", - "org.hibernate.test.annotations.id.IdTest.testLowAllocationSize": "6583", - "org.hibernate.test.annotations.id.IdTest.testTableGenerator": "6583", - 
"org.hibernate.test.annotations.id.sequences.IdTest.testLowAllocationSize": "6583", - "org.hibernate.test.annotations.id.sequences.IdTest.testTableGenerator": "6583", - "org.hibernate.test.annotations.identifiercollection.IdentifierCollectionTest.testIdBag": "6583", - "org.hibernate.test.annotations.naturalid.ImmutableNaturalKeyLookupTest.testCriteriaWithAliasOneToOneJoin": "6583", - "org.hibernate.test.annotations.naturalid.ImmutableNaturalKeyLookupTest.testCriteriaWithFetchModeJoinCollection": "6583", - "org.hibernate.test.annotations.naturalid.ImmutableNaturalKeyLookupTest.testCriteriaWithFetchModeJoinOnetoOne": "6583", - "org.hibernate.test.annotations.naturalid.ImmutableNaturalKeyLookupTest.testNaturalKeyLookupWithConstraint": "6583", - "org.hibernate.test.annotations.naturalid.ImmutableNaturalKeyLookupTest.testSimpleImmutableNaturalKeyLookup": "6583", - "org.hibernate.test.annotations.naturalid.ImmutableNaturalKeyLookupTest.testSubCriteriaOneToOneJoin": "6583", - "org.hibernate.test.annotations.tableperclass.TablePerClassTest.testUnionSubClass": "6583", - "org.hibernate.test.hql.ASTParserLoadingTest.testStandardFunctions": "31708", - "org.hibernate.test.id.MultipleHiLoPerTableGeneratorTest.testAllParams": "6583", - "org.hibernate.test.id.MultipleHiLoPerTableGeneratorTest.testDistinctId": "6583", - "org.hibernate.test.id.MultipleHiLoPerTableGeneratorTest.testRollingBack": "6583", - "org.hibernate.test.idgen.enhanced.forcedtable.BasicForcedTableSequenceTest.testNormalBoundary": "6583", - "org.hibernate.test.idgen.enhanced.forcedtable.HiLoForcedTableSequenceTest.testNormalBoundary": "6583", - "org.hibernate.test.idgen.enhanced.forcedtable.PooledForcedTableSequenceTest.testNormalBoundary": "6583", - "org.hibernate.test.idgen.enhanced.table.BasicTableTest.testNormalBoundary": "6583", - "org.hibernate.test.idgen.enhanced.table.HiLoTableTest.testNormalBoundary": "6583", - "org.hibernate.test.idgen.enhanced.table.PooledTableTest.testNormalBoundary": "6583", - "org.hibernate.test.idgen.enhanced.table.concurrent.HiloOptimizerConcurrencyTest.testTwoSessionsParallelGeneration": "6583", - "org.hibernate.test.idgen.enhanced.table.concurrent.HiloOptimizerConcurrencyTest.testTwoSessionsSerialGeneration": "6583", - "org.hibernate.test.idgen.namescope.IdGeneratorNamesLocalScopeTest.testNoSequenceGenratorNameClash": "6583", - "org.hibernate.test.insertordering.InsertOrderingWithCascadeOnPersist.testInsertOrderingAvoidingForeignKeyConstraintViolation": "6583", - "org.hibernate.test.joinedsubclass.JoinedSubclassTest.testLockingJoinedSubclass": "6583", - "org.hibernate.test.jpa.compliance.tck2_2.TableGeneratorVisibilityTest.testGeneratorIsVisible": "6583", - "org.hibernate.test.legacy.ABCProxyTest.testSubclassing": "6583", - "org.hibernate.test.legacy.FooBarTest.testVersioning": "6583", - "org.hibernate.test.legacy.FumTest.testCompositeID": "6583", - "org.hibernate.test.legacy.FumTest.testDeleteOwner": "6583", - "org.hibernate.test.legacy.IJ2Test.testUnionSubclass": "6583", - "org.hibernate.test.legacy.IJTest.testFormulaDiscriminator": "6583", - "org.hibernate.test.legacy.MasterDetailTest.testCategories": "6583", - "org.hibernate.test.legacy.MasterDetailTest.testCollectionReplace": "6583", - "org.hibernate.test.legacy.MasterDetailTest.testCollectionReplace2": "6583", - "org.hibernate.test.legacy.MasterDetailTest.testCollectionReplaceOnUpdate": "6583", - "org.hibernate.test.legacy.ParentChildTest.testComplexCriteria": "6583", - "org.hibernate.test.locking.LockModeTest.testQueryUsingLockOptions": "6583", - 
"org.hibernate.test.locking.LockModeTest.testRefreshLockedEntity": "6583", - "org.hibernate.test.locking.LockModeTest.testRefreshWithExplicitLowerLevelLockMode": "6583", - "org.hibernate.test.locking.paging.PagingAndLockingTest.testCriteria": "6583", - "org.hibernate.test.locking.paging.PagingAndLockingTest.testHql": "6583", - "org.hibernate.test.locking.paging.PagingAndLockingTest.testNativeSql": "6583", - "org.hibernate.test.naturalid.mutable.cached.CachedMutableNaturalIdNonStrictReadWriteTest.testReattachementUnmodifiedInstance": "6583", - "org.hibernate.test.naturalid.mutable.cached.CachedMutableNaturalIdStrictReadWriteTest.testReattachementUnmodifiedInstance": "6583", - "org.hibernate.test.naturalid.nullable.NullableNaturalIdTest.testNaturalIdNullValueOnPersist": "6583", - "org.hibernate.test.naturalid.nullable.NullableNaturalIdTest.testNaturalIdQuerySupportingNullValues": "6583", - "org.hibernate.test.naturalid.nullable.NullableNaturalIdTest.testUniqueAssociation": "6583", - "org.hibernate.test.ops.genericApi.BasicGetLoadAccessTest.testIt": "6583", - "org.hibernate.test.ops.genericApi.ProxiedGetLoadAccessTest.testIt": "6583", - "org.hibernate.test.proxy.ProxyTest.testLockUninitializedProxy": "6583", - "org.hibernate.test.proxy.ProxyTest.testRefreshLockInitializedProxy": "6583", - "org.hibernate.test.quote.TableGeneratorQuotingTest.testTableGeneratorQuoting": "16769", - "org.hibernate.test.schemaupdate.SchemaUpdateTest.testSchemaUpdateAndValidation[0]": "24062", - "org.hibernate.test.schemaupdate.SchemaUpdateTest.testSchemaUpdateAndValidation[1]": "24062", - "org.hibernate.test.schemavalidation.LongVarcharValidationTest.testValidation[0]": "16769", - "org.hibernate.test.schemavalidation.LongVarcharValidationTest.testValidation[1]": "16769", - "org.hibernate.test.schemavalidation.NumericValidationTest.testValidation[0]": "16769", - "org.hibernate.test.schemavalidation.NumericValidationTest.testValidation[1]": "16769", - "org.hibernate.test.schemavalidation.matchingtablenames.TableNamesWithUnderscoreTest.testSchemaValidationDoesNotFailDueToAMoreThanOneTableFound": "16769", - "org.hibernate.test.subselect.SubselectTest.testCustomColumnReadAndWrite": "31673", - "org.hibernate.test.subselect.SubselectTest.testEntitySubselect": "31673", - "org.hibernate.test.tool.schema.SchemaToolTransactionHandlingTest.testValidateInExistingJtaTransaction": "16769", +var hibernateBlockList19_2 = blocklist{} + +var hibernateBlockList19_1 = blocklist{ + "org.hibernate.userguide.locking.ExplicitLockingTest.testBuildLockRequest": "unknown", + "org.hibernate.userguide.locking.ExplicitLockingTest.testJPALockScope": "unknown", + "org.hibernate.userguide.locking.ExplicitLockingTest.testJPALockTimeout": "unknown", + "org.hibernate.userguide.mapping.identifier.TableGeneratorConfiguredTest.test": "unknown", + "org.hibernate.userguide.mapping.identifier.TableGeneratorUnnamedTest.test": "unknown", } -var hibernateBlackList2_1 = blacklist{ +var hibernateBlockList2_1 = blocklist{ "org.hibernate.id.hhh12973.SequenceMismatchStrategyDefaultExceptionTest.test": "unknown", "org.hibernate.id.hhh12973.SequenceMismatchStrategyExceptionEnumTest.test": "unknown", "org.hibernate.id.hhh12973.SequenceMismatchStrategyFixWithSequenceGeneratorTest.test": "unknown", @@ -243,7 +160,7 @@ var hibernateBlackList2_1 = blacklist{ "org.hibernate.test.tool.schema.SchemaToolTransactionHandlingTest.testValidateInExistingJtaTransaction": "16769", } -var hibernateBlackList2_0 = blacklist{ +var hibernateBlockList2_0 = blocklist{ 
"org.hibernate.engine.spi.ExtraStateTest.shouldMaintainExtraStateWhenUsingIdentityIdGenerationStrategy": "unknown", "org.hibernate.event.EmbeddableCallbackTest.test": "unknown", "org.hibernate.id.CreateDeleteTest.createAndDeleteAnEntityInTheSameTransactionTest": "unknown", diff --git a/pkg/cmd/roachtest/hotspotsplits.go b/pkg/cmd/roachtest/hotspotsplits.go index 9ad28b6ed329..bb99764fe5e4 100644 --- a/pkg/cmd/roachtest/hotspotsplits.go +++ b/pkg/cmd/roachtest/hotspotsplits.go @@ -17,8 +17,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" _ "github.com/lib/pq" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -42,7 +42,7 @@ func registerHotSpotSplits(r *testRegistry) { m.Go(func() error { t.l.Printf("starting load generator\n") - const blockSize = 1 << 19 // 512 KB + const blockSize = 1 << 18 // 256 KB return c.RunE(ctx, appNode, fmt.Sprintf( "./workload run kv --read-percent=0 --tolerate-errors --concurrency=%d "+ "--min-block-bytes=%d --max-block-bytes=%d --duration=%s {pgurl:1-3}", diff --git a/pkg/cmd/roachtest/import.go b/pkg/cmd/roachtest/import.go index 566a8df68ce9..6aca4c009422 100644 --- a/pkg/cmd/roachtest/import.go +++ b/pkg/cmd/roachtest/import.go @@ -17,7 +17,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/retry" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) func registerImportTPCC(r *testRegistry) { @@ -35,12 +35,11 @@ func registerImportTPCC(r *testRegistry) { hc := NewHealthChecker(c, c.All()) m.Go(hc.Runner) + workloadStr := `./cockroach workload fixtures import tpcc --warehouses=%d --csv-server='http://localhost:8081'` m.Go(func(ctx context.Context) error { defer dul.Done() defer hc.Done() - cmd := fmt.Sprintf( - `./workload fixtures import tpcc --warehouses=%d --csv-server='http://localhost:8081'`, - warehouses) + cmd := fmt.Sprintf(workloadStr, warehouses) c.Run(ctx, c.Node(1), cmd) return nil }) @@ -62,12 +61,12 @@ func registerImportTPCC(r *testRegistry) { const geoWarehouses = 4000 const geoZones = "europe-west2-b,europe-west4-b,asia-northeast1-b,us-west1-b" r.Add(testSpec{ + Skip: "#37349 - OOMing", Name: fmt.Sprintf("import/tpcc/warehouses=%d/geo", geoWarehouses), Owner: OwnerBulkIO, Cluster: makeClusterSpec(8, cpu(16), geo(), zones(geoZones)), Timeout: 5 * time.Hour, Run: func(ctx context.Context, t *test, c *cluster) { - t.Skip("#37349 - OOMing", "" /* details */) runImportTPCC(ctx, t, c, geoWarehouses) }, }) diff --git a/pkg/cmd/roachtest/inconsistency.go b/pkg/cmd/roachtest/inconsistency.go index 5183fb0c5626..58a0d26a0609 100644 --- a/pkg/cmd/roachtest/inconsistency.go +++ b/pkg/cmd/roachtest/inconsistency.go @@ -22,6 +22,7 @@ func registerInconsistency(r *testRegistry) { r.Add(testSpec{ Name: fmt.Sprintf("inconsistency"), Owner: OwnerKV, + Skip: "Uses RocksDB put command; unskip when that's bypassed", MinVersion: "v19.2.2", // https://github.com/cockroachdb/cockroach/pull/42149 is new in 19.2.2 Cluster: makeClusterSpec(3), Run: runInconsistency, @@ -39,7 +40,9 @@ func runInconsistency(ctx context.Context, t *test, c *cluster) { { db := c.Conn(ctx, 1) - _, err := db.ExecContext(ctx, `SET CLUSTER SETTING server.consistency_check.interval = '10ms'`) + // Disable consistency checks. We're going to be introducing an inconsistency and wish for it to be detected when + // we've set up the test to expect it. 
+ _, err := db.ExecContext(ctx, `SET CLUSTER SETTING server.consistency_check.interval = '0'`) if err != nil { t.Fatal(err) } @@ -85,6 +88,22 @@ func runInconsistency(ctx context.Context, t *test, c *cluster) { } return nil }) + + time.Sleep(10 * time.Second) // wait for n1-n3 to all be known as live to each other + + // set an aggressive consistency check interval, but only now (that we're + // reasonably sure all nodes are live, etc). This makes sure that the consistency + // check runs against all three nodes. If it targeted only two nodes, a random + // one would fatal - not what we want. + { + db := c.Conn(ctx, 2) + _, err := db.ExecContext(ctx, `SET CLUSTER SETTING server.consistency_check.interval = '10ms'`) + if err != nil { + t.Fatal(err) + } + _ = db.Close() + } + if err := m.WaitE(); err == nil { t.Fatal("expected a node to crash") } diff --git a/pkg/cmd/roachtest/interleavedpartitioned.go b/pkg/cmd/roachtest/interleavedpartitioned.go index c19b2a32bcae..7bb885d8d17a 100644 --- a/pkg/cmd/roachtest/interleavedpartitioned.go +++ b/pkg/cmd/roachtest/interleavedpartitioned.go @@ -120,13 +120,13 @@ func registerInterleaved(r *testRegistry) { r.Add(testSpec{ Name: "interleavedpartitioned", Owner: OwnerPartitioning, - Cluster: makeClusterSpec(12, geo(), zones("us-west1-b,us-east4-b,us-central1-a")), + Cluster: makeClusterSpec(12, geo(), zones("us-east1-b,us-west1-b,europe-west2-b")), Run: func(ctx context.Context, t *test, c *cluster) { runInterleaved(ctx, t, c, config{ - eastName: `us-east4-b`, + eastName: `europe-west2-b`, westName: `us-west1-b`, - centralName: `us-central1-a`, + centralName: `us-east1-b`, // us-east is central between us-west and eu-west initSessions: 1000, insertPercent: 80, retrievePercent: 10, diff --git a/pkg/cmd/roachtest/java_helpers.go b/pkg/cmd/roachtest/java_helpers.go index c313ab26c12e..e975fa32501b 100644 --- a/pkg/cmd/roachtest/java_helpers.go +++ b/pkg/cmd/roachtest/java_helpers.go @@ -18,14 +18,14 @@ import ( "strings" ) -var issueRegexp = regexp.MustCompile(`See: https://github.com/cockroachdb/cockroach/issues/(\d+)`) +var issueRegexp = regexp.MustCompile(`See: https://[^\s]+issues?/(\d+)`) type status int const ( - pass status = iota - fail - skip + statusPass status = iota + statusFail + statusSkip ) // extractFailureFromJUnitXML parses an XML report to find all failed tests. The @@ -72,11 +72,11 @@ func extractFailureFromJUnitXML(contents []byte) ([]string, []status, map[string testPassed := len(testCase.Failure.Message) == 0 && len(testCase.Error.Message) == 0 tests = append(tests, testName) if testCase.Skipped != nil { - testStatuses = append(testStatuses, skip) + testStatuses = append(testStatuses, statusSkip) } else if testPassed { - testStatuses = append(testStatuses, pass) + testStatuses = append(testStatuses, statusPass) } else { - testStatuses = append(testStatuses, fail) + testStatuses = append(testStatuses, statusFail) message := testCase.Failure.Message if len(message) == 0 { message = testCase.Error.Message @@ -114,7 +114,7 @@ func extractFailureFromJUnitXML(contents []byte) ([]string, []status, map[string // parseJUnitXML parses testOutputInJUnitXMLFormat and updates the receiver // accordingly. 
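// Each test is classified as pass, fail, or skip and reconciled against the expected failures and the ignorelist.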
func (r *ormTestsResults) parseJUnitXML( - t *test, expectedFailures, ignorelist blacklist, testOutputInJUnitXMLFormat []byte, + t *test, expectedFailures, ignorelist blocklist, testOutputInJUnitXMLFormat []byte, ) { tests, statuses, issueHints, err := extractFailureFromJUnitXML(testOutputInJUnitXMLFormat) if err != nil { @@ -140,24 +140,24 @@ func (r *ormTestsResults) parseJUnitXML( case expectedIgnored: r.results[test] = fmt.Sprintf("--- IGNORE: %s due to %s (expected)", test, ignoredIssue) r.ignoredCount++ - case status == skip: + case status == statusSkip: r.results[test] = fmt.Sprintf("--- SKIP: %s", test) r.skipCount++ - case status == pass && !expectedFailure: + case status == statusPass && !expectedFailure: r.results[test] = fmt.Sprintf("--- PASS: %s (expected)", test) r.passExpectedCount++ - case status == pass && expectedFailure: + case status == statusPass && expectedFailure: r.results[test] = fmt.Sprintf("--- PASS: %s - %s (unexpected)", test, maybeAddGithubLink(issue), ) r.passUnexpectedCount++ - case status == fail && expectedFailure: + case status == statusFail && expectedFailure: r.results[test] = fmt.Sprintf("--- FAIL: %s - %s (expected)", test, maybeAddGithubLink(issue), ) r.failExpectedCount++ r.currentFailures = append(r.currentFailures, test) - case status == fail && !expectedFailure: + case status == statusFail && !expectedFailure: r.results[test] = fmt.Sprintf("--- FAIL: %s - %s (unexpected)", test, maybeAddGithubLink(issue)) r.failUnexpectedCount++ @@ -170,7 +170,7 @@ func (r *ormTestsResults) parseJUnitXML( // parseAndSummarizeJavaORMTestsResults parses the test output of running a // test suite for some Java ORM against cockroach and summarizes it. If an // unexpected result is observed (for example, a test unexpectedly failed or -// passed), a new blacklist is populated. +// passed), a new blocklist is populated. func parseAndSummarizeJavaORMTestsResults( ctx context.Context, t *test, @@ -178,11 +178,11 @@ func parseAndSummarizeJavaORMTestsResults( node nodeListOption, ormName string, testOutput []byte, - blacklistName string, - expectedFailures blacklist, - ignorelist blacklist, + blocklistName string, + expectedFailures blocklist, + ignorelist blocklist, version string, - latestTag string, + tag string, ) { results := newORMTestsResults() filesRaw := strings.Split(string(testOutput), "\n") @@ -214,6 +214,6 @@ func parseAndSummarizeJavaORMTestsResults( } results.summarizeAll( - t, ormName, blacklistName, expectedFailures, version, latestTag, + t, ormName, blocklistName, expectedFailures, version, tag, ) } diff --git a/pkg/cmd/roachtest/jepsen.go b/pkg/cmd/roachtest/jepsen.go index b5195abfce69..a8fc3c003457 100644 --- a/pkg/cmd/roachtest/jepsen.go +++ b/pkg/cmd/roachtest/jepsen.go @@ -241,7 +241,8 @@ cd /mnt/data1/jepsen/cockroachdb && set -eo pipefail && \ // downloading logs. `-e "clojure.lang.ExceptionInfo: clj-ssh scp failure" `+ // And sometimes the analysis succeeds and yet we still get an error code for some reason. - `-e "Everything looks good"`, + `-e "Everything looks good" `+ + `-e "RuntimeException: Connection to"`, // timeout ); err == nil { t.l.Printf("Recognized BrokenBarrier or other known exceptions (see grep output above). " + "Ignoring it and considering the test successful. 
" + @@ -311,8 +312,10 @@ func registerJepsen(r *testRegistry) { for _, nemesis := range jepsenNemeses { nemesis := nemesis // copy for closure spec := testSpec{ - Name: fmt.Sprintf("jepsen/%s/%s", testName, nemesis.name), - Owner: OwnerKV, + Name: fmt.Sprintf("jepsen/%s/%s", testName, nemesis.name), + // We don't run jepsen on older releases due to the high rate of flakes. + MinVersion: "v20.1.0", + Owner: OwnerKV, // The Jepsen tests do funky things to machines, like muck with the // system clock; therefore, their clusters cannot be reused other tests // except the Jepsen ones themselves which reset all this state when diff --git a/pkg/cmd/roachtest/kv.go b/pkg/cmd/roachtest/kv.go index a3304d4c4ce3..a8ff76db86db 100644 --- a/pkg/cmd/roachtest/kv.go +++ b/pkg/cmd/roachtest/kv.go @@ -192,7 +192,7 @@ func registerKVContention(r *testRegistry) { r.Add(testSpec{ Name: fmt.Sprintf("kv/contention/nodes=%d", nodes), Owner: OwnerKV, - MinVersion: "v19.2.0", + MinVersion: "v20.1.0", Cluster: makeClusterSpec(nodes + 1), Run: func(ctx context.Context, t *test, c *cluster) { c.Put(ctx, cockroach, "./cockroach", c.Range(1, nodes)) @@ -202,14 +202,7 @@ func registerKVContention(r *testRegistry) { // If requests ever get stuck on a transaction that was abandoned // then it will take 10m for them to get unstuck, at which point the // QPS threshold check in the test is guaranteed to fail. - // - // Additionally, ensure that even transactions that issue a 1PC - // batch begin heartbeating. This ensures that if they end up in - // part of a dependency cycle, they can never be expire without - // being actively aborted. - args := startArgs( - "--env=COCKROACH_TXN_LIVENESS_HEARTBEAT_MULTIPLIER=600 COCKROACH_TXN_HEARTBEAT_DURING_1PC=true", - ) + args := startArgs("--env=COCKROACH_TXN_LIVENESS_HEARTBEAT_MULTIPLIER=600") c.Start(ctx, t, args, c.Range(1, nodes)) conn := c.Conn(ctx, 1) @@ -254,9 +247,9 @@ func registerKVContention(r *testRegistry) { // Assert that the average throughput stayed above a certain // threshold. In this case, assert that max throughput only - // dipped below 100 qps for 5% of the time. - const minQPS = 100 - verifyTxnPerSecond(ctx, c, t, c.Node(1), start, end, minQPS, 0.05) + // dipped below 50 qps for 10% of the time. + const minQPS = 50 + verifyTxnPerSecond(ctx, c, t, c.Node(1), start, end, minQPS, 0.1) return nil }) m.Wait() @@ -333,6 +326,7 @@ func registerKVQuiescenceDead(r *testRegistry) { // other earlier kv invocation's footsteps. 
run(kv+" --seed 2 {pgurl:1}", true) }) + c.Start(ctx, t, c.Node(nodes)) // satisfy dead node detector, even if test fails below if minFrac, actFrac := 0.8, qpsOneDown/qpsAllUp; actFrac < minFrac { t.Fatalf( @@ -341,17 +335,17 @@ func registerKVQuiescenceDead(r *testRegistry) { ) } t.l.Printf("QPS went from %.2f to %2.f with one node down\n", qpsAllUp, qpsOneDown) - c.Start(ctx, t, c.Node(nodes)) // satisfy dead node detector }, }) } func registerKVGracefulDraining(r *testRegistry) { r.Add(testSpec{ - Skip: "https://github.com/cockroachdb/cockroach/issues/33501", - Name: "kv/gracefuldraining/nodes=3", - Owner: OwnerKV, - Cluster: makeClusterSpec(4), + Name: "kv/gracefuldraining/nodes=3", + Owner: OwnerKV, + Cluster: makeClusterSpec(4), + Skip: "flaky", + SkipDetails: "https://github.com/cockroachdb/cockroach/issues/53760", Run: func(ctx context.Context, t *test, c *cluster) { nodes := c.spec.NodeCount - 1 c.Put(ctx, cockroach, "./cockroach", c.Range(1, nodes)) @@ -596,7 +590,7 @@ func registerKVRangeLookups(r *testRegistry) { } close(doneInit) concurrency := ifLocal("", " --concurrency="+fmt.Sprint(nodes*64)) - duration := " --duration=" + ifLocal("10s", "10m") + duration := " --duration=10m" readPercent := " --read-percent=50" // We run kv with --tolerate-errors, since the relocate workload is // expected to create `result is ambiguous (replica removed)` errors. @@ -644,7 +638,7 @@ func registerKVRangeLookups(r *testRegistry) { EXPERIMENTAL_RELOCATE SELECT ARRAY[$1, $2, $3], CAST(floor(random() * 9223372036854775808) AS INT) `, newReplicas[0]+1, newReplicas[1]+1, newReplicas[2]+1) - if err != nil && !pgerror.IsSQLRetryableError(err) && !isExpectedRelocateError(err) { + if err != nil && !pgerror.IsSQLRetryableError(err) && !IsExpectedRelocateError(err) { return err } default: diff --git a/pkg/cmd/roachtest/kvbench.go b/pkg/cmd/roachtest/kvbench.go index 0e4ebc1853b7..6a56d451bf63 100644 --- a/pkg/cmd/roachtest/kvbench.go +++ b/pkg/cmd/roachtest/kvbench.go @@ -21,9 +21,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/search" "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "github.com/cockroachdb/errors" "github.com/cockroachdb/ttycolor" "github.com/codahale/hdrhistogram" - "github.com/pkg/errors" ) // kvBenchKeyDistribution represents the distribution of keys generated by `workload`. @@ -80,7 +80,13 @@ func registerKVBenchSpec(r *testRegistry, b kvBenchSpec) { name := strings.Join(nameParts, "/") nodes := makeClusterSpec(b.Nodes+1, opts...) r.Add(testSpec{ - Name: name, + Name: name, + // These tests don't have pass/fail conditions so we don't want to run them + // nightly. Currently they're only good for printing the results of a search + // for --max-rate. + // TODO(andrei): output something to roachperf and start running them + // nightly. 
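+		// Tagged tests are left out of the default roachtest selection, so
+		// these only run when requested explicitly (e.g. by passing a
+		// `tag:manual` filter, assuming the usual roachtest tag-filter syntax).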
+ Tags: []string{"manual"}, Owner: OwnerKV, Cluster: nodes, Run: func(ctx context.Context, t *test, c *cluster) { @@ -314,18 +320,22 @@ func runKVBench(ctx context.Context, t *test, c *cluster, b kvBenchSpec) { } close(resultChan) res := <-resultChan - failErr := res.failureError(b) - if failErr == nil { - ttycolor.Stdout(ttycolor.Green) - t.l.Printf(`--- PASS: kv workload maintained an average latency of %0.1fms`+ - ` with avg throughput of %d`, res.latency(), res.throughput()) + + var color ttycolor.Code + var msg string + pass := res.latency() <= b.LatencyThresholdMs + if pass { + color = ttycolor.Green + msg = "PASS" } else { - ttycolor.Stdout(ttycolor.Red) - t.l.Printf(`--- FAIL: kv workload maintained an average latency of %0.1fms (threshold: %0.1fms)`+ - ` with avg throughput of %d`, res.latency(), b.LatencyThresholdMs, res.throughput()) + color = ttycolor.Red + msg = "FAIL" } + ttycolor.Stdout(color) + t.l.Printf(`--- SEARCH ITER %s: kv workload avg latency: %0.1fms (threshold: %0.1fms), avg throughput: %d`, + msg, res.latency(), b.LatencyThresholdMs, res.throughput()) ttycolor.Stdout(ttycolor.Reset) - return failErr == nil, nil + return pass, nil } if res, err := s.Search(searchPredicate); err != nil { t.Fatal(err) @@ -381,10 +391,3 @@ func (r kvBenchResult) throughput() int { // compute the average throughput here but not much more than that. return int(float64(r.Cumulative[`write`].TotalCount()) / r.Elapsed.Seconds()) } - -func (r kvBenchResult) failureError(b kvBenchSpec) error { - if r.latency() <= b.LatencyThresholdMs { - return nil - } - return errors.Errorf(`average latency is too high %0.1fms`, r.latency()) -} diff --git a/pkg/cmd/roachtest/libpq.go b/pkg/cmd/roachtest/libpq.go index cc3776581bb4..c71c8b829616 100644 --- a/pkg/cmd/roachtest/libpq.go +++ b/pkg/cmd/roachtest/libpq.go @@ -81,11 +81,11 @@ func registerLibPQ(r *testRegistry) { _ = c.RunE(ctx, node, fmt.Sprintf("mkdir -p %s", resultsDir)) - blacklistName, expectedFailures, ignorelistName, ignoredFailures := libPQBlacklists.getLists(version) + blocklistName, expectedFailures, ignorelistName, ignoredFailures := libPQBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No lib/pq blacklist defined for cockroach version %s", version) + t.Fatalf("No lib/pq blocklist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignorelist %s", version, blacklistName, ignorelistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignorelist %s", version, blocklistName, ignorelistName) t.Status("running lib/pq test suite and collecting results") @@ -98,7 +98,7 @@ func registerLibPQ(r *testRegistry) { parseAndSummarizeJavaORMTestsResults( ctx, t, c, node, "lib/pq" /* ormName */, []byte(resultsPath), - blacklistName, expectedFailures, ignoredFailures, version, latestTag, + blocklistName, expectedFailures, ignoredFailures, version, latestTag, ) } diff --git a/pkg/cmd/roachtest/libpq_blacklist.go b/pkg/cmd/roachtest/libpq_blacklist.go deleted file mode 100644 index 2156e25cb778..000000000000 --- a/pkg/cmd/roachtest/libpq_blacklist.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2019 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package main - -var libPQBlacklists = blacklistsForVersion{ - {"v19.2", "libPQBlacklist19_2", libPQBlacklist19_2, "libPQIgnorelist19_2", libPQIgnorelist19_2}, - {"v20.1", "libPQBlacklist20_1", libPQBlacklist20_1, "libPQIgnorelist20_1", libPQIgnorelist20_1}, -} - -var libPQBlacklist20_1 = blacklist{ - "pq.TestBinaryByteSliceToInt": "41547", - "pq.TestBinaryByteSlicetoUUID": "41547", - "pq.TestByteaOutputFormats": "26947", - "pq.TestConnListen": "41522", - "pq.TestConnUnlisten": "41522", - "pq.TestConnUnlistenAll": "41522", - "pq.TestContextCancelBegin": "41335", - "pq.TestContextCancelExec": "41335", - "pq.TestContextCancelQuery": "41335", - "pq.TestCopyFromError": "5807", - "pq.TestCopyInBinaryError": "5807", - "pq.TestCopyInRaiseStmtTrigger": "5807", - "pq.TestCopyInTypes": "5807", - "pq.TestCopyRespLoopConnectionError": "5807", - "pq.TestEncodeAndParseTs": "41563", - "pq.TestErrorDuringStartup": "41551", - "pq.TestInfinityTimestamp": "41564", - "pq.TestIssue186": "41558", - "pq.TestIssue196": "41689", - "pq.TestIssue282": "12137", - "pq.TestListenerFailedQuery": "41522", - "pq.TestListenerListen": "41522", - "pq.TestListenerReconnect": "41522", - "pq.TestListenerUnlisten": "41522", - "pq.TestListenerUnlistenAll": "41522", - "pq.TestNotifyExtra": "41522", - "pq.TestPing": "35897", - "pq.TestQueryRowBugWorkaround": "5807", - "pq.TestReconnect": "35897", - "pq.TestRowsColumnTypes": "41688", - "pq.TestRuntimeParameters": "12137", - "pq.TestStringWithNul": "26366", -} - -var libPQBlacklist19_2 = blacklist{ - "pq.TestBinaryByteSliceToInt": "41547", - "pq.TestBinaryByteSlicetoUUID": "41547", - "pq.TestBindError": "5807", - "pq.TestByteaOutputFormats": "26947", - "pq.TestCommit": "5807", - "pq.TestConnListen": "41522", - "pq.TestConnUnlisten": "41522", - "pq.TestConnUnlistenAll": "41522", - "pq.TestContextCancelBegin": "41335", - "pq.TestContextCancelExec": "41335", - "pq.TestContextCancelQuery": "41335", - "pq.TestCopyFromError": "5807", - "pq.TestCopyInBinaryError": "5807", - "pq.TestCopyInMultipleValues": "5807", - "pq.TestCopyInRaiseStmtTrigger": "5807", - "pq.TestCopyInStmtAffectedRows": "5807", - "pq.TestCopyInTypes": "5807", - "pq.TestCopyInWrongType": "5807", - "pq.TestCopyRespLoopConnectionError": "5807", - "pq.TestEncodeAndParseTs": "41563", - "pq.TestErrorDuringStartup": "41551", - "pq.TestErrorOnExec": "5807", - "pq.TestErrorOnQuery": "5807", - "pq.TestErrorOnQueryRowSimpleQuery": "5807", - "pq.TestExec": "5807", - "pq.TestInfinityTimestamp": "41564", - "pq.TestIssue186": "41558", - "pq.TestIssue196": "41689", - "pq.TestIssue282": "12137", - "pq.TestIssue494": "5807", - "pq.TestListenerFailedQuery": "41522", - "pq.TestListenerListen": "41522", - "pq.TestListenerReconnect": "41522", - "pq.TestListenerUnlisten": "41522", - "pq.TestListenerUnlistenAll": "41522", - "pq.TestNotifyExtra": "41522", - "pq.TestPing": "35897", - "pq.TestQueryRowBugWorkaround": "5807", - "pq.TestReconnect": "35897", - "pq.TestReturning": "5807", - "pq.TestRowsColumnTypes": "41688", - "pq.TestRowsResultTag": "5807", - "pq.TestRuntimeParameters": "12137", - "pq.TestStringWithNul": "26366", - "pq.TestTimestampWithTimeZone": "41565", -} - -var libPQIgnorelist20_1 = libPQIgnorelist19_2 - -var libPQIgnorelist19_2 = blacklist{ - // TestFormatTsBacked fails due to not returning an error for 
accepting a - // timestamp format that postgres does not. - "pq.TestFormatTsBackend": "41690", - // TestTxOptions fails because it attempts to change isolation levels. - "pq.TestTxOptions": "41690", -} diff --git a/pkg/cmd/roachtest/libpq_blocklist.go b/pkg/cmd/roachtest/libpq_blocklist.go new file mode 100644 index 000000000000..5ac901b19050 --- /dev/null +++ b/pkg/cmd/roachtest/libpq_blocklist.go @@ -0,0 +1,182 @@ +// Copyright 2019 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +var libPQBlocklists = blocklistsForVersion{ + {"v19.2", "libPQBlocklist19_2", libPQBlocklist19_2, "libPQIgnorelist19_2", libPQIgnorelist19_2}, + {"v20.1", "libPQBlocklist20_1", libPQBlocklist20_1, "libPQIgnorelist20_1", libPQIgnorelist20_1}, + {"v20.2", "libPQBlocklist20_2", libPQBlocklist20_2, "libPQIgnorelist20_2", libPQIgnorelist20_2}, +} + +var libPQBlocklist20_2 = blocklist{ + "pq.ExampleConnectorWithNoticeHandler": "unknown", + "pq.TestBinaryByteSliceToInt": "41547", + "pq.TestBinaryByteSlicetoUUID": "41547", + "pq.TestByteaOutputFormats": "26947", + "pq.TestConnListen": "41522", + "pq.TestConnUnlisten": "41522", + "pq.TestConnUnlistenAll": "41522", + "pq.TestConnectorWithNoticeHandler_Simple": "unknown", + "pq.TestConnectorWithNotificationHandler_Simple": "unknown", + "pq.TestContextCancelBegin": "41335", + "pq.TestContextCancelExec": "41335", + "pq.TestContextCancelQuery": "41335", + "pq.TestCopyFromError": "5807", + "pq.TestCopyInBinaryError": "5807", + "pq.TestCopyInRaiseStmtTrigger": "5807", + "pq.TestCopyInTypes": "5807", + "pq.TestCopyRespLoopConnectionError": "5807", + "pq.TestEncodeAndParseTs": "41563", + "pq.TestErrorDuringStartup": "41551", + "pq.TestInfinityTimestamp": "41564", + "pq.TestIssue186": "41558", + "pq.TestIssue196": "41689", + "pq.TestIssue282": "12137", + "pq.TestListenerFailedQuery": "41522", + "pq.TestListenerListen": "41522", + "pq.TestListenerReconnect": "41522", + "pq.TestListenerUnlisten": "41522", + "pq.TestListenerUnlistenAll": "41522", + "pq.TestNotifyExtra": "41522", + "pq.TestPing": "35897", + "pq.TestQueryRowBugWorkaround": "5807", + "pq.TestReconnect": "35897", + "pq.TestRowsColumnTypes": "41688", + "pq.TestRuntimeParameters": "12137", + "pq.TestStringWithNul": "26366", +} + +var libPQBlocklist20_1 = blocklist{ + "pq.ExampleConnectorWithNoticeHandler": "unknown", + "pq.TestBinaryByteSliceToInt": "41547", + "pq.TestBinaryByteSlicetoUUID": "41547", + "pq.TestByteaOutputFormats": "26947", + "pq.TestConnListen": "41522", + "pq.TestConnUnlisten": "41522", + "pq.TestConnUnlistenAll": "41522", + "pq.TestConnectorWithNoticeHandler_Simple": "unknown", + "pq.TestConnectorWithNotificationHandler_Simple": "unknown", + "pq.TestContextCancelBegin": "41335", + "pq.TestContextCancelExec": "41335", + "pq.TestContextCancelQuery": "41335", + "pq.TestCopyFromError": "5807", + "pq.TestCopyInBinaryError": "5807", + "pq.TestCopyInRaiseStmtTrigger": "5807", + "pq.TestCopyInTypes": "5807", + "pq.TestCopyRespLoopConnectionError": "5807", + "pq.TestEncodeAndParseTs": "41563", + "pq.TestErrorDuringStartup": "41551", + "pq.TestInfinityTimestamp": "41564", + "pq.TestIssue186": "41558", + "pq.TestIssue196": "41689", + "pq.TestIssue282": "12137", + 
"pq.TestListenerFailedQuery": "41522", + "pq.TestListenerListen": "41522", + "pq.TestListenerReconnect": "41522", + "pq.TestListenerUnlisten": "41522", + "pq.TestListenerUnlistenAll": "41522", + "pq.TestNotifyExtra": "41522", + "pq.TestPing": "35897", + "pq.TestQueryRowBugWorkaround": "5807", + "pq.TestReconnect": "35897", + "pq.TestRowsColumnTypes": "41688", + "pq.TestRuntimeParameters": "12137", + "pq.TestStringWithNul": "26366", + "pq.TestTimeWithTimezone": "44548", + "pq.TestTimeWithTimezone/24:00+00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithTimezone/24:00-04:00_=>_0000-01-02T00:00:00-04:00": "44548", + "pq.TestTimeWithTimezone/24:00:00+00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithTimezone/24:00:00.0+00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithTimezone/24:00:00.000000+00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithTimezone/24:00Z_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone": "44548", + "pq.TestTimeWithoutTimezone/24:00:00.000000_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone/24:00:00.0_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone/24:00:00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone/24:00_=>_0000-01-02T00:00:00Z": "44548", +} + +var libPQBlocklist19_2 = blocklist{ + "pq.ExampleConnectorWithNoticeHandler": "unknown", + "pq.TestBinaryByteSliceToInt": "41547", + "pq.TestBinaryByteSlicetoUUID": "41547", + "pq.TestBindError": "5807", + "pq.TestByteaOutputFormats": "26947", + "pq.TestCommit": "5807", + "pq.TestConnListen": "41522", + "pq.TestConnUnlisten": "41522", + "pq.TestConnUnlistenAll": "41522", + "pq.TestConnectorWithNoticeHandler_Simple": "unknown", + "pq.TestConnectorWithNotificationHandler_Simple": "unknown", + "pq.TestContextCancelBegin": "41335", + "pq.TestContextCancelExec": "41335", + "pq.TestContextCancelQuery": "41335", + "pq.TestCopyFromError": "5807", + "pq.TestCopyInBinaryError": "5807", + "pq.TestCopyInMultipleValues": "5807", + "pq.TestCopyInRaiseStmtTrigger": "5807", + "pq.TestCopyInStmtAffectedRows": "5807", + "pq.TestCopyInTypes": "5807", + "pq.TestCopyInWrongType": "5807", + "pq.TestCopyRespLoopConnectionError": "5807", + "pq.TestEncodeAndParseTs": "41563", + "pq.TestErrorDuringStartup": "41551", + "pq.TestErrorOnExec": "5807", + "pq.TestErrorOnQuery": "5807", + "pq.TestErrorOnQueryRowSimpleQuery": "5807", + "pq.TestExec": "5807", + "pq.TestInfinityTimestamp": "41564", + "pq.TestIssue186": "41558", + "pq.TestIssue196": "41689", + "pq.TestIssue282": "12137", + "pq.TestIssue494": "5807", + "pq.TestListenerFailedQuery": "41522", + "pq.TestListenerListen": "41522", + "pq.TestListenerReconnect": "41522", + "pq.TestListenerUnlisten": "41522", + "pq.TestListenerUnlistenAll": "41522", + "pq.TestNotifyExtra": "41522", + "pq.TestPing": "35897", + "pq.TestQueryRowBugWorkaround": "5807", + "pq.TestReconnect": "35897", + "pq.TestReturning": "5807", + "pq.TestRowsColumnTypes": "41688", + "pq.TestRowsResultTag": "5807", + "pq.TestRuntimeParameters": "12137", + "pq.TestStringWithNul": "26366", + "pq.TestTimeWithTimezone": "44548", + "pq.TestTimeWithTimezone/11:59:59+00:00_=>_0000-01-01T11:59:59Z": "44548", + "pq.TestTimeWithTimezone/11:59:59+04:00_=>_0000-01-01T11:59:59+04:00": "44548", + "pq.TestTimeWithTimezone/24:00+00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithTimezone/24:00-04:00_=>_0000-01-02T00:00:00-04:00": "44548", + "pq.TestTimeWithTimezone/24:00:00+00_=>_0000-01-02T00:00:00Z": "44548", + 
"pq.TestTimeWithTimezone/24:00:00.0+00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithTimezone/24:00:00.000000+00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithTimezone/24:00Z_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone": "44548", + "pq.TestTimeWithoutTimezone/24:00:00.000000_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone/24:00:00.0_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone/24:00:00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimeWithoutTimezone/24:00_=>_0000-01-02T00:00:00Z": "44548", + "pq.TestTimestampWithTimeZone": "41565", +} + +var libPQIgnorelist20_2 = libPQIgnorelist20_1 + +var libPQIgnorelist20_1 = libPQIgnorelist19_2 + +var libPQIgnorelist19_2 = blocklist{ + // TestFormatTsBacked fails due to not returning an error for accepting a + // timestamp format that postgres does not. + "pq.TestFormatTsBackend": "41690", + // TestTxOptions fails because it attempts to change isolation levels. + "pq.TestTxOptions": "41690", +} diff --git a/pkg/cmd/roachtest/main.go b/pkg/cmd/roachtest/main.go index 6db7dd59a048..2634a06df72c 100644 --- a/pkg/cmd/roachtest/main.go +++ b/pkg/cmd/roachtest/main.go @@ -63,7 +63,7 @@ func main() { } switch cmd.Name() { case "run", "bench", "store-gen": - initBinaries() + initBinariesAndLibraries() } return nil }, @@ -161,6 +161,10 @@ the test tags. }, } + // TODO(irfansharif): We could remove this by directly running `cockroach + // version` against the binary being tested, instead of what we do today + // which is defaulting to checking the last git release tag present in the + // local checkout. runCmd.Flags().StringVar( &buildTag, "build-tag", "", "build tag (auto-detect if empty)") runCmd.Flags().StringVar( @@ -220,6 +224,10 @@ the test tags. "The number of cloud CPUs roachtest is allowed to use at any one time.") cmd.Flags().IntVar( &httpPort, "port", 8080, "the port on which to serve the HTTP interface") + cmd.Flags().BoolVar( + &localSSD, "local-ssd", true, "Use a local SSD instead of an EBS volume (only for use with AWS) (defaults to true if instance type supports local SSDs)") + cmd.Flags().StringSliceVar( + &createArgs, "create-args", []string{}, "extra args to pass onto the roachprod create command") } rootCmd.AddCommand(listCmd) @@ -364,10 +372,11 @@ func CtrlC(ctx context.Context, l *logger, cancel func(), cr *clusterRegistry) { // If we get a second CTRL-C, exit immediately. select { case <-sig: - shout(ctx, l, os.Stderr, "Second SIGINT received. Quitting.") + shout(ctx, l, os.Stderr, "Second SIGINT received. Quitting. Cluster might be left behind.") os.Exit(2) case <-destroyCh: shout(ctx, l, os.Stderr, "Done destroying all clusters.") + os.Exit(2) } }() } diff --git a/pkg/cmd/roachtest/many_splits.go b/pkg/cmd/roachtest/many_splits.go new file mode 100644 index 000000000000..fa721f0c7e53 --- /dev/null +++ b/pkg/cmd/roachtest/many_splits.go @@ -0,0 +1,44 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "context" + "fmt" +) + +// runManySplits attempts to create 2000 tiny ranges on a 4-node cluster using +// left-to-right splits and check the cluster is still live afterwards. 
+func runManySplits(ctx context.Context, t *test, c *cluster) { + args := startArgs("--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms") + c.Put(ctx, cockroach, "./cockroach") + c.Start(ctx, t, args) + + db := c.Conn(ctx, 1) + defer db.Close() + + // Wait for upreplication then create many ranges. + waitForFullReplication(t, db) + + m := newMonitor(ctx, c, c.All()) + m.Go(func(ctx context.Context) error { + const numRanges = 2000 + t.l.Printf("creating %d ranges...", numRanges) + if _, err := db.ExecContext(ctx, fmt.Sprintf(` + CREATE TABLE t(x, PRIMARY KEY(x)) AS TABLE generate_series(1,%[1]d); + ALTER TABLE t SPLIT AT TABLE generate_series(1,%[1]d); + `, numRanges)); err != nil { + return err + } + return nil + }) + m.Wait() +} diff --git a/pkg/cmd/roachtest/mixed_version_decommission.go b/pkg/cmd/roachtest/mixed_version_decommission.go new file mode 100644 index 000000000000..4f342f21df37 --- /dev/null +++ b/pkg/cmd/roachtest/mixed_version_decommission.go @@ -0,0 +1,293 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "context" + "strconv" + + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/retry" + "github.com/cockroachdb/cockroach/pkg/util/version" + "github.com/cockroachdb/errors" +) + +// runDecommissionMixedVersions runs through randomized +// decommission/recommission processes in mixed-version clusters. +func runDecommissionMixedVersions( + ctx context.Context, t *test, c *cluster, buildVersion version.Version, +) { + predecessorVersion, err := PredecessorVersion(buildVersion) + if err != nil { + t.Fatal(err) + } + + h := newDecommTestHelper(t, c) + + // The v20.2 CLI can only be run against servers running v20.2. For this + // reason, we grab a handle on a specific server slated for an upgrade. + pinnedUpgrade := h.getRandNode() + t.l.Printf("pinned n%d for upgrade", pinnedUpgrade) + + // An empty string means that the cockroach binary specified by flag + // `cockroach` will be used. + const mainVersion = "" + allNodes := c.All() + u := newVersionUpgradeTest(c, + // We upload both binaries to each node, to be able to vary the binary + // used when issuing `cockroach node` subcommands. + uploadVersion(allNodes, predecessorVersion), + uploadVersion(allNodes, mainVersion), + + startVersion(allNodes, predecessorVersion), + waitForUpgradeStep(allNodes), + preventAutoUpgradeStep(h.nodeIDs[0]), + + // We upgrade a subset of the cluster to v20.2. + binaryUpgradeStep(c.Node(pinnedUpgrade), mainVersion), + binaryUpgradeStep(c.Node(h.getRandNodeOtherThan(pinnedUpgrade)), mainVersion), + checkAllMembership(pinnedUpgrade, "active"), + + // 1. Partially decommission a random node from another random node. We + // use the v20.1 CLI to do so. + partialDecommissionStep(h.getRandNode(), h.getRandNode(), predecessorVersion), + checkOneDecommissioning(h.getRandNode()), + checkOneMembership(pinnedUpgrade, "decommissioning"), + + // 2. Recommission all nodes, including the partially decommissioned + // one, from a random node. Use the v20.1 CLI to do so. 
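+		// (Recommissioning clears the decommissioning bit in node liveness;
+		// the checkNoDecommissioning step that follows asserts exactly that
+		// via crdb_internal.gossip_liveness.)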
+		recommissionAllStep(h.getRandNode(), predecessorVersion),
+		checkNoDecommissioning(h.getRandNode()),
+		checkAllMembership(pinnedUpgrade, "active"),
+		//
+		// 3. Attempt to fully decommission a node, from a random node, again
+		// using the v20.1 CLI.
+		fullyDecommissionStep(h.getRandNode(), h.getRandNode(), predecessorVersion),
+		checkOneDecommissioning(h.getRandNode()),
+		checkOneMembership(pinnedUpgrade, "decommissioning"),
+
+		// Roll back, which should be fine because the cluster upgrade was
+		// not finalized.
+		binaryUpgradeStep(allNodes, predecessorVersion),
+		checkOneDecommissioning(h.getRandNode()),
+
+		// Repeat similar recommission/decommission cycles as above. We can no
+		// longer assert against the `membership` column as none of the servers
+		// are running v20.2.
+		recommissionAllStep(h.getRandNode(), predecessorVersion),
+		checkNoDecommissioning(h.getRandNode()),
+
+		partialDecommissionStep(h.getRandNode(), h.getRandNode(), predecessorVersion),
+		checkOneDecommissioning(h.getRandNode()),
+
+		// Roll all nodes forward, and finalize upgrade.
+		binaryUpgradeStep(allNodes, mainVersion),
+		allowAutoUpgradeStep(1),
+		waitForUpgradeStep(allNodes),
+
+		checkOneMembership(h.getRandNode(), "decommissioning"),
+
+		// Use the v20.2 CLI from here on. Let's start by recommissioning all
+		// the nodes in the cluster.
+		recommissionAllStep(h.getRandNode(), mainVersion),
+		checkNoDecommissioning(h.getRandNode()),
+		checkAllMembership(h.getRandNode(), "active"),
+
+		// We partially decommission a random node.
+		partialDecommissionStep(h.getRandNode(), h.getRandNode(), mainVersion),
+		checkOneDecommissioning(h.getRandNode()),
+		checkOneMembership(h.getRandNode(), "decommissioning"),
+
+		// We check that recommissioning is still functional.
+		recommissionAllStep(h.getRandNode(), mainVersion),
+		checkNoDecommissioning(h.getRandNode()),
+		checkAllMembership(h.getRandNode(), "active"),
+
+		// We fully decommission a random node. We need to use the v20.2 CLI to
+		// do so.
+		fullyDecommissionStep(h.getRandNode(), h.getRandNode(), mainVersion),
+		checkOneDecommissioning(h.getRandNode()),
+		checkOneMembership(h.getRandNode(), "decommissioned"),
+	)
+
+	u.run(ctx, t)
+}
+
+// cockroachBinaryPath is a shorthand to retrieve the path for a cockroach
+// binary of a given version.
+func cockroachBinaryPath(version string) string {
+	path := "./cockroach"
+	if version != "" {
+		path += "-" + version
+	}
+	return path
+}
+
+// partialDecommissionStep runs `cockroach node decommission --wait=none` from a
+// given node, targeting another. It uses the specified binary version to run
+// the command.
+func partialDecommissionStep(target, from int, binaryVersion string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		c := u.c
+		c.Run(ctx, c.Node(from), cockroachBinaryPath(binaryVersion), "node", "decommission",
+			"--wait=none", "--insecure", strconv.Itoa(target))
+	}
+}
+
+// recommissionAllStep runs `cockroach node recommission` from a given node,
+// targeting all nodes in the cluster. It uses the specified binary version to
+// run the command.
+func recommissionAllStep(from int, binaryVersion string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		c := u.c
+		c.Run(ctx, c.Node(from), cockroachBinaryPath(binaryVersion), "node", "recommission",
+			"--insecure", c.All().nodeIDsString())
+	}
+}
+
+// fullyDecommissionStep is like partialDecommissionStep, except it uses
+// `--wait=all`.
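+// With `--wait=all` the CLI blocks until the target node has shed all of its
+// replicas and is fully decommissioned; `--wait=none` above merely marks the
+// node as decommissioning and returns immediately.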
+func fullyDecommissionStep(target, from int, binaryVersion string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		c := u.c
+		c.Run(ctx, c.Node(from), cockroachBinaryPath(binaryVersion), "node", "decommission",
+			"--wait=all", "--insecure", strconv.Itoa(target))
+	}
+}
+
+// checkOneDecommissioning checks against the `decommissioning` column in
+// crdb_internal.gossip_liveness, asserting that only one node is marked as
+// decommissioning. This check can be run against both v20.1 and v20.2 servers.
+func checkOneDecommissioning(from int) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		// We use a retry block here (and elsewhere) because we're consulting
+		// crdb_internal.gossip_liveness, and need to make allowances for gossip
+		// propagation delays.
+		if err := retry.ForDuration(testutils.DefaultSucceedsSoonDuration, func() error {
+			db := u.conn(ctx, t, from)
+			var count int
+			if err := db.QueryRow(
+				`select count(*) from crdb_internal.gossip_liveness where decommissioning = true;`).Scan(&count); err != nil {
+				t.Fatal(err)
+			}
+
+			if count != 1 {
+				return errors.Newf("expected to find 1 node with decommissioning=true, found %d", count)
+			}
+
+			var nodeID int
+			if err := db.QueryRow(
+				`select node_id from crdb_internal.gossip_liveness where decommissioning = true;`).Scan(&nodeID); err != nil {
+				t.Fatal(err)
+			}
+			t.l.Printf("n%d decommissioning=true", nodeID)
+			return nil
+		}); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// checkNoDecommissioning checks against the `decommissioning` column in
+// crdb_internal.gossip_liveness, asserting that no nodes are marked as
+// decommissioning. This check can be run against both v20.1 and v20.2 servers.
+func checkNoDecommissioning(from int) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		if err := retry.ForDuration(testutils.DefaultSucceedsSoonDuration, func() error {
+			db := u.conn(ctx, t, from)
+			var count int
+			if err := db.QueryRow(
+				`select count(*) from crdb_internal.gossip_liveness where decommissioning = true;`).Scan(&count); err != nil {
+				t.Fatal(err)
+			}
+
+			if count != 0 {
+				return errors.Newf("expected to find 0 nodes with decommissioning=true, found %d", count)
+			}
+			return nil
+		}); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// checkOneMembership checks against the `membership` column in
+// crdb_internal.gossip_liveness, asserting that only one node is marked with
+// the specified membership status. This check can only be run against
+// servers running v20.2 and beyond.
+func checkOneMembership(from int, membership string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		if err := retry.ForDuration(testutils.DefaultSucceedsSoonDuration, func() error {
+			db := u.conn(ctx, t, from)
+			var count int
+			if err := db.QueryRow(
+				`select count(*) from crdb_internal.gossip_liveness where membership = $1;`, membership).Scan(&count); err != nil {
+				t.Fatal(err)
+			}
+
+			if count != 1 {
+				return errors.Newf("expected to find 1 node with membership=%s, found %d", membership, count)
+			}
+
+			var nodeID int
+			if err := db.QueryRow(
+				`select node_id from crdb_internal.gossip_liveness where membership = $1;`, membership).Scan(&nodeID); err != nil {
+				t.Fatal(err)
+			}
+			t.l.Printf("n%d membership=%s", nodeID, membership)
+			return nil
+		}); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// checkAllMembership checks against the `membership` column in
+// crdb_internal.gossip_liveness, asserting that all nodes are marked with
+// the specified membership status. This check can only be run against
+// servers running v20.2 and beyond.
+func checkAllMembership(from int, membership string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		if err := retry.ForDuration(testutils.DefaultSucceedsSoonDuration, func() error {
+			db := u.conn(ctx, t, from)
+			var count int
+			if err := db.QueryRow(
+				`select count(*) from crdb_internal.gossip_liveness where membership != $1;`, membership).Scan(&count); err != nil {
+				t.Fatal(err)
+			}
+
+			if count != 0 {
+				return errors.Newf("expected to find 0 nodes with membership!=%s, found %d", membership, count)
+			}
+			return nil
+		}); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// uploadVersion uploads the specified cockroach binary version to the specified
+// nodes.
+func uploadVersion(nodes nodeListOption, version string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		// Put the binary.
+		u.uploadVersion(ctx, t, nodes, version)
+	}
+}
+
+// startVersion starts the specified cockroach binary version on the specified
+// nodes.
+func startVersion(nodes nodeListOption, version string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		args := startArgs("--binary=" + cockroachBinaryPath(version))
+		u.c.Start(ctx, t, nodes, args, startArgsDontEncrypt)
+	}
+}
diff --git a/pkg/cmd/roachtest/mixed_version_jobs.go b/pkg/cmd/roachtest/mixed_version_jobs.go
new file mode 100644
index 000000000000..de1efd40e4c2
--- /dev/null
+++ b/pkg/cmd/roachtest/mixed_version_jobs.go
@@ -0,0 +1,334 @@
+// Copyright 2020 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cockroachdb/cockroach/pkg/jobs"
+	"github.com/cockroachdb/cockroach/pkg/testutils"
+)
+
+type backgroundFn func(ctx context.Context, u *versionUpgradeTest) error
+
+// A backgroundStepper is a tool to run long-lived commands while a cluster is
+// going through a sequence of version upgrade operations.
+// It exposes a `launch` step that launches the method carrying out long-running
+// work (in the background) and a `stop` step collecting any errors.
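+//
+// A minimal usage sketch (myBackgroundFn stands in for any backgroundFn;
+// the pattern mirrors how backgroundTPCC is used further down in this file):
+//
+//	stepper := makeBackgroundStepper(myBackgroundFn)
+//	u := newVersionUpgradeTest(c,
+//		stepper.launch,
+//		// ... upgrade and downgrade steps ...
+//		stepper.stop, // collects errors, then cleans up leftover jobs
+//	)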
+type backgroundStepper struct {
+	// This is the operation that will be launched in the background. When the
+	// context gets canceled, it should shut down and return without an error.
+	// The typical way to achieve this is:
+	//
+	//  err := doSomething(ctx)
+	//  if ctx.Err() != nil {
+	//    return nil
+	//  }
+	//  return err
+	run backgroundFn
+
+	// Internal.
+	m *monitor
+}
+
+func makeBackgroundStepper(run backgroundFn) backgroundStepper {
+	return backgroundStepper{run: run}
+}
+
+// launch spawns the function the background step was initialized with.
+func (s *backgroundStepper) launch(ctx context.Context, _ *test, u *versionUpgradeTest) {
+	s.m = newMonitor(ctx, u.c)
+	_, s.m.cancel = context.WithCancel(ctx)
+	s.m.Go(func(ctx context.Context) error {
+		return s.run(ctx, u)
+	})
+}
+
+func (s *backgroundStepper) stop(ctx context.Context, t *test, u *versionUpgradeTest) {
+	s.m.cancel()
+	// We don't care about the workload failing since we only use it to produce a
+	// few `RESTORE` jobs. And indeed the workload will fail because it does not
+	// tolerate pausing of its jobs.
+	_ = s.m.WaitE()
+	db := u.conn(ctx, t, 1)
+	t.l.Printf("Resuming any paused jobs left")
+	for {
+		_, err := db.ExecContext(
+			ctx,
+			`RESUME JOBS (SELECT job_id FROM [SHOW JOBS] WHERE status = $1);`,
+			jobs.StatusPaused,
+		)
+		if err != nil {
+			t.Fatal(err)
+		}
+		row := db.QueryRow(
+			"SELECT count(*) FROM [SHOW JOBS] WHERE status = $1",
+			jobs.StatusPauseRequested,
+		)
+		var nNotYetPaused int
+		if err = row.Scan(&nNotYetPaused); err != nil {
+			t.Fatal(err)
+		}
+		if nNotYetPaused <= 0 {
+			break
+		}
+		// Sleep a bit so as not to DOS the jobs table.
+		time.Sleep(10 * time.Second)
+		t.l.Printf("Waiting for %d jobs to pause", nNotYetPaused)
+	}
+
+	t.l.Printf("Waiting for jobs to complete...")
+	var err error
+	for {
+		q := "SHOW JOBS WHEN COMPLETE (SELECT job_id FROM [SHOW JOBS]);"
+		_, err = db.ExecContext(ctx, q)
+		if testutils.IsError(err, "pq: restart transaction:.*") {
+			t.l.Printf("SHOW JOBS WHEN COMPLETE returned %s, retrying", err.Error())
+			time.Sleep(10 * time.Second)
+			continue
+		}
+		break
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func backgroundTPCCWorkload(t *test, warehouses int, tpccDB string) backgroundStepper {
+	return makeBackgroundStepper(func(ctx context.Context, u *versionUpgradeTest) error {
+		cmd := []string{
+			"./workload fixtures load tpcc",
+			fmt.Sprintf("--warehouses=%d", warehouses),
+			fmt.Sprintf("--db=%s", tpccDB),
+		}
+		// The workload has to run on one of the nodes of the cluster.
+		err := u.c.RunE(ctx, u.c.Node(1), cmd...)
+		if ctx.Err() != nil {
+			// If the context is canceled, that's probably why the workload
+			// returned, so swallow the error. (This is how the harness tells us
+			// to shut down the workload.)
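+			// (The "Restore failed" wording below is because `workload fixtures
+			// load` drives RESTORE jobs under the hood, so a canceled run
+			// surfaces as a failed restore.)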
+ t.l.Printf("Restore failed with %s", err.Error()) + return nil + } + if err != nil { + t.l.Printf("Restore failed with %s", err.Error()) + } + return err + }) +} + +func pauseAllJobsStep() versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, 1) + _, err := db.ExecContext( + ctx, + `PAUSE JOBS (SELECT job_id FROM [SHOW JOBS] WHERE status = $1);`, + jobs.StatusRunning, + ) + if err != nil { + t.Fatal(err) + } + + row := db.QueryRow("SELECT count(*) FROM [SHOW JOBS] WHERE status LIKE 'pause%'") + var nPaused int + if err := row.Scan(&nPaused); err != nil { + t.Fatal(err) + } + t.l.Printf("Paused %d jobs", nPaused) + time.Sleep(time.Second) + } +} + +func makeResumeAllJobsAndWaitStep(d time.Duration) versionStep { + var numResumes int + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + numResumes++ + t.l.Printf("Resume all jobs number: %d", numResumes) + db := u.conn(ctx, t, 1) + _, err := db.ExecContext( + ctx, + `RESUME JOBS (SELECT job_id FROM [SHOW JOBS] WHERE status = $1);`, + jobs.StatusPaused, + ) + if err != nil { + t.Fatal(err) + } + + row := db.QueryRow( + "SELECT count(*) FROM [SHOW JOBS] WHERE status = $1", + jobs.StatusRunning, + ) + var nRunning int + if err := row.Scan(&nRunning); err != nil { + t.Fatal(err) + } + t.l.Printf("Resumed %d jobs", nRunning) + time.Sleep(d) + } +} + +func checkForFailedJobsStep(ctx context.Context, t *test, u *versionUpgradeTest) { + t.l.Printf("Checking for failed jobs.") + + db := u.conn(ctx, t, 1) + rows, err := db.Query(` +SELECT job_id, job_type, description, status, error, coordinator_id +FROM [SHOW JOBS] WHERE status = $1 OR status = $2`, + jobs.StatusFailed, jobs.StatusReverting, + ) + if err != nil { + t.Fatal(err) + } + var jobType, desc, status, jobError string + var jobID, coordinatorID int64 + var errMsg string + for rows.Next() { + err := rows.Scan(&jobID, &jobType, &desc, &status, &jobError, &coordinatorID) + if err != nil { + t.Fatal(err) + } + // Concatenate all unsuccessful jobs info. + errMsg = fmt.Sprintf( + "%sUnsuccessful job %d of type %s, description %s, status %s, error %s, coordinator %d\n", + errMsg, jobID, jobType, desc, status, jobError, coordinatorID, + ) + } + if errMsg != "" { + nodeInfo := "Cluster info\n" + for i := range u.c.All() { + nodeInfo = fmt.Sprintf( + "%sNode %d: %s\n", nodeInfo, i+1, u.binaryVersion(ctx, t, i+1)) + } + t.Fatalf("%s\n%s", nodeInfo, errMsg) + } +} + +func runJobsMixedVersions( + ctx context.Context, t *test, c *cluster, warehouses int, predecessorVersion string, +) { + // An empty string means that the cockroach binary specified by flag + // `cockroach` will be used. + const mainVersion = "" + roachNodes := c.All() + backgroundTPCC := backgroundTPCCWorkload(t, warehouses, "tpcc") + resumeAllJobsAndWaitStep := makeResumeAllJobsAndWaitStep(10 * time.Second) + c.Put(ctx, workload, "./workload", c.Node(1)) + + u := newVersionUpgradeTest(c, + uploadAndStartFromCheckpointFixture(roachNodes, predecessorVersion), + waitForUpgradeStep(roachNodes), + preventAutoUpgradeStep(1), + + backgroundTPCC.launch, + func(ctx context.Context, _ *test, u *versionUpgradeTest) { + time.Sleep(10 * time.Second) + }, + checkForFailedJobsStep, + pauseAllJobsStep(), + + // Roll the nodes into the new version one by one, while repeatedly pausing + // and resuming all jobs. 
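+		// Each resume below re-adopts the paused jobs, potentially on a node
+		// running a different binary version than the one that started them;
+		// the checkForFailedJobsStep after each cycle is what catches jobs
+		// that fail or revert as a result.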
+		binaryUpgradeStep(c.Node(3), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(2), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(1), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(4), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		// Roll back again, which ought to be fine because the cluster upgrade was
+		// not finalized.
+		binaryUpgradeStep(c.Node(2), predecessorVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(4), predecessorVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(3), predecessorVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(1), predecessorVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		// Roll nodes forward and finalize upgrade.
+		binaryUpgradeStep(c.Node(4), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(3), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(1), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		binaryUpgradeStep(c.Node(2), mainVersion),
+		resumeAllJobsAndWaitStep,
+		checkForFailedJobsStep,
+		pauseAllJobsStep(),
+
+		allowAutoUpgradeStep(1),
+		waitForUpgradeStep(roachNodes),
+		resumeAllJobsAndWaitStep,
+		backgroundTPCC.stop,
+		checkForFailedJobsStep,
+	)
+	u.run(ctx, t)
+}
+
+func registerJobsMixedVersions(r *testRegistry) {
+	r.Add(testSpec{
+		Name:  "jobs/mixed-versions",
+		Owner: OwnerBulkIO,
+		// Jobs infrastructure was unstable prior to 20.1 in terms of the behavior
+		// of `PAUSE/CANCEL JOB` commands which were best effort and relied on the
+		// job itself to detect the request. These were fixed by introducing new job
+		// state machine states `Status{Pause,Cancel}Requested`. This test's purpose
+		// is to test the state transitions of jobs from paused to resumed and
+		// vice versa in order to detect regressions in the work done for 20.1.
+		MinVersion: "v20.1.0",
+		Cluster:    makeClusterSpec(4),
+		Run: func(ctx context.Context, t *test, c *cluster) {
+			predV, err := PredecessorVersion(r.buildVersion)
+			if err != nil {
+				t.Fatal(err)
+			}
+			warehouses := 10
+			runJobsMixedVersions(ctx, t, c, warehouses, predV)
+		},
+	})
+}
diff --git a/pkg/cmd/roachtest/mixed_version_schemachange.go b/pkg/cmd/roachtest/mixed_version_schemachange.go
new file mode 100644
index 000000000000..1460b829c5f0
--- /dev/null
+++ b/pkg/cmd/roachtest/mixed_version_schemachange.go
@@ -0,0 +1,145 @@
+// Copyright 2020 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/cockroachdb/cockroach/pkg/util/version"
+)
+
+func registerSchemaChangeMixedVersions(r *testRegistry) {
+	r.Add(testSpec{
+		Name:  "schemachange/mixed-versions",
+		Owner: OwnerSQLSchema,
+		// This tests the work done for 20.1 that turned schema changes into jobs
+		// and, in addition, prevented making any new schema changes on a mixed
+		// cluster in order to prevent bugs during upgrades.
+		MinVersion: "v20.1.0",
+		Cluster:    makeClusterSpec(4),
+		Run: func(ctx context.Context, t *test, c *cluster) {
+			maxOps := 100
+			concurrency := 5
+			if local {
+				maxOps = 10
+				concurrency = 2
+			}
+			runSchemaChangeMixedVersions(ctx, t, c, maxOps, concurrency, r.buildVersion)
+		},
+	})
+}
+
+func uploadAndInitSchemaChangeWorkload() versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		// Stage workload on all nodes as the load node to run workload is chosen
+		// randomly.
+		u.c.Put(ctx, workload, "./workload", u.c.All())
+		u.c.Run(ctx, u.c.All(), "./workload init schemachange")
+	}
+}
+
+func runSchemaChangeWorkloadStep(loadNode, maxOps, concurrency int) versionStep {
+	var numFeatureRuns int
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		numFeatureRuns++
+		t.l.Printf("Workload step run: %d", numFeatureRuns)
+		runCmd := []string{
+			"./workload run schemachange --verbose=1",
+			// The workload is still in development and occasionally discovers schema
+			// change errors so for now we don't fail on them but only on panics, server
+			// crashes, deadlocks, etc.
+			// TODO(spaskob): remove when https://github.com/cockroachdb/cockroach/issues/47430
+			// is closed.
+			"--tolerate-errors=true",
+			fmt.Sprintf("--max-ops %d", maxOps),
+			fmt.Sprintf("--concurrency %d", concurrency),
+			fmt.Sprintf("{pgurl:1-%d}", u.c.spec.NodeCount),
+		}
+		u.c.Run(ctx, u.c.Node(loadNode), runCmd...)
+	}
+}
+
+func runSchemaChangeMixedVersions(
+	ctx context.Context,
+	t *test,
+	c *cluster,
+	maxOps int,
+	concurrency int,
+	buildVersion version.Version,
+) {
+	predecessorVersion, err := PredecessorVersion(buildVersion)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// An empty string means that the cockroach binary specified by the
+	// `cockroach` flag will be used.
+	const mainVersion = ""
+	schemaChangeStep := runSchemaChangeWorkloadStep(c.All().randNode()[0], maxOps, concurrency)
+	if buildVersion.Major() < 20 {
+		// The schema change workload is meant to run only on versions 19.2 or
+		// higher. If the main version is below 20.1 then the predecessor version
+		// will be below 19.2.
+		schemaChangeStep = nil
+	}
+
+	u := newVersionUpgradeTest(c,
+		uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion),
+		uploadAndInitSchemaChangeWorkload(),
+		waitForUpgradeStep(c.All()),
+
+		// NB: at this point, cluster and binary version equal predecessorVersion,
+		// and auto-upgrades are on.
+
+		preventAutoUpgradeStep(1),
+		schemaChangeStep,
+
+		// Roll the nodes into the new version one by one, while repeatedly running
+		// schema changes. We use an empty string for the version below, which means
+		// use the main ./cockroach binary (i.e. the one being tested in this run).
+		binaryUpgradeStep(c.Node(3), mainVersion),
+		schemaChangeStep,
+		binaryUpgradeStep(c.Node(2), mainVersion),
+		schemaChangeStep,
+		binaryUpgradeStep(c.Node(1), mainVersion),
+		schemaChangeStep,
+		binaryUpgradeStep(c.Node(4), mainVersion),
+		schemaChangeStep,
+
+		// Roll back again, which ought to be fine because the cluster upgrade was
+		// not finalized.
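+		// (Rolling the binaries back is safe because the cluster version only
+		// ratchets up once auto-upgrade is allowed to finalize, at the end of
+		// this step list.)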
+ binaryUpgradeStep(c.Node(2), predecessorVersion), + schemaChangeStep, + binaryUpgradeStep(c.Node(4), predecessorVersion), + schemaChangeStep, + binaryUpgradeStep(c.Node(3), predecessorVersion), + schemaChangeStep, + binaryUpgradeStep(c.Node(1), predecessorVersion), + schemaChangeStep, + + // Roll nodes forward and finalize upgrade. + binaryUpgradeStep(c.Node(4), mainVersion), + schemaChangeStep, + binaryUpgradeStep(c.Node(3), mainVersion), + schemaChangeStep, + binaryUpgradeStep(c.Node(1), mainVersion), + schemaChangeStep, + binaryUpgradeStep(c.Node(2), mainVersion), + schemaChangeStep, + + allowAutoUpgradeStep(1), + waitForUpgradeStep(c.All()), + schemaChangeStep, + ) + + u.run(ctx, t) +} diff --git a/pkg/cmd/roachtest/namespace_upgrade.go b/pkg/cmd/roachtest/namespace_upgrade.go new file mode 100644 index 000000000000..b55c9666c423 --- /dev/null +++ b/pkg/cmd/roachtest/namespace_upgrade.go @@ -0,0 +1,277 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/cockroachdb/cockroach/pkg/util/retry" +) + +func registerNamespaceUpgrade(r *testRegistry) { + r.Add(testSpec{ + Name: "version/namespace-upgrade", + Owner: OwnerSQLSchema, + // This test is a regression test designed to test for #49092. + // It drops objects from the 19.2 node after the 20.1 node joins the + // cluster, and also drops/adds objects in each of the states before, during + // and after the migration, making sure results are as we expect. 
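+		// (The migration in question moves entries out of the old
+		// system.namespace table, descriptor ID 2, into its replacement with
+		// descriptor ID 30; verifyNoOrphanedOldEntries below compares the two.)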
+ MinVersion: "v20.1.0", + Cluster: makeClusterSpec(3), + Run: func(ctx context.Context, t *test, c *cluster) { + predV, err := PredecessorVersion(r.buildVersion) + if err != nil { + t.Fatal(err) + } + if !strings.HasPrefix(predV, "v19.2") { + t.Skip("wrong version", "this test only makes sense for the v19.2->v20.1 upgrade") + } + runNamespaceUpgrade(ctx, t, c, predV) + }, + }) +} + +func createTableStep(node int, table string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + fmt.Sprintf(`CREATE TABLE %s (a INT)`, table)) + if err != nil { + t.Fatal(err) + } + } +} + +func createDBStep(node int, name string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + fmt.Sprintf(`CREATE DATABASE %s`, name)) + if err != nil { + t.Fatal(err) + } + } +} + +func dropTableStep(node int, table string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + fmt.Sprintf(`DROP TABLE %s`, table)) + if err != nil { + t.Fatal(err) + } + } +} + +func dropDBStep(node int, name string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + fmt.Sprintf(`DROP DATABASE %s`, name)) + if err != nil { + t.Fatal(err) + } + } +} + +func renameDBStep(node int, oldDB string, newDB string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + fmt.Sprintf(`ALTER DATABASE %s RENAME TO %s`, oldDB, newDB)) + if err != nil { + t.Fatal(err) + } + } +} + +func renameTableStep(node int, oldTable string, newTable string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + fmt.Sprintf(`ALTER TABLE %s RENAME TO %s`, oldTable, newTable)) + if err != nil { + t.Fatal(err) + } + } +} + +func truncateTableStep(node int, table string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + fmt.Sprintf(`TRUNCATE %s`, table)) + if err != nil { + t.Fatal(err) + } + } +} + +func showDatabasesStep(node int) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, + `SHOW DATABASES`) + if err != nil { + t.Fatal(err) + } + } +} + +func changeMigrationSetting(node int, enable bool) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, `SET CLUSTER SETTING testing.system_namespace_migration.enabled = $1`, enable) + if err != nil { + t.Fatal(err) + } + } +} + +func verifyNoOrphanedOldEntries(node int) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + // Check that there are no rows in namespace that aren't in namespace2, + // except for the old entry for namespace (descriptor 2) which we don't + //copy. 
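+		// The bracketed `[2 AS namespace]` form references a table by its
+		// descriptor ID rather than by name, which is what lets the query read
+		// the old namespace table (ID 2) and the new one (ID 30) side by side.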
+ row := db.QueryRowContext(ctx, + `SELECT count(*) FROM [2 AS namespace] WHERE id != 2 AND id NOT IN (SELECT id FROM [30 as namespace2])`) + var count int + if err := row.Scan(&count); err != nil { + t.Fatal(err) + } + if count != 0 { + t.Fatal("unexpected entries found in namespace but not in namespace2") + } + } + +} + +func uploadAndStart(nodes nodeListOption, v string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + // Put and start the binary. + args := u.uploadVersion(ctx, t, nodes, v) + // NB: can't start sequentially since cluster already bootstrapped. + u.c.Start(ctx, t, nodes, args, startArgsDontEncrypt, roachprodArgOption{"--sequential=false"}) + } +} + +func runNamespaceUpgrade(ctx context.Context, t *test, c *cluster, predecessorVersion string) { + roachNodes := c.All() + // An empty string means that the cockroach binary specified by flag + // `cockroach` will be used. + const mainVersion = "" + u := newVersionUpgradeTest(c, + uploadAndStart(roachNodes, predecessorVersion), + waitForUpgradeStep(roachNodes), + preventAutoUpgradeStep(1), + + // Make some objects on node 1. + createTableStep(1, "a"), + createTableStep(1, "todrop"), + createTableStep(1, "torename"), + createTableStep(1, "totruncate"), + createDBStep(1, "foo"), + createDBStep(1, "todrop"), + createDBStep(1, "torename"), + + // Upgrade Node 3. + binaryUpgradeStep(c.Node(3), mainVersion), + + // Disable the migration. We'll re-enable it later. + changeMigrationSetting(3, false), + + // Drop the objects on node 1, which is still on the old version. + dropTableStep(1, "a"), + dropDBStep(1, "foo"), + + // Verify that the new node can still run SHOW DATABASES. + showDatabasesStep(3), + // Verify that the new node can recreate the dropped objects. + createTableStep(3, "a"), + createDBStep(3, "foo"), + + // Drop the objects on node 1 again, which is still on the old version. + dropTableStep(1, "a"), + dropDBStep(1, "foo"), + + // Upgrade the other 2 nodes. + binaryUpgradeStep(c.Node(1), mainVersion), + binaryUpgradeStep(c.Node(2), mainVersion), + + // Finalize upgrade. + allowAutoUpgradeStep(1), + + waitForUpgradeStep(roachNodes), + + // After finalization, but before upgrade, add a table, drop a table, + // rename a table, and truncate a table. + createTableStep(1, "fresh"), + createDBStep(1, "fresh"), + dropTableStep(1, "todrop"), + dropDBStep(1, "todrop"), + renameTableStep(1, "torename", "new"), + renameDBStep(1, "torename", "new"), + truncateTableStep(1, "totruncate"), + + // Re-enable the migration. + changeMigrationSetting(1, true), + + // Wait for the migration to finish. + func(ctx context.Context, t *test, u *versionUpgradeTest) { + t.l.Printf("waiting for cluster to finish namespace migration\n") + + for _, i := range roachNodes { + err := retry.ForDuration(30*time.Second, func() error { + db := u.conn(ctx, t, i) + // This is copied from pkg/sqlmigrations/migrations.go. We don't + // export it just for this test because it feels unnecessary. + const systemNamespaceMigrationName = "upgrade system.namespace post-20.1-finalization" + var complete bool + if err := db.QueryRowContext(ctx, + `SELECT crdb_internal.completed_migrations() @> ARRAY[$1::string]`, + systemNamespaceMigrationName, + ).Scan(&complete); err != nil { + t.Fatal(err) + } + if !complete { + return fmt.Errorf("%d: migration not complete", i) + } + return nil + }) + if err != nil { + t.Fatal(err) + } + } + }, + + // Verify that there are no remaining entries that only live in the old + // namespace table. 
+ verifyNoOrphanedOldEntries(1), + + // Verify that the cluster can run SHOW DATABASES and re-use the names. + showDatabasesStep(3), + createTableStep(1, "a"), + createDBStep(1, "foo"), + createTableStep(1, "torename"), + createDBStep(1, "torename"), + createTableStep(1, "todrop"), + createDBStep(1, "todrop"), + + verifyNoOrphanedOldEntries(1), + ) + u.run(ctx, t) + +} diff --git a/pkg/cmd/roachtest/network.go b/pkg/cmd/roachtest/network.go index ef5c9285c5bf..de228a866a0e 100644 --- a/pkg/cmd/roachtest/network.go +++ b/pkg/cmd/roachtest/network.go @@ -23,7 +23,8 @@ import ( ) // runNetworkSanity is just a sanity check to make sure we're setting up toxiproxy -// correctly. +// correctly. It injects latency between the nodes and verifies that we're not +// seeing the latency on the client connection running `SELECT 1` on each node. func runNetworkSanity(ctx context.Context, t *test, origC *cluster, nodes int) { origC.Put(ctx, cockroach, "./cockroach", origC.All()) c, err := Toxify(ctx, origC, origC.All()) @@ -41,8 +42,6 @@ func runNetworkSanity(ctx context.Context, t *test, origC *cluster, nodes int) { // the upstream connections aren't affected by latency below, but the fixed // cost of starting the binary and processing the query is already close to // 100ms. - // - // NB: last node gets no latency injected, but first node gets cut off below. const latency = 300 * time.Millisecond for i := 1; i <= nodes; i++ { // NB: note that these latencies only apply to connections *to* the node @@ -84,7 +83,7 @@ insert into test.commit values(3,1000), (1,1000), (2,1000); select age, message from [ show trace for session ]; `) - for i := 1; i < origC.spec.NodeCount; i++ { + for i := 1; i <= origC.spec.NodeCount; i++ { if dur := c.Measure(ctx, i, `SELECT 1`); dur > latency { t.Fatalf("node %d unexpectedly affected by latency: select 1 took %.2fs", i, dur.Seconds()) } @@ -248,6 +247,28 @@ func registerNetwork(r *testRegistry) { Name: fmt.Sprintf("network/tpcc/nodes=%d", numNodes), Owner: OwnerKV, Cluster: makeClusterSpec(numNodes), + Skip: "https://github.com/cockroachdb/cockroach/issues/49901#issuecomment-640666646", + SkipDetails: `The ordering of steps in the test is: + +- install toxiproxy +- start cluster, wait for up-replication +- launch the goroutine that starts the tpcc client command, but do not wait on +it starting +- immediately, cause a network partition +- only then, the goroutine meant to start the tpcc client goes to fetch the +pg URLs and start workload, but of course this fails because network +partition +- tpcc fails to start, so the test tears down before it resolves the network partition +- test tear-down and debug zip fail because the network partition is still active + +There are two problems here: + +the tpcc client is not actually started yet when the test sets up the +network partition. This is a race condition. there should be a defer in +there to resolve the partition when the test aborts prematurely. 
diff --git a/pkg/cmd/roachtest/orm_helpers.go b/pkg/cmd/roachtest/orm_helpers.go
index e0975156d9da..b1df36b169e2 100644
--- a/pkg/cmd/roachtest/orm_helpers.go
+++ b/pkg/cmd/roachtest/orm_helpers.go
@@ -32,54 +32,64 @@ func alterZoneConfigAndClusterSettings(
   defer db.Close()
 
   if _, err := db.ExecContext(
-    ctx, `ALTER RANGE default CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
+    ctx, `ALTER RANGE default CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 60;`,
   ); err != nil {
     return err
   }
 
   if _, err := db.ExecContext(
-    ctx, `ALTER DATABASE system CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
+    ctx, `ALTER DATABASE system CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 60;`,
   ); err != nil {
     return err
   }
 
   if _, err := db.ExecContext(
-    ctx, `ALTER TABLE system.public.jobs CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
+    ctx, `ALTER TABLE system.public.jobs CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 60;`,
   ); err != nil {
     return err
   }
 
   if _, err := db.ExecContext(
-    ctx, `ALTER RANGE meta CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
+    ctx, `ALTER RANGE meta CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 60;`,
   ); err != nil {
     return err
   }
 
   if _, err := db.ExecContext(
-    ctx, `ALTER RANGE system CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
+    ctx, `ALTER RANGE system CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 60;`,
   ); err != nil {
     return err
   }
 
   if _, err := db.ExecContext(
-    ctx, `ALTER RANGE liveness CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
+    ctx, `ALTER RANGE liveness CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 60;`,
   ); err != nil {
     return err
   }
 
-  // TODO(rafi): remove this check once we stop testing against 2.0 and 2.1
-  if strings.HasPrefix(version, "v2.0") || strings.HasPrefix(version, "v2.1") {
-    return nil
+  if _, err := db.ExecContext(
+    ctx, `SET CLUSTER SETTING jobs.retention_time = '180s';`,
+  ); err != nil {
+    return err
   }
 
+  // Shorten the merge queue interval to clean up ranges due to dropped tables.
   if _, err := db.ExecContext(
-    ctx, `SET CLUSTER SETTING jobs.retention_time = '180s';`,
+    ctx, `SET CLUSTER SETTING kv.range_merge.queue_interval = '200ms'`,
   ); err != nil {
     return err
   }
 
-  // Enable temp tables for v20.1
-  if strings.HasPrefix(version, "v20.") {
+  // Disable syncs associated with the Raft log, which are the primary cause
+  // of fsyncs.
+  if _, err := db.ExecContext(
+    ctx, `SET CLUSTER SETTING kv.raft_log.disable_synchronization_unsafe = 'true'`,
+  ); err != nil {
+    return err
+  }
+
+  // Enable temp tables for v20.1+
+  if strings.HasPrefix(version, "v20.") || strings.HasPrefix(version, "v21.") {
     if _, err := db.ExecContext(
       ctx, `SET CLUSTER SETTING sql.defaults.experimental_temporary_tables.enabled = 'true';`,
     ); err != nil {
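The HasPrefix gate above has to be extended by hand for every new major version (v22., v23., ...). If that churn ever becomes a burden, a numeric comparison does the same job; a sketch under the assumption that version strings look like vMAJOR.MINOR.PATCH (helper name hypothetical; fmt is already imported in this file):

    // versionAtLeast reports whether a "vM.N.*" version string is at least vM.N.
    // Sketch only; production code would likely use a shared version type.
    func versionAtLeast(version string, major, minor int) bool {
      var ma, mi int
      if _, err := fmt.Sscanf(version, "v%d.%d", &ma, &mi); err != nil {
        return false
      }
      return ma > major || (ma == major && mi >= minor)
    }

The gate would then read `if versionAtLeast(version, 20, 1) { ... }`.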
@@ -114,9 +124,9 @@ func newORMTestsResults() *ormTestsResults {
 
 // summarizeAll summarizes the result of running an ORM or a driver test suite
 // against a cockroach node. If an unexpected result is observed (for example,
-// a test unexpectedly failed or passed), a new blacklist is populated.
+// a test unexpectedly failed or passed), a new blocklist is populated.
 func (r *ormTestsResults) summarizeAll(
-  t *test, ormName, blacklistName string, expectedFailures blacklist, version, latestTag string,
+  t *test, ormName, blocklistName string, expectedFailures blocklist, version, tag string,
 ) {
   // Collect all the tests that were not run.
   notRunCount := 0
@@ -142,7 +152,7 @@ func (r *ormTestsResults) summarizeAll(
   t.l.Printf("------------------------\n")
 
   r.summarizeFailed(
-    t, ormName, blacklistName, expectedFailures, version, latestTag, notRunCount,
+    t, ormName, blocklistName, expectedFailures, version, tag, notRunCount,
   )
 }
 
@@ -152,17 +162,21 @@
 // If a test suite outputs only the failures, then this method should be used.
 func (r *ormTestsResults) summarizeFailed(
   t *test,
-  ormName, blacklistName string,
-  expectedFailures blacklist,
+  ormName, blocklistName string,
+  expectedFailures blocklist,
   version, latestTag string,
   notRunCount int,
 ) {
   var bResults strings.Builder
   fmt.Fprintf(&bResults, "Tests run on Cockroach %s\n", version)
   fmt.Fprintf(&bResults, "Tests run against %s %s\n", ormName, latestTag)
+  totalTestsRun := r.passExpectedCount + r.passUnexpectedCount + r.failExpectedCount + r.failUnexpectedCount
   fmt.Fprintf(&bResults, "%d Total Tests Run\n",
-    r.passExpectedCount+r.passUnexpectedCount+r.failExpectedCount+r.failUnexpectedCount,
+    totalTestsRun,
   )
+  if totalTestsRun == 0 {
+    t.Fatal("No tests ran! Fix the testing commands.")
+  }
 
   p := func(msg string, count int) {
     testString := "tests"
@@ -193,11 +207,11 @@
   if r.failUnexpectedCount > 0 || r.passUnexpectedCount > 0 ||
     notRunCount > 0 || r.unexpectedSkipCount > 0 {
-    // Create a new blacklist so we can easily update this test.
+    // Create a new blocklist so we can easily update this test.
     sort.Strings(r.currentFailures)
     var b strings.Builder
-    fmt.Fprintf(&b, "Here is new %s blacklist that can be used to update the test:\n\n", ormName)
-    fmt.Fprintf(&b, "var %s = blacklist{\n", blacklistName)
+    fmt.Fprintf(&b, "Here is a new %s blocklist that can be used to update the test:\n\n", ormName)
+    fmt.Fprintf(&b, "var %s = blocklist{\n", blocklistName)
     for _, test := range r.currentFailures {
       issue := expectedFailures[test]
       if len(issue) == 0 || issue == "unknown" {
@@ -211,9 +225,9 @@
     fmt.Fprintf(&b, "}\n\n")
     t.l.Printf("\n\n%s\n\n", b.String())
     t.l.Printf("------------------------\n")
-    t.Fatalf("\n%s\nAn updated blacklist (%s) is available in the artifacts' %s log\n",
+    t.Fatalf("\n%s\nAn updated blocklist (%s) is available in the artifacts' %s log\n",
       bResults.String(),
-      blacklistName,
+      blocklistName,
       ormName,
     )
   }
diff --git a/pkg/cmd/roachtest/overload_tpcc_olap.go b/pkg/cmd/roachtest/overload_tpcc_olap.go
index b1e1ce15e60f..5dd1074351e1 100644
--- a/pkg/cmd/roachtest/overload_tpcc_olap.go
+++ b/pkg/cmd/roachtest/overload_tpcc_olap.go
@@ -43,7 +43,10 @@ type tpccOLAPSpec struct {
 }
 
 func (s tpccOLAPSpec) run(ctx context.Context, t *test, c *cluster) {
-  crdbNodes, workloadNode := setupTPCC(ctx, t, c, s.Warehouses, nil /* versions */)
+  crdbNodes, workloadNode := setupTPCC(
+    ctx, t, c, tpccOptions{
+      Warehouses: s.Warehouses, SetupType: usingImport,
+    })
   const queryFileName = "queries.sql"
   // querybench expects the entire query to be on a single line.
  queryLine := `"` + strings.Replace(tpccOlapQuery, "\n", " ", -1) + `"`
diff --git a/pkg/cmd/roachtest/pebble.go b/pkg/cmd/roachtest/pebble.go
new file mode 100644
index 000000000000..12f3d33e7ad8
--- /dev/null
+++ b/pkg/cmd/roachtest/pebble.go
@@ -0,0 +1,105 @@
+// Copyright 2020 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package main
+
+import (
+  "context"
+  "fmt"
+  "os"
+  "path/filepath"
+  "time"
+)
+
+func registerPebble(r *testRegistry) {
+  pebble := os.Getenv("PEBBLE_BIN")
+  if pebble == "" {
+    pebble = "./pebble.linux"
+  }
+
+  run := func(ctx context.Context, t *test, c *cluster, size int) {
+    c.Put(ctx, pebble, "./pebble")
+
+    const initialKeys = 10_000_000
+    const cache = 4 << 30 // 4 GB
+    const duration = 10 * time.Minute
+    const dataDir = "$(dirname {store-dir})"
+    const dataTar = dataDir + "/data.tar"
+    const benchDir = dataDir + "/bench"
+
+    runCmd := func(cmd string) {
+      c.l.PrintfCtx(ctx, "> %s", cmd)
+      err := c.RunL(ctx, c.l, c.All(), cmd)
+      c.l.Printf("> result: %+v", err)
+      if err := ctx.Err(); err != nil {
+        c.l.Printf("(note: incoming context was canceled: %s)", err)
+      }
+      if err != nil {
+        t.Fatal(err)
+      }
+    }
+
+    // Generate the initial DB state. This is somewhat time consuming for
+    // larger value sizes, so we do this once and reuse the same DB state on
+    // all of the workloads.
+    runCmd(fmt.Sprintf(
+      "(./pebble bench ycsb %s"+
+        " --wipe "+
+        " --workload=read=100"+
+        " --concurrency=1"+
+        " --values=%d"+
+        " --initial-keys=%d"+
+        " --cache=%d"+
+        " --num-ops=1 && "+
+        "rm -f %s && tar cvPf %s %s) > init.log 2>&1",
+      benchDir, size, initialKeys, cache, dataTar, dataTar, benchDir))
+
+    for _, workload := range []string{"A", "B", "C", "D", "E"} {
+      keys := "zipf"
+      switch workload {
+      case "D":
+        keys = "uniform"
+      }
+
+      runCmd(fmt.Sprintf(
+        "rm -fr %s && tar xPf %s &&"+
+          " ./pebble bench ycsb %s"+
+          " --workload=%s"+
+          " --concurrency=256"+
+          " --values=%d"+
+          " --keys=%s"+
+          " --initial-keys=0"+
+          " --prepopulated-keys=%d"+
+          " --cache=%d"+
+          " --duration=%s > ycsb.log 2>&1",
+        benchDir, dataTar, benchDir, workload, size, keys, initialKeys, cache, duration))
+
+      dest := filepath.Join(t.artifactsDir, fmt.Sprintf("ycsb_%s.log", workload))
+      if err := c.Get(ctx, c.l, "ycsb.log", dest, c.All()); err != nil {
+        t.Fatal(err)
+      }
+    }
+  }
+
+  for _, size := range []int{64, 1024} {
+    size := size
+    r.Add(testSpec{
+      Name:       fmt.Sprintf("pebble/ycsb/size=%d", size),
+      Owner:      OwnerStorage,
+      Timeout:    2 * time.Hour,
+      MinVersion: "v20.1.0",
+      Cluster:    makeClusterSpec(5, cpu(16)),
+      Tags:       []string{"pebble"},
+      Run: func(ctx context.Context, t *test, c *cluster) {
+        run(ctx, t, c, size)
+      },
+    })
+  }
+}
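One Go detail in the registration loop above: the `size := size` line re-declares the loop variable so that each closure passed to r.Add captures its own copy; without it, every registered Run function would observe the final loop value (1024) under pre-Go-1.22 loop semantics. A self-contained illustration:

    // Each closure returns the size it was registered with, thanks to the
    // per-iteration copy made by the shadowing declaration.
    sizes := []int{64, 1024}
    var fns []func() int
    for _, size := range sizes {
      size := size // drop this line and both closures would return 1024
      fns = append(fns, func() int { return size })
    }
    // fns[0]() == 64, fns[1]() == 1024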
diff --git a/pkg/cmd/roachtest/pgjdbc.go b/pkg/cmd/roachtest/pgjdbc.go
index f69910ab5e06..123f23149095 100644
--- a/pkg/cmd/roachtest/pgjdbc.go
+++ b/pkg/cmd/roachtest/pgjdbc.go
@@ -17,6 +17,7 @@ import (
 )
 
 var pgjdbcReleaseTagRegex = regexp.MustCompile(`^REL(?P<major>\d+)\.(?P<minor>\d+)\.(?P<point>\d+)$`)
+var supportedPGJDBCTag = "REL42.2.9"
 
 // This test runs pgjdbc's full test suite against a single cockroach node.
@@ -82,7 +83,7 @@ func registerPgjdbc(r *testRegistry) {
     c,
     "https://github.com/pgjdbc/pgjdbc.git",
     "/mnt/data1/pgjdbc",
-    "REL42.2.9",
+    supportedPGJDBCTag,
     node,
   ); err != nil {
     t.Fatal(err)
   }
@@ -117,14 +118,14 @@ func registerPgjdbc(r *testRegistry) {
     t.Fatal(err)
   }
 
-  blacklistName, expectedFailures, ignorelistName, ignorelist := pgjdbcBlacklists.getLists(version)
+  blocklistName, expectedFailures, ignorelistName, ignorelist := pgjdbcBlocklists.getLists(version)
   if expectedFailures == nil {
-    t.Fatalf("No pgjdbc blacklist defined for cockroach version %s", version)
+    t.Fatalf("No pgjdbc blocklist defined for cockroach version %s", version)
   }
-  status := fmt.Sprintf("Running cockroach version %s, using blacklist %s", version, blacklistName)
+  status := fmt.Sprintf("Running cockroach version %s, using blocklist %s", version, blocklistName)
   if ignorelist != nil {
-    status = fmt.Sprintf("Running cockroach version %s, using blacklist %s, using ignorelist %s",
-      version, blacklistName, ignorelistName)
+    status = fmt.Sprintf("Running cockroach version %s, using blocklist %s, using ignorelist %s",
+      version, blocklistName, ignorelistName)
   }
   c.l.Printf("%s", status)
@@ -173,15 +174,16 @@ func registerPgjdbc(r *testRegistry) {
   parseAndSummarizeJavaORMTestsResults(
     ctx, t, c, node, "pgjdbc" /* ormName */, output,
-    blacklistName, expectedFailures, ignorelist, version, latestTag,
+    blocklistName, expectedFailures, ignorelist, version, supportedPGJDBCTag,
   )
 }
 
 r.Add(testSpec{
-  Name:    "pgjdbc",
-  Owner:   OwnerAppDev,
-  Cluster: makeClusterSpec(1),
-  Tags:    []string{`default`, `driver`},
+  MinVersion: "v2.1.0",
+  Name:       "pgjdbc",
+  Owner:      OwnerAppDev,
+  Cluster:    makeClusterSpec(1),
+  Tags:       []string{`default`, `driver`},
   Run: func(ctx context.Context, t *test, c *cluster) {
     runPgjdbc(ctx, t, c)
   },
diff --git a/pkg/cmd/roachtest/pgjdbc_blacklist.go b/pkg/cmd/roachtest/pgjdbc_blocklist.go
similarity index 85%
rename from pkg/cmd/roachtest/pgjdbc_blacklist.go
rename to pkg/cmd/roachtest/pgjdbc_blocklist.go
index 03e44942e679..5ad2f8d7cdd9 100644
--- a/pkg/cmd/roachtest/pgjdbc_blacklist.go
+++ b/pkg/cmd/roachtest/pgjdbc_blocklist.go
@@ -10,19 +10,934 @@
 
 package main
 
-var pgjdbcBlacklists = blacklistsForVersion{
-  {"v2.0", "pgjdbcBlackList2_0", pgjdbcBlackList2_0, "", nil},
-  {"v2.1", "pgjdbcBlackList2_1", pgjdbcBlackList2_1, "", nil},
-  {"v2.2", "pgjdbcBlackList19_1", pgjdbcBlackList19_1, "", nil},
-  {"v19.1", "pgjdbcBlackList19_1", pgjdbcBlackList19_1, "", nil},
-  {"v19.2", "pgjdbcBlackList19_2", pgjdbcBlackList19_2, "pgjdbcIgnoreList19_2", pgjdbcIgnoreList19_2},
-  {"v20.1", "pgjdbcBlackList20_1", pgjdbcBlackList20_1, "pgjdbcIgnoreList20_1", pgjdbcIgnoreList20_1},
+var pgjdbcBlocklists = blocklistsForVersion{
+  {"v2.1", "pgjdbcBlockList2_1", pgjdbcBlockList2_1, "", nil},
+  {"v2.2", "pgjdbcBlockList19_1", pgjdbcBlockList19_1, "", pgjdbcIgnoreList19_1},
+  {"v19.1", "pgjdbcBlockList19_1", pgjdbcBlockList19_1, "", pgjdbcIgnoreList19_1},
+  {"v19.2", "pgjdbcBlockList19_2", pgjdbcBlockList19_2, "pgjdbcIgnoreList19_2", pgjdbcIgnoreList19_2},
+  {"v20.1", "pgjdbcBlockList20_1", pgjdbcBlockList20_1, "pgjdbcIgnoreList20_1", pgjdbcIgnoreList20_1},
+  {"v20.2", "pgjdbcBlockList20_2", pgjdbcBlockList20_2, "pgjdbcIgnoreList20_2", pgjdbcIgnoreList20_2},
 }
 
 // Please keep these lists alphabetized for easy diffing.
-// After a failed run, an updated version of this blacklist should be available
+// After a failed run, an updated version of this blocklist should be available
 // in the test log.
-var pgjdbcBlackList20_1 = blacklist{ +var pgjdbcBlockList20_2 = blocklist{ + "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", + "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", + "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", + "org.postgresql.jdbc.PgSQLXMLTest.setCharacterStream": "43355", + "org.postgresql.test.core.OptionsPropertyTest.testOptionsInProperties": "26443", + "org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = FORCE]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = REGULAR]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = FORCE]": "21286", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = REGULAR]": "21286", + "org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = FORCE]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = REGULAR]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = FORCE]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = REGULAR]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[100: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[101: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[102: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[103: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, 
flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[13: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41445", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[144: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[145: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[146: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[147: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[148: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[149: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[150: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[151: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[152: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[153: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[154: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[155: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[156: 
autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[157: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[158: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[159: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[15: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41445", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[160: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[161: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[162: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[163: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[177: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41445", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[179: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41445", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[228: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[229: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[230: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": 
"41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[231: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[232: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[233: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[234: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[235: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[236: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[237: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[238: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[239: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[260: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[261: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[262: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[263: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[264: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, 
flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[265: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[266: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[267: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[308: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[309: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[310: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[311: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[312: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[313: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[314: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[315: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[316: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[317: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[318: autorollback(autoSave=NEVER, 
cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[319: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[320: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[321: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[322: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[323: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[324: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[325: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[326: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[327: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[341: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[343: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[348: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[349: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + 
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[350: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[351: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[368: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[369: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[370: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[371: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[372: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[373: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[374: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[375: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[392: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[393: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[394: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[395: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, 
flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[396: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[397: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[398: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[399: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[400: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[401: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[402: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[403: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[424: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[425: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[426: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[427: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[428: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[429: autorollback(autoSave=ALWAYS, 
cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[430: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[431: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[472: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[473: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[474: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[475: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[476: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[477: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[478: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[479: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[480: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[481: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[482: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + 
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[483: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[484: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[485: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[486: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[487: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[488: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[489: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[490: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[491: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[505: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[507: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[512: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[513: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[514: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, 
flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[515: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[532: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[533: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[534: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[535: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[536: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[537: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[538: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[539: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DISCARD, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[556: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[557: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[558: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[559: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", + "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[560: 
autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[561: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[562: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[563: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[564: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[565: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[566: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[567: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[588: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[589: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[590: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[591: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[592: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[593: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[594: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[595: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[636: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[637: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[638: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[639: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[640: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[641: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[642: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[643: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[644: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[645: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[646: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[647: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[648: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[649: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[64: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[650: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[651: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[652: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[653: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[654: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[655: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[65: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[669: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[66: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[671: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[67: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[68: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[69: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[70: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[71: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[720: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[721: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[722: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[723: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[724: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[725: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[726: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[727: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[728: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[729: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[72: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[730: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[731: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[73: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[74: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[752: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[753: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[754: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[755: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[756: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[757: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[758: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[759: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[75: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[800: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[801: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[802: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[803: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[804: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[805: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[806: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[807: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[808: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[809: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[810: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[811: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[812: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[813: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[814: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[815: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[816: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[817: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[818: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[819: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[833: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41445",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[835: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41445",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[884: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[885: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[886: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[887: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[888: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[889: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[890: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[891: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[892: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[893: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[894: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[895: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[916: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[917: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[918: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[919: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[920: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[921: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[922: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[923: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[964: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[965: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[966: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[967: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[968: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[969: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[96: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[970: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[971: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[972: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[973: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[974: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[975: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[976: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[977: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[978: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[979: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[97: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[980: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[981: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[982: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[983: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[98: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
+ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[99: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = false]": "41513",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = true]": "41513",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = false]": "41513",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = true]": "41513",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = false]": "26366",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = true]": "26366",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = false]": "26366",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = true]": "26366",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = false]": "31463",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = true]": "31463",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = false]": "31463",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = true]": "31463",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = false]": "40195",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = true]": "40195",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = false]": "40195",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = true]": "40195",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParameter[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParameter[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParametersOnly[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParametersOnly[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBindsInNestedParens[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBindsInNestedParens[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testConsistentOutcome[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testConsistentOutcome[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testINSERTwithNamedColumnsNotBroken[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testINSERTwithNamedColumnsNotBroken[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMixedCaseInSeRtStatement[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMixedCaseInSeRtStatement[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMultiValues1bind[2: autoCommit=NO, binary=REGULAR]": "26508",
+ "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMultiValues1bind[3: autoCommit=NO, binary=FORCE]": "26508",
+ "org.postgresql.test.jdbc2.BlobTest.testGetBytesOffset": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testLargeLargeObject": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testMarkResetStream": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testMultipleStreams": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testParallelStreams": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testSet": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testSetNull": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testUploadBlob_LOOP": "26725",
+ "org.postgresql.test.jdbc2.BlobTest.testUploadBlob_NATIVE": "26725",
+ "org.postgresql.test.jdbc2.BlobTransactionTest.testConcurrentReplace": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testBadStmt": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testBatchCall": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testFetchBeforeExecute": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testFetchWithNoResults": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetArray": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetDouble": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetInt": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetNumeric": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetNumericWithoutArg": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetShort": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetString": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testGetUpdateCount": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testRaiseNotice": "17511",
+ "org.postgresql.test.jdbc2.CallableStmtTest.testWasNullBeforeFetch": "17511",
+ "org.postgresql.test.jdbc2.ClientEncodingTest.setEncodingAscii[allowEncodingChanges=false]": "37129",
+ "org.postgresql.test.jdbc2.ClientEncodingTest.setEncodingAscii[allowEncodingChanges=true]": "37129",
+ "org.postgresql.test.jdbc2.ConcurrentStatementFetch.testFetchTwoStatements[6: fetch(autoCommit=NO, fetchSize=1, binaryMode=REGULAR)]": "4035",
+ "org.postgresql.test.jdbc2.ConcurrentStatementFetch.testFetchTwoStatements[7: fetch(autoCommit=NO, fetchSize=1, binaryMode=FORCE)]": "4035",
+ "org.postgresql.test.jdbc2.ConcurrentStatementFetch.testFetchTwoStatements[8: fetch(autoCommit=NO, fetchSize=2, binaryMode=REGULAR)]": "4035",
+ "org.postgresql.test.jdbc2.ConcurrentStatementFetch.testFetchTwoStatements[9: fetch(autoCommit=NO, fetchSize=2, binaryMode=FORCE)]": "4035",
+ "org.postgresql.test.jdbc2.ConnectionTest.testCreateStatement": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testDoubleClose": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testIsClosed": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testNativeSQL": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testPrepareCall": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testPrepareStatement": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testReadOnly_always": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testReadOnly_ignore": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testReadOnly_transaction": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testTransactionIsolation": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testTransactions": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testTypeMaps": "41578",
+ "org.postgresql.test.jdbc2.ConnectionTest.testWarnings": "41578",
+ "org.postgresql.test.jdbc2.CopyTest.testChangeDateStyle": "41608",
+ "org.postgresql.test.jdbc2.CopyTest.testCopyOut": "41608",
+ "org.postgresql.test.jdbc2.CopyTest.testCopyOutByRow": "41608",
+ "org.postgresql.test.jdbc2.CopyTest.testCopyQuery": "41608",
+ "org.postgresql.test.jdbc2.CopyTest.testLockReleaseOnCancelFailure": "41608",
+ "org.postgresql.test.jdbc2.CursorFetchTest.testMultistatement[binary = FORCE]": "40195",
+ "org.postgresql.test.jdbc2.CursorFetchTest.testMultistatement[binary = REGULAR]": "40195",
+ "org.postgresql.test.jdbc2.DatabaseEncodingTest.testEncoding": "41771",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testArrayInt4DoubleDim": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testArrayTypeInfo": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testAscDescIndexInfo": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testBestRowIdentifier": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testCatalogs": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testColumnPrivileges": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testColumns": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testCrossReference": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testCustomArrayTypeInfo": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testDomainColumnSize": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testDroppedColumns": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testEscaping": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testForeignKeyActions": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testForeignKeys": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testForeignKeysToUniqueIndexes": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testFuncReturningComposite": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testFuncReturningTable": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testFuncWithDirection": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testFuncWithNames": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testFuncWithoutNames": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testFunctionColumns": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testGetSQLKeywords": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testGetUDT1": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testGetUDT2": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testGetUDT3": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testGetUDT4": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testGetUDTQualified": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testIdentityColumns": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testIndexInfo": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testInformationAboutArrayTypes": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testMultiColumnForeignKeys": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testNoTablePrivileges": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testNotNullDomainColumn": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testPartialIndexInfo": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testPartitionedTables": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testPrimaryKeys": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testProcedures": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testSameTableForeignKeys": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testSchemas": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testSearchStringEscape": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testSerialColumns": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testTablePrivileges": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testTableTypes": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testTables": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testTypeInfoQuoting": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testTypeInfoSigned": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testTypes": "17511",
+ "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testVersionColumns": "17511",
+ "org.postgresql.test.jdbc2.DateStyleTest.conenct[dateStyle=ISO,ymd, shouldPass=true]": "41773",
+ "org.postgresql.test.jdbc2.DateStyleTest.conenct[dateStyle=PostgreSQL, shouldPass=false]": "41773",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGbox[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGbox[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGcircle[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGcircle[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGline[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGline[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGlseg[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGlseg[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGpath[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGpath[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGpoint[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGpoint[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGpolygon[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.GeometricTest.testPGpolygon[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.JBuilderTest.testMoney": "41578",
+ "org.postgresql.test.jdbc2.NotifyTest.testAsyncNotify": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testAsyncNotifyWithEndlessTimeoutAndMessagesAvailableWhenStartingListening": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testAsyncNotifyWithEndlessTimeoutAndMessagesSendAfter": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testAsyncNotifyWithTimeout": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testAsyncNotifyWithTimeoutAndMessagesAvailableWhenStartingListening": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testAsyncNotifyWithTimeoutAndMessagesSendAfter": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testAsyncNotifyWithTimeoutAndSocketThatBecomesClosed": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testNotify": "41522",
+ "org.postgresql.test.jdbc2.NotifyTest.testNotifyArgument": "41522",
+ "org.postgresql.test.jdbc2.PGTimeTest.testTimeInsertAndSelect": "41775",
+ "org.postgresql.test.jdbc2.PGTimestampTest.testTimeInsertAndSelect": "41775",
+ "org.postgresql.test.jdbc2.ParameterStatusTest.transactionalParametersAutocommit": "32562",
+ "org.postgresql.test.jdbc2.ParameterStatusTest.transactionalParametersCommit": "32562",
+ "org.postgresql.test.jdbc2.ParameterStatusTest.transactionalParametersRollback": "32562",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testBatchWithPrepareThreshold5[binary = REGULAR]": "5807",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testDollarQuotes[binary = FORCE]": "41777",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testDollarQuotes[binary = REGULAR]": "41777",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = FORCE]": "21286",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = REGULAR]": "21286",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = FORCE]": "36215",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = REGULAR]": "36215",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = FORCE]": "41779",
+ "org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = REGULAR]": "41779",
+ "org.postgresql.test.jdbc2.RefCursorTest.testEmptyResult[typeName = OTHER, cursorType = 1,111]": "17511",
+ "org.postgresql.test.jdbc2.RefCursorTest.testEmptyResult[typeName = REF_CURSOR, cursorType = 2,012]": "17511",
+ "org.postgresql.test.jdbc2.RefCursorTest.testMetaData[typeName = OTHER, cursorType = 1,111]": "17511",
+ "org.postgresql.test.jdbc2.RefCursorTest.testMetaData[typeName = REF_CURSOR, cursorType = 2,012]": "17511",
+ "org.postgresql.test.jdbc2.RefCursorTest.testResultType[typeName = OTHER, cursorType = 1,111]": "17511",
+ "org.postgresql.test.jdbc2.RefCursorTest.testResultType[typeName = REF_CURSOR, cursorType = 2,012]": "17511",
+ "org.postgresql.test.jdbc2.RefCursorTest.testResult[typeName = OTHER, cursorType = 1,111]": "17511",
+ "org.postgresql.test.jdbc2.RefCursorTest.testResult[typeName = REF_CURSOR, cursorType = 2,012]": "17511",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]": "32565",
+ "org.postgresql.test.jdbc2.ResultSetTest.testgetBadBoolean": "32552",
+ "org.postgresql.test.jdbc2.SearchPathLookupTest.testSearchPathBackwardsCompatibleLookup": "26443",
+ "org.postgresql.test.jdbc2.SearchPathLookupTest.testSearchPathHiddenLookup": "26443",
+ "org.postgresql.test.jdbc2.SearchPathLookupTest.testSearchPathNormalLookup": "26443",
+ "org.postgresql.test.jdbc2.ServerCursorTest.testBasicFetch": "41412",
+ "org.postgresql.test.jdbc2.ServerCursorTest.testBinaryFetch": "41412",
+ "org.postgresql.test.jdbc2.ServerErrorTest.testColumn": "27796",
+ "org.postgresql.test.jdbc2.ServerErrorTest.testDatatype": "27796",
+ "org.postgresql.test.jdbc2.ServerErrorTest.testPrimaryKey": "27796",
+ "org.postgresql.test.jdbc2.StatementTest.testClose": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testCloseInProgressStatement": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testConcurrentWarningReadAndClear": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testDateFuncWithParam": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testDateFunctions": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testDollarInComment": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testDollarInCommentTwoComments": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testDoubleClose": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testEmptyQuery": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testEscapeProcessing": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testExecuteUpdateFailsOnMultiStatementSelect": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testExecuteUpdateFailsOnSelect": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testFastCloses": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testJavascriptFunction": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testLongQueryTimeout": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testMultiExecute": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testMultipleCancels": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testNumericFunctions": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testParsingDollarQuotes": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testParsingSemiColons": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testPreparedFunction": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testResultSetTwice": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testSetQueryTimeout": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testSetQueryTimeoutOnPrepared": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testSetQueryTimeoutWithSleep": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testSetQueryTimeoutWithoutExecute": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testShortQueryTimeout": "17511",
+ "org.postgresql.test.jdbc2.StatementTest.testSideStatementFinalizers": "17511",
+
"org.postgresql.test.jdbc2.StatementTest.testStringFunctions": "17511", + "org.postgresql.test.jdbc2.StatementTest.testSystemFunctions": "17511", + "org.postgresql.test.jdbc2.StatementTest.testUnbalancedParensParseError": "17511", + "org.postgresql.test.jdbc2.StatementTest.testUnterminatedComment": "17511", + "org.postgresql.test.jdbc2.StatementTest.testUnterminatedDollarQuotes": "17511", + "org.postgresql.test.jdbc2.StatementTest.testUnterminatedIdentifier": "17511", + "org.postgresql.test.jdbc2.StatementTest.testUnterminatedLiteral": "17511", + "org.postgresql.test.jdbc2.StatementTest.testUnterminatedNamedDollarQuotes": "17511", + "org.postgresql.test.jdbc2.StatementTest.testUpdateCount": "17511", + "org.postgresql.test.jdbc2.StatementTest.testWarningsAreAvailableAsap": "17511", + "org.postgresql.test.jdbc2.StatementTest.testWarningsAreCleared": "17511", + "org.postgresql.test.jdbc2.StringTypeUnspecifiedArrayTest.testCreateArrayWithNonCachedType[binary = FORCE]": "21286", + "org.postgresql.test.jdbc2.StringTypeUnspecifiedArrayTest.testCreateArrayWithNonCachedType[binary = REGULAR]": "21286", + "org.postgresql.test.jdbc2.TimeTest.testGetTimeZone": "41775", + "org.postgresql.test.jdbc2.TimestampTest.testGetTimestampWOTZ[binary = FORCE]": "41786", + "org.postgresql.test.jdbc2.TimestampTest.testGetTimestampWOTZ[binary = REGULAR]": "41786", + "org.postgresql.test.jdbc2.TimestampTest.testInfinity[binary = FORCE]": "41786", + "org.postgresql.test.jdbc2.TimestampTest.testInfinity[binary = REGULAR]": "41786", + "org.postgresql.test.jdbc2.TimestampTest.testSetTimestampWOTZ[binary = FORCE]": "41786", + "org.postgresql.test.jdbc2.TimestampTest.testSetTimestampWOTZ[binary = REGULAR]": "41786", + "org.postgresql.test.jdbc2.TimezoneTest.testGetDate": "41776", + "org.postgresql.test.jdbc2.TimezoneTest.testGetTime": "41776", + "org.postgresql.test.jdbc2.TimezoneTest.testGetTimestamp": "41776", + "org.postgresql.test.jdbc2.TimezoneTest.testSetDate": "41776", + "org.postgresql.test.jdbc2.TimezoneTest.testSetTime": "41776", + "org.postgresql.test.jdbc2.TimezoneTest.testSetTimestamp": "41776", + "org.postgresql.test.jdbc2.UpdateableResultTest.testArray": "26925", + "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateSelectOnly": "53552", + "org.postgresql.test.jdbc3.CompositeTest.testComplexArgumentSelect": "27793", + "org.postgresql.test.jdbc3.CompositeTest.testComplexSelect": "27793", + "org.postgresql.test.jdbc3.CompositeTest.testComplexTableNameMetadata": "27793", + "org.postgresql.test.jdbc3.CompositeTest.testCompositeFromTable": "27793", + "org.postgresql.test.jdbc3.CompositeTest.testNullArrayElement": "27793", + "org.postgresql.test.jdbc3.CompositeTest.testSimpleArgumentSelect": "27793", + "org.postgresql.test.jdbc3.CompositeTest.testSimpleSelect": "27793", + "org.postgresql.test.jdbc3.CompositeTest.testTableMetadata": "27793", + "org.postgresql.test.jdbc3.DatabaseMetaDataTest.testGetColumnsForDomain": "27796", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunction": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunctionHavingReturnParameter": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeProcedure": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunction": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunctionHavingReturnParameter": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeProcedure": 
"17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunction": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunctionHavingReturnParameter": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeProcedure": "17511", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = A, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = A, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = AB, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = AB, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = NO, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = NO, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = STAR, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testBatchGeneratedKeys[returningInQuery = STAR, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = A, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = A, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = AB, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = AB, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = NO, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = NO, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = STAR, binary = FORCE]": "26925", + "org.postgresql.test.jdbc3.GeneratedKeysTest.testSerialWorks[returningInQuery = STAR, binary = REGULAR]": "26925", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.test1Byte": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteOffset": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteOffsetStream": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteStream": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testAllBytes": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testAllBytesStream": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testManyBytes": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testManyBytesOffset": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testManyBytesOffsetStream": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testManyBytesStream": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testPattern": "26725", + "org.postgresql.test.jdbc3.Jdbc3BlobTest.testTruncate": "26725", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testAllInOut": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testFunctionNoParametersWithParentheses": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testFunctionNoParametersWithoutParentheses": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBoolean01": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetByte01": "17511", + 
"org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBytes01": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBytes02": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetDouble01": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetDoubleAsReal": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetInt01": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetLong01": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectDecimal": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectFloat": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectLongVarchar": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetShort01": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testInOut": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testMultipleOutExecutions": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNotEnoughParameters": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNumeric": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOnlyNativeCall": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOutNativeCall": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithParentheses": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithoutParentheses": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSetObjectBit": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSomeInOut": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSum": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testTooManyParameters": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testUpdateDecimal": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testUpdateReal": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testVarcharBool": "17511", + "org.postgresql.test.jdbc3.ParameterMetaDataTest.testFailsOnBadIndex": "21286", + "org.postgresql.test.jdbc3.ParameterMetaDataTest.testMultiStatement": "21286", + "org.postgresql.test.jdbc3.ParameterMetaDataTest.testParameterMD": "21286", + "org.postgresql.test.jdbc3.ParameterMetaDataTest.testTypeChangeWithUnknown": "21286", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = null]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = unspecified]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = varchar]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = null]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = unspecified]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = varchar]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = null]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = unspecified]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = varchar]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = 
null]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = unspecified]": "27793", + "org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = varchar]": "27793", + "org.postgresql.test.jdbc3.TypesTest.testCallableBoolean": "17511", + "org.postgresql.test.jdbc3.TypesTest.testPreparedBoolean": "17511", + "org.postgresql.test.jdbc3.TypesTest.testPreparedByte": "17511", + "org.postgresql.test.jdbc3.TypesTest.testUnknownType": "17511", + "org.postgresql.test.jdbc4.ArrayTest.createNullArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.createNullArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.multiDimIntArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.multiDimIntArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.nullArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.nullArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinAlias[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinAlias[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinNonAlias[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinNonAlias[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCasingComposite[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCasingComposite[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBool[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBool[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfInt[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfInt[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiJson[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiJson[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiString[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiString[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfNull[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfNull[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfSmallInt[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfSmallInt[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithNonStandardDelimiter[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithNonStandardDelimiter[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithoutServer[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithoutServer[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateEmptyArrayOfIntViaAlias[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreateEmptyArrayOfIntViaAlias[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreatePrimitiveArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testCreatePrimitiveArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testEvilCasing[binary = FORCE]": "32552", + 
"org.postgresql.test.jdbc4.ArrayTest.testEvilCasing[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testGetArrayOfComposites[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testGetArrayOfComposites[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testSetObjectFromJavaArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testSetObjectFromJavaArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testToString[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testToString[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testUUIDArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc4.ArrayTest.testUUIDArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc4.BlobTest.testFree": "26725", + "org.postgresql.test.jdbc4.BlobTest.testGetBinaryStreamWithBoundaries": "26725", + "org.postgresql.test.jdbc4.BlobTest.testSetBlobWithStream": "26725", + "org.postgresql.test.jdbc4.BlobTest.testSetBlobWithStreamAndLength": "26725", + "org.postgresql.test.jdbc4.DatabaseMetaDataHideUnprivilegedObjectsTest.org.postgresql.test.jdbc4.DatabaseMetaDataHideUnprivilegedObjectsTest": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetClientInfoProperties": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetColumnsForAutoIncrement": "41870", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetFunctionsInSchema": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetFunctionsWithBlankPatterns": "41872", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetFunctionsWithSpecificTypes": "17511", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetProceduresInSchema": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetSchemas": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testSortedDataTypes": "26443", + "org.postgresql.test.jdbc4.IsValidTest.testIsValidRemoteClose": "35897", + "org.postgresql.test.jdbc4.PGCopyInputStreamTest.testReadBytesCorrectlyHandlesEof": "41608", + "org.postgresql.test.jdbc4.PGCopyInputStreamTest.testReadBytesCorrectlyReadsDataInChunks": "41608", + "org.postgresql.test.jdbc4.PGCopyInputStreamTest.testStreamCanBeClosedAfterReadUp": "41608", + "org.postgresql.test.jdbc4.UUIDTest.testUUIDString[binary=FORCE, stringType=VARCHAR]": "5807", + "org.postgresql.test.jdbc4.UUIDTest.testUUIDString[binary=REGULAR, stringType=VARCHAR]": "5807", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetArray": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetBigDecimal": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetBigInteger": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetBlob": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetBoolean": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetBooleanNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetBox": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetCalendar": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetCircle": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetClob": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetDate": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetDouble": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetDoubleNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetFloat": "26097", + 
"org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetFloatNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetInet4Address": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetInet6Address": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetInetAddressNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetInteger": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetIntegerNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetInterval": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetJavaUtilDate": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetLine": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetLineseg": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetLong": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetLongNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetNullDate": "21286", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetNullTimestamp": "21286", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetPath": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetPoint": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetPolygon": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetSerial": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetShort": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetShortNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetString": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetTime": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetTimestamp": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetTimestampWithTimeZone": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetUuid": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetXml": "26097", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testCurrentSchemaPropertyNotVisibilityTableDuringFunctionCreation": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testCurrentSchemaPropertyNotVisibilityTableInsideFunction": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testCurrentSchemaPropertyVisibilityFunction": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testCurrentSchemaPropertyVisibilityTableDuringFunctionCreation": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testGetSetSchema": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testMultipleSearchPath": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testSchemaInProperties": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testSchemaPath$User": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testSearchPathPreparedStatement": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testSearchPathPreparedStatementAutoCommitFalse": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testSearchPathPreparedStatementAutoCommitTrue": "26443", + "org.postgresql.test.jdbc4.jdbc41.SchemaTest.testUsingSchema": "26443", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786", + 
"org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786", + "org.postgresql.test.jdbc42.GetObject310Test.testGetLocalTimeInvalidType[binary = FORCE]": "26097", + "org.postgresql.test.jdbc42.GetObject310Test.testGetLocalTimeInvalidType[binary = REGULAR]": "26097", + "org.postgresql.test.jdbc42.GetObject310Test.testProlepticCalendarTimestamptz[binary = FORCE]": "26097", + "org.postgresql.test.jdbc42.GetObject310Test.testProlepticCalendarTimestamptz[binary = REGULAR]": "26097", + "org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArg": "17511", + "org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArgUnsupportedConversion": "17511", + "org.postgresql.test.xa.XADataSourceTest.testAutoCommit": "22329", + "org.postgresql.test.xa.XADataSourceTest.testCloseBeforeCommit": "22329", + "org.postgresql.test.xa.XADataSourceTest.testCommitByDifferentConnection": "22329", + "org.postgresql.test.xa.XADataSourceTest.testCommitUnknownXid": "22329", + "org.postgresql.test.xa.XADataSourceTest.testCommitingCommittedXid": "22329", + "org.postgresql.test.xa.XADataSourceTest.testDatabaseRemovesPreparedBeforeCommit": "22329", + "org.postgresql.test.xa.XADataSourceTest.testDatabaseRemovesPreparedBeforeRollback": "22329", + "org.postgresql.test.xa.XADataSourceTest.testEndThenJoin": "22329", + "org.postgresql.test.xa.XADataSourceTest.testMappingOfConstraintViolations": "22329", + "org.postgresql.test.xa.XADataSourceTest.testNetworkIssueOnCommit": "22329", + "org.postgresql.test.xa.XADataSourceTest.testNetworkIssueOnOnePhaseCommit": "22329", + "org.postgresql.test.xa.XADataSourceTest.testNetworkIssueOnRollback": "22329", + "org.postgresql.test.xa.XADataSourceTest.testOnePhase": "22329", + "org.postgresql.test.xa.XADataSourceTest.testOnePhaseCommitOfPrepared": "22329", + "org.postgresql.test.xa.XADataSourceTest.testOnePhaseCommitUnknownXid": "22329", 
+ "org.postgresql.test.xa.XADataSourceTest.testOnePhaseCommitingCommittedXid": "22329", + "org.postgresql.test.xa.XADataSourceTest.testPrepareUnknownXid": "22329", + "org.postgresql.test.xa.XADataSourceTest.testPreparingPreparedXid": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRecover": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRepeatedRolledBack": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRestoreOfAutoCommit": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRestoreOfAutoCommitEndThenJoin": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRollback": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRollbackByDifferentConnection": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRollbackUnknownXid": "22329", + "org.postgresql.test.xa.XADataSourceTest.testRollbackWithoutPrepare": "22329", + "org.postgresql.test.xa.XADataSourceTest.testTwoPhaseCommit": "22329", + "org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329", +} + +var pgjdbcBlockList20_1 = blocklist{ "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", @@ -120,16 +1035,8 @@ var pgjdbcBlackList20_1 = blacklist{ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[325: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[326: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[327: autorollback(autoSave=NEVER, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[328: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[329: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[330: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[331: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[341: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[343: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, 
autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[344: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[345: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[346: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[347: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[348: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[349: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[350: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", @@ -154,26 +1061,6 @@ var pgjdbcBlackList20_1 = blacklist{ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[401: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[402: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[403: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[404: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[405: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[406: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": 
"41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[407: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[408: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[409: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[410: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[411: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[412: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[413: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[414: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[415: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[416: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[417: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[418: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[419: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[420: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, 
columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[421: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[422: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[423: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[424: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[425: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[426: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", @@ -182,46 +1069,6 @@ var pgjdbcBlackList20_1 = blacklist{ "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[429: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[430: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[431: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[432: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[433: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[434: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[435: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[436: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, 
continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[437: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[438: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[439: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[440: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[441: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[442: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[443: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[444: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[445: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[446: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[447: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[448: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[449: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[450: 
autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[451: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[452: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[453: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[454: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[455: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[456: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[457: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[458: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[459: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[460: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[461: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[462: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511", - "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[463: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, 
sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[464: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[465: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[466: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[467: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[468: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[469: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[470: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[471: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[472: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[473: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[474: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
@@ -242,16 +1089,8 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[489: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[490: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[491: autorollback(autoSave=ALWAYS, cleanSavePoint=TRUE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[492: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[493: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[494: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[495: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[505: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[507: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[508: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[509: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[510: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[511: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[512: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[513: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[514: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
@@ -276,26 +1115,6 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[565: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[566: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[567: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=YES, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[568: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[569: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[570: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[571: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[572: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[573: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[574: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[575: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[576: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[577: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[578: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[579: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[580: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[581: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[582: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[583: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[584: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[585: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[586: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[587: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=SELECT, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[588: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[589: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[590: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
@@ -304,46 +1123,6 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[593: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[594: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[595: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[596: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[597: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[598: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[599: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[600: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[601: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[602: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[603: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=COMMIT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[604: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[605: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[606: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[607: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[608: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[609: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[610: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[611: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[612: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[613: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[614: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[615: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[616: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[617: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[618: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[619: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=IS_VALID, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[620: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[621: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[622: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[623: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[624: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[625: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[626: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[627: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[628: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[629: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[630: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[631: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[632: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[633: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=SELECT, columns=STAR)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[634: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41511",
- "org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[635: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=DEALLOCATE, continueMode=SELECT, flushOnDeallocate=false, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41511",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[636: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=EXACT)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[637: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[638: autorollback(autoSave=ALWAYS, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=COMMIT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=EXACT)]": "41448",
@@ -464,10 +1243,6 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[983: autorollback(autoSave=CONSERVATIVE, cleanSavePoint=FALSE, autoCommit=NO, failMode=INSERT_BATCH, continueMode=SELECT, flushOnDeallocate=true, hastransaction=false, sql=WITH_INSERT_SELECT, columns=STAR)]": "41448",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[98: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=EXACT)]": "26508",
"org.postgresql.test.jdbc2.AutoRollbackTestSuite.run[99: autorollback(autoSave=NEVER, cleanSavePoint=TRUE, autoCommit=NO, failMode=ALTER, continueMode=SELECT, flushOnDeallocate=true, hastransaction=true, sql=WITH_INSERT_SELECT, columns=STAR)]": "26508",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchEscapeProcessing[binary = FORCE, insertRewrite = false]": "44773",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchEscapeProcessing[binary = FORCE, insertRewrite = true]": "44773",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchEscapeProcessing[binary = REGULAR, insertRewrite = false]": "44773",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchEscapeProcessing[binary = REGULAR, insertRewrite = true]": "44773",
"org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = false]": "41513",
"org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = true]": "41513",
"org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = false]": "41513",
@@ -480,86 +1255,62 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = true]": "31463",
"org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = false]": "31463",
"org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = true]": "31463",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "41514",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "44803",
"org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = false]": "40195",
"org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = true]": "40195",
"org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = false]": "40195",
"org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = true]": "40195",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[48: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[49: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[50: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[51: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[52: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[53: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[54: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[55: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[60: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[61: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[62: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[63: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[68: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[69: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[70: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[71: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[76: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[77: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[78: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[79: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[84: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[85: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[86: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[87: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803",
+ "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803",
"org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[2: autoCommit=NO, binary=REGULAR]": "26508",
"org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[3: autoCommit=NO, binary=FORCE]": "26508",
"org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[2: autoCommit=NO, binary=REGULAR]": "26508",
@@ -715,8 +1466,6 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.jdbc2.PreparedStatementTest.testDollarQuotes[binary = REGULAR]": "41777",
"org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = FORCE]": "21286",
"org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = REGULAR]": "21286",
- "org.postgresql.test.jdbc2.PreparedStatementTest.testSetObjectBigDecimalWithScale[binary = FORCE]": "26925",
- "org.postgresql.test.jdbc2.PreparedStatementTest.testSetObjectBigDecimalWithScale[binary = REGULAR]": "26925",
"org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = FORCE]": "36215",
"org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = REGULAR]": "36215",
"org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = FORCE]": "41779",
"org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = REGULAR]": "41779",
@@ -842,26 +1591,17 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.jdbc2.TimezoneTest.testGetDate": "41776",
"org.postgresql.test.jdbc2.TimezoneTest.testGetTime": "41776",
"org.postgresql.test.jdbc2.TimezoneTest.testGetTimestamp": "41776",
- "org.postgresql.test.jdbc2.TimezoneTest.testHalfHourTimezone": "41776",
"org.postgresql.test.jdbc2.TimezoneTest.testSetDate": "41776",
"org.postgresql.test.jdbc2.TimezoneTest.testSetTime": "41776",
"org.postgresql.test.jdbc2.TimezoneTest.testSetTimestamp": "41776",
"org.postgresql.test.jdbc2.UpdateableResultTest.simpleAndUpdateableSameQuery": "19141",
"org.postgresql.test.jdbc2.UpdateableResultTest.testArray": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testBadColumnIndexes": "19141",
"org.postgresql.test.jdbc2.UpdateableResultTest.testCancelRowUpdates": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testDeleteRows": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testInsertRowIllegalMethods": "19141",
"org.postgresql.test.jdbc2.UpdateableResultTest.testMultiColumnUpdate": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testMultiColumnUpdateWithoutAllColumns": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testPositioning": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateReadOnlyResultSet": "19141",
"org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateSelectOnly": "19141",
"org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateStreams": "19141",
"org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateTimestamp": "19141",
"org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateable": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateablePreparedStatement": "19141",
- "org.postgresql.test.jdbc2.UpdateableResultTest.testZeroRowResult": "19141",
"org.postgresql.test.jdbc3.CompositeTest.testComplexArgumentSelect": "27793",
"org.postgresql.test.jdbc3.CompositeTest.testComplexSelect": "27793",
"org.postgresql.test.jdbc3.CompositeTest.testComplexTableNameMetadata": "27793",
@@ -1186,10 +1926,11 @@ var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329",
}

-var pgjdbcBlackList19_1 = blacklist{
+var pgjdbcBlockList19_1 = blocklist{
"org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508",
"org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508",
"org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508",
+ "org.postgresql.jdbc.PgSQLXMLTest.setCharacterStream": "5807",
"org.postgresql.replication.ReplicationTestSuite.org.postgresql.replication.ReplicationTestSuite": "unknown",
"org.postgresql.test.core.OptionsPropertyTest.testOptionsInProperties": "26443",
"org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = FORCE]": "32552",
@@ -1692,86 +2433,86 @@ var pgjdbcBlackList19_1 = blacklist{
"org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = true]": "5807",
"org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = false]": "5807",
"org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = true]": "5807",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "41514",
- "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "41514",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "44803",
+ "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "44803",
"org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = FORCE, insertRewrite = false]": "5807",
"org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = FORCE, insertRewrite = true]": "5807",
"org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = REGULAR, insertRewrite = false]": "5807",
"org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = REGULAR, insertRewrite = true]": "5807",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[48: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[49: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[50: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[51: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[52: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[53: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[54: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[55: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[60: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[61: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[62: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[63: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[68: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[69: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[70: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[71: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[76: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[77: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[78: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514",
- "org.postgresql.test.jdbc2.BatchFailureTest.run[79: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514",
-
"org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[84: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[85: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[86: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[87: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514", + "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, 
insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, 
batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[48: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[49: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[50: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[51: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[52: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[53: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[54: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[55: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[60: 
batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[61: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[62: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[63: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[68: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[69: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[70: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[71: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[76: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, 
insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[77: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[78: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[79: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[84: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[85: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[86: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[87: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "44803", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[3: autoCommit=NO, binary=FORCE]": "26508", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", @@ -1828,6 +2569,9 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.jdbc2.ConnectionTest.testNativeSQL": "41578", "org.postgresql.test.jdbc2.ConnectionTest.testPrepareCall": "41578", "org.postgresql.test.jdbc2.ConnectionTest.testPrepareStatement": "41578", + "org.postgresql.test.jdbc2.ConnectionTest.testReadOnly_always": "41578", + "org.postgresql.test.jdbc2.ConnectionTest.testReadOnly_ignore": "41578", + "org.postgresql.test.jdbc2.ConnectionTest.testReadOnly_transaction": "41578", "org.postgresql.test.jdbc2.ConnectionTest.testTransactionIsolation": "41578", "org.postgresql.test.jdbc2.ConnectionTest.testTransactions": "41578", "org.postgresql.test.jdbc2.ConnectionTest.testTypeMaps": "41578", @@ -1863,6 +2607,7 @@ var pgjdbcBlackList19_1 = blacklist{ 
"org.postgresql.test.jdbc2.DatabaseMetaDataTest.testColumns": "17511", "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testCrossReference": "17511", "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testCustomArrayTypeInfo": "17511", + "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testDomainColumnSize": "17511", "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testDroppedColumns": "17511", "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testEscaping": "17511", "org.postgresql.test.jdbc2.DatabaseMetaDataTest.testForeignKeyActions": "17511", @@ -1955,6 +2700,8 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.jdbc2.PreparedStatementTest.testNaNLiteralsPreparedStatement[binary = REGULAR]": "unknown", "org.postgresql.test.jdbc2.PreparedStatementTest.testNaNLiteralsSimpleStatement[binary = FORCE]": "unknown", "org.postgresql.test.jdbc2.PreparedStatementTest.testNaNLiteralsSimpleStatement[binary = REGULAR]": "unknown", + "org.postgresql.test.jdbc2.PreparedStatementTest.testNumeric[binary = FORCE]": "5807", + "org.postgresql.test.jdbc2.PreparedStatementTest.testNumeric[binary = REGULAR]": "5807", "org.postgresql.test.jdbc2.PreparedStatementTest.testSelectPrepareThreshold0AutoCommitFalseFetchSizeNonZero[binary = REGULAR]": "unknown", "org.postgresql.test.jdbc2.PreparedStatementTest.testSetBooleanDecimal[binary = FORCE]": "5807", "org.postgresql.test.jdbc2.PreparedStatementTest.testSetBooleanDecimal[binary = REGULAR]": "5807", @@ -2181,6 +2928,15 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.jdbc3.CompositeTest.testSimpleSelect": "27793", "org.postgresql.test.jdbc3.CompositeTest.testTableMetadata": "27793", "org.postgresql.test.jdbc3.DatabaseMetaDataTest.testGetColumnsForDomain": "27796", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunction": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunctionHavingReturnParameter": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeProcedure": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunction": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunctionHavingReturnParameter": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeProcedure": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunction": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunctionHavingReturnParameter": "17511", + "org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeProcedure": "17511", "org.postgresql.test.jdbc3.GeneratedKeysTest.breakDescribeOnFirstServerPreparedExecution[returningInQuery = A, binary = FORCE]": "5807", "org.postgresql.test.jdbc3.GeneratedKeysTest.breakDescribeOnFirstServerPreparedExecution[returningInQuery = A, binary = REGULAR]": "5807", "org.postgresql.test.jdbc3.GeneratedKeysTest.breakDescribeOnFirstServerPreparedExecution[returningInQuery = AB, binary = FORCE]": "5807", @@ -2364,6 +3120,8 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testMultipleOutExecutions": "17511", "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNotEnoughParameters": "17511", "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNumeric": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOnlyNativeCall": "17511", + "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOutNativeCall": "17511", 
"org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithParentheses": "17511", "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithoutParentheses": "17511", "org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSetObjectBit": "17511", @@ -2452,9 +3210,15 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.jdbc4.ClientInfoTest.testExplicitSetAppNameNotificationIsParsed": "unknown", "org.postgresql.test.jdbc4.ClientInfoTest.testSetAppName": "unknown", "org.postgresql.test.jdbc4.ClientInfoTest.testSetAppNameProps": "unknown", + "org.postgresql.test.jdbc4.DatabaseMetaDataHideUnprivilegedObjectsTest.org.postgresql.test.jdbc4.DatabaseMetaDataHideUnprivilegedObjectsTest": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetClientInfoProperties": "26443", "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetColumnsForAutoIncrement": "41870", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetFunctionsInSchema": "26443", "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetFunctionsWithBlankPatterns": "41872", "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetFunctionsWithSpecificTypes": "17511", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetProceduresInSchema": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testGetSchemas": "26443", + "org.postgresql.test.jdbc4.DatabaseMetaDataTest.testSortedDataTypes": "26443", "org.postgresql.test.jdbc4.IsValidTest.testIsValidRemoteClose": "35897", "org.postgresql.test.jdbc4.JsonbTest.testJsonbNonPreparedStatement": "40855", "org.postgresql.test.jdbc4.JsonbTest.testJsonbPreparedStatement": "40855", @@ -2495,6 +3259,8 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetLineseg": "26097", "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetLong": "26097", "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetLongNull": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetNullDate": "26097", + "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetNullTimestamp": "26097", "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetPath": "26097", "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetPoint": "26097", "org.postgresql.test.jdbc4.jdbc41.GetObjectTest.testGetPolygon": "26097", @@ -2557,6 +3323,70 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.jdbc42.GetObject310Test.testProlepticCalendarTimestamptz[binary = REGULAR]": "26097", "org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArg": "17511", "org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArgUnsupportedConversion": "17511", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchPreparedStatement[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchPreparedStatement[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchPreparedStatement[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchPreparedStatement[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchStatement[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchStatement[binary = FORCE, insertRewrite = true]": 
"43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchStatement[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testEmptyExecuteLargeBatchStatement[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchStatementSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchStatementSMALL[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchStatementSMALL[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchStatementSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchValuesInsertSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchValuesInsertSMALL[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchValuesInsertSMALL[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeBatchValuesInsertSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementLoopSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementLoopSMALL[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementLoopSMALL[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementLoopSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementSMALL[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementSMALL[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargePreparedStatementStatementSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSELECT[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSELECT[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSELECT[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSELECT[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSMALL[binary = FORCE, insertRewrite = true]": "43352", + 
"org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSMALL[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdatePreparedStatementSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSELECT[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSELECT[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSELECT[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSELECT[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSMALL[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSMALL[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testExecuteLargeUpdateStatementSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSELECT[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSELECT[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSELECT[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSELECT[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSMALL[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSMALL[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountPreparedStatementSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSELECT[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSELECT[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSELECT[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSELECT[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSMALL[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSMALL[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSMALL[binary = REGULAR, insertRewrite = 
false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testGetLargeUpdateCountStatementSMALL[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchPreparedStatement[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchPreparedStatement[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchPreparedStatement[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchPreparedStatement[binary = REGULAR, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchStatement[binary = FORCE, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchStatement[binary = FORCE, insertRewrite = true]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchStatement[binary = REGULAR, insertRewrite = false]": "43352", + "org.postgresql.test.jdbc42.LargeCountJdbc42Test.testNullExecuteLargeBatchStatement[binary = REGULAR, insertRewrite = true]": "43352", "org.postgresql.test.jdbc42.PreparedStatementTest.testLocalTimeMax": "26097", "org.postgresql.test.jdbc42.PreparedStatementTest.testSetNumber": "26097", "org.postgresql.test.jdbc42.PreparedStatementTest.testTimeTzSetNull": "26097", @@ -2613,7 +3443,7 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329", } -var pgjdbcBlackList19_2 = blacklist{ +var pgjdbcBlockList19_2 = blocklist{ "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", @@ -3119,90 +3949,66 @@ var pgjdbcBlackList19_2 = blacklist{ "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = true]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = false]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = true]": "5807", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "41514", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "44803", "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = false]": "40195", 
"org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = true]": "40195", "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = false]": "40195", "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = true]": "40195", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "41514", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "44803", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = FORCE, insertRewrite = false]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = FORCE, insertRewrite = true]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = REGULAR, insertRewrite = false]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = REGULAR, insertRewrite = true]": "5807", - "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, 
autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[41: 
batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[48: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[49: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[50: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[51: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[52: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[53: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[54: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[55: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=FIRST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[60: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[61: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": 
"41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[62: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[63: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[68: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[69: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[70: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[71: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[76: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[77: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[78: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, 
autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[79: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[84: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[85: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[86: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[87: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=NO, batchType=SIMPLE, generateKeys=NO, binary=FORCE, insertRewrite=false)]": "41514", + "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[121: 
batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + 
"org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, 
batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[3: autoCommit=NO, binary=FORCE]": "26508", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", @@ -4109,7 +4915,7 @@ var pgjdbcBlackList19_2 = blacklist{ "org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329", } -var pgjdbcBlackList2_1 = blacklist{ +var pgjdbcBlockList2_1 = blocklist{ "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", @@ -4615,62 +5421,62 @@ var pgjdbcBlackList2_1 = blacklist{ "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = true]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = false]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = true]": "5807", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "41514", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "41514", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "44803", + 
"org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "44803", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "44803", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = FORCE, insertRewrite = false]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = FORCE, insertRewrite = true]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = REGULAR, insertRewrite = false]": "5807", "org.postgresql.test.jdbc2.BatchExecuteTest.testWarningsAreCleared[binary = REGULAR, insertRewrite = true]": "5807", - "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, 
binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, 
autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "41514", - "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "41514", + 
"org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, 
insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, 
autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[73: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", + "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[3: autoCommit=NO, binary=FORCE]": "26508", "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", @@ -5518,26 +6324,18 @@ var pgjdbcBlackList2_1 = blacklist{ "org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329", } -var pgjdbcBlackList2_0 = blacklist{ - "org.postgresql.test.jdbc2.ClientEncodingTest.setEncodingAscii[allowEncodingChanges=true]": "37129", - "org.postgresql.test.jdbc4.ClientInfoTest.testExplicitSetAppNameNotificationIsParsed": "40854", - "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp with time zone, klass = class 
java.time.OffsetDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp, klass = class java.time.LocalDateTime]": "41786",
- "org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786",
-}
+var pgjdbcIgnoreList20_2 = pgjdbcIgnoreList20_1
 var pgjdbcIgnoreList20_1 = pgjdbcIgnoreList19_2
-var pgjdbcIgnoreList19_2 = blacklist{
+var pgjdbcIgnoreList19_2 = blocklist{
+ "org.postgresql.replication.ReplicationTestSuite.org.postgresql.replication.ReplicationTestSuite": "expected fail - no replication",
+ "org.postgresql.test.core.LogServerMessagePropertyTest.testWithDefaults": "expected fail - checks error message",
+ "org.postgresql.test.core.LogServerMessagePropertyTest.testWithExplicitlyEnabled": "expected fail - checks error message",
+ "org.postgresql.test.core.LogServerMessagePropertyTest.testWithLogServerErrorDetailDisabled": "expected fail - checks error message",
+}
+
+var pgjdbcIgnoreList19_1 = blocklist{
 "org.postgresql.replication.ReplicationTestSuite.org.postgresql.replication.ReplicationTestSuite": "expected fail - no replication",
 "org.postgresql.test.core.LogServerMessagePropertyTest.testWithDefaults": "expected fail - checks error message",
 "org.postgresql.test.core.LogServerMessagePropertyTest.testWithExplicitlyEnabled": "expected fail - checks error message",
diff --git a/pkg/cmd/roachtest/pgx.go b/pkg/cmd/roachtest/pgx.go
index 231362d16624..c7ff876d1747 100644
--- a/pkg/cmd/roachtest/pgx.go
+++ b/pkg/cmd/roachtest/pgx.go
@@ -17,6 +17,7 @@ import (
 )
 var pgxReleaseTagRegex = regexp.MustCompile(`^v(?P<major>\d+)\.(?P<minor>\d+)\.(?P<point>\d+)$`)
+var supportedPGXTag = "v4.6.0"
 // This test runs pgx's full test suite against a single cockroach node.
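The hunks that follow swap the go get -u install for a clone of jackc/pgx pinned at supportedPGXTag, and resolve the expected-failure lists through pgxBlocklists.getLists(version). Only the call sites appear in the diff; as a rough sketch of how such a versioned lookup can be laid out (the struct fields and the prefix-matching rule here are illustrative assumptions, not the repository's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// blocklist maps a test name to the issue number (or reason) behind its
// expected failure.
type blocklist map[string]string

// blocklistForVersion ties a cockroach version prefix to the blocklist and
// ignorelist that apply to releases matching that prefix.
type blocklistForVersion struct {
	versionPrefix  string
	blocklistName  string
	list           blocklist
	ignorelistName string
	ignorelist     blocklist
}

type blocklistsForVersion []blocklistForVersion

// getLists returns the names and contents of the first entry whose version
// prefix matches the running cockroach version, or nils when none match
// (the callers in the hunks below treat a nil blocklist as fatal).
func (b blocklistsForVersion) getLists(version string) (string, blocklist, string, blocklist) {
	for _, info := range b {
		if strings.HasPrefix(version, info.versionPrefix) {
			return info.blocklistName, info.list, info.ignorelistName, info.ignorelist
		}
	}
	return "", nil, "", nil
}

func main() {
	// Mirrors the shape of the pgxBlocklists literal in pgx_blocklist.go.
	lists := blocklistsForVersion{
		{"v19.2", "pgxBlocklist19_2", blocklist{"v4.TestExec": "5807"}, "pgxIgnorelist19_2", nil},
		{"v20.2", "pgxBlocklist20_2", blocklist{"v4.TestConnCopyFromLarge": "52722"}, "pgxIgnorelist20_2", nil},
	}
	name, expected, _, _ := lists.getLists("v20.2.0")
	fmt.Println(name, len(expected)) // pgxBlocklist20_2 1
}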
@@ -46,9 +47,15 @@ func registerPgx(r *testRegistry) { t.Status("setting up go") installLatestGolang(ctx, t, c, node) - t.Status("installing pgx") - if err := repeatRunE( - ctx, c, node, "install pgx", "go get -u github.com/jackc/pgx", + t.Status("getting pgx") + if err := repeatGitCloneE( + ctx, + t.l, + c, + "https://github.com/jackc/pgx.git", + "/mnt/data1/pgx", + supportedPGXTag, + node, ); err != nil { t.Fatal(err) } @@ -58,23 +65,24 @@ func registerPgx(r *testRegistry) { t.Fatal(err) } c.l.Printf("Latest jackc/pgx release is %s.", latestTag) + c.l.Printf("Supported release is %s.", supportedPGXTag) t.Status("installing go-junit-report") if err := repeatRunE( - ctx, c, node, "install pgx", "go get -u github.com/jstemmer/go-junit-report", + ctx, c, node, "install go-junit-report", "go get -u github.com/jstemmer/go-junit-report", ); err != nil { t.Fatal(err) } - t.Status("checking blacklist") - blacklistName, expectedFailures, ignorelistName, ignorelist := pgxBlacklists.getLists(version) + t.Status("checking blocklist") + blocklistName, expectedFailures, ignorelistName, ignorelist := pgxBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No pgx blacklist defined for cockroach version %s", version) + t.Fatalf("No pgx blocklist defined for cockroach version %s", version) } - status := fmt.Sprintf("Running cockroach version %s, using blacklist %s", version, blacklistName) + status := fmt.Sprintf("Running cockroach version %s, using blocklist %s", version, blocklistName) if ignorelist != nil { - status = fmt.Sprintf("Running cockroach version %s, using blacklist %s, using ignorelist %s", - version, blacklistName, ignorelistName) + status = fmt.Sprintf("Running cockroach version %s, using blocklist %s, using ignorelist %s", + version, blocklistName, ignorelistName) } c.l.Printf("%s", status) @@ -101,7 +109,7 @@ func registerPgx(r *testRegistry) { xmlResults, _ := repeatRunWithBuffer( ctx, c, t.l, node, "run pgx test suite", - "cd `go env GOPATH`/src/github.com/jackc/pgx && "+ + "cd /mnt/data1/pgx && "+ "PGX_TEST_DATABASE='postgresql://root:@localhost:26257/pgx_test' go test -v 2>&1 | "+ "`go env GOPATH`/bin/go-junit-report", ) @@ -109,7 +117,7 @@ func registerPgx(r *testRegistry) { results := newORMTestsResults() results.parseJUnitXML(t, expectedFailures, ignorelist, xmlResults) results.summarizeAll( - t, "pgx", blacklistName, expectedFailures, version, latestTag, + t, "pgx", blocklistName, expectedFailures, version, supportedPGXTag, ) } diff --git a/pkg/cmd/roachtest/pgx_blacklist.go b/pkg/cmd/roachtest/pgx_blocklist.go similarity index 72% rename from pkg/cmd/roachtest/pgx_blacklist.go rename to pkg/cmd/roachtest/pgx_blocklist.go index c20c89b72a7c..76a9f3b7f30d 100644 --- a/pkg/cmd/roachtest/pgx_blacklist.go +++ b/pkg/cmd/roachtest/pgx_blocklist.go @@ -10,18 +10,56 @@ package main -var pgxBlacklists = blacklistsForVersion{ - {"v19.2", "pgxBlacklist19_2", pgxBlacklist19_2, "pgxIgnorelist19_2", pgxIgnorelist19_2}, - {"v20.1", "pgxBlacklist20_1", pgxBlacklist20_1, "pgxIgnorelist20_1", pgxIgnorelist20_1}, +var pgxBlocklists = blocklistsForVersion{ + {"v19.2", "pgxBlocklist19_2", pgxBlocklist19_2, "pgxIgnorelist19_2", pgxIgnorelist19_2}, + {"v20.1", "pgxBlocklist20_1", pgxBlocklist20_1, "pgxIgnorelist20_1", pgxIgnorelist20_1}, + {"v20.2", "pgxBlocklist20_2", pgxBlocklist20_2, "pgxIgnorelist20_2", pgxIgnorelist20_2}, } // Please keep these lists alphabetized for easy diffing. 
-// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. -var pgxBlacklist20_1 = blacklist{ +var pgxBlocklist20_2 = blocklist{ + "v4.Example_CustomType": "27796", + "v4.TestConnBeginBatchDeferredError": "31632", + "v4.TestConnCopyFromLarge": "52722", + "v4.TestConnQueryDeferredError": "31632", + "v4.TestConnQueryErrorWhileReturningRows": "26925", + "v4.TestConnQueryReadRowMultipleTimes": "26925", + "v4.TestConnQueryValues": "26925", + "v4.TestConnSendBatch": "44712", + "v4.TestConnSendBatchWithPreparedStatement": "41558", + "v4.TestConnSimpleProtocol": "21286", + "v4.TestConnSimpleProtocolRefusesNonStandardConformingStrings": "36215", + "v4.TestConnSimpleProtocolRefusesNonUTF8ClientEncoding": "37129", + "v4.TestDomainType": "27796", + "v4.TestFatalRxError": "35897", + "v4.TestFatalTxError": "35897", + "v4.TestInetCIDRArrayTranscodeIP": "18846", + "v4.TestInetCIDRArrayTranscodeIPNet": "18846", + "v4.TestInetCIDRTranscodeIP": "18846", + "v4.TestInetCIDRTranscodeIPNet": "18846", + "v4.TestInetCIDRTranscodeWithJustIP": "18846", + "v4.TestLargeObjects": "26725", + "v4.TestLargeObjectsMultipleTransactions": "26725", + "v4.TestLargeObjectsPreferSimpleProtocol": "26725", + "v4.TestListenNotify": "41522", + "v4.TestListenNotifySelfNotification": "41522", + "v4.TestListenNotifyWhileBusyIsSafe": "41522", + "v4.TestQueryContextErrorWhileReceivingRows": "26925", + "v4.TestRowDecode": "26925", + "v4.TestTransactionSuccessfulCommit": "31632", + "v4.TestTransactionSuccessfulRollback": "31632", + "v4.TestTxCommitSerializationFailure": "12701", + "v4.TestTxCommitWhenTxBroken": "31632", + "v4.TestTxNestedTransactionCommit": "31632", + "v4.TestTxNestedTransactionRollback": "31632", + "v4.TestUnregisteredTypeUsableAsStringArgumentAndBaseResult": "27796", +} + +var pgxBlocklist20_1 = blocklist{ "v4.Example_CustomType": "27796", "v4.TestConnBeginBatchDeferredError": "31632", - "v4.TestConnCopyFromFailServerSideMidway": "19603", "v4.TestConnCopyFromJSON": "19603", "v4.TestConnCopyFromLarge": "19603", "v4.TestConnCopyFromSmall": "19603", @@ -60,12 +98,7 @@ var pgxBlacklist20_1 = blacklist{ "v4.TestUnregisteredTypeUsableAsStringArgumentAndBaseResult": "27796", } -var pgxIgnorelist20_1 = blacklist{ - "v4.TestBeginIsoLevels": "We don't support isolation levels", - "v4.TestQueryEncodeError": "This test checks the exact error message", -} - -var pgxBlacklist19_2 = blacklist{ +var pgxBlocklist19_2 = blocklist{ "v4.Example_CustomType": "27796", "v4.TestConnBeginBatchDeferredError": "31632", "v4.TestConnCopyFromCopyFromSourceErrorEnd": "5807", @@ -94,8 +127,8 @@ var pgxBlacklist19_2 = blacklist{ "v4.TestDomainType": "27796", "v4.TestExec": "5807", "v4.TestExecContextWithoutCancelation": "5807", - "v4.TestExecExtendedProtocol": "5807", - "v4.TestExecSimpleProtocol": "5807", + "v4.TestExecExtendedProtocol": "unknown", + "v4.TestExecSimpleProtocol": "unknown", "v4.TestFatalRxError": "35897", "v4.TestFatalTxError": "35897", "v4.TestInetCIDRArrayTranscodeIP": "18846", @@ -124,7 +157,13 @@ var pgxBlacklist19_2 = blacklist{ "v4.TestUnregisteredTypeUsableAsStringArgumentAndBaseResult": "27796", } -var pgxIgnorelist19_2 = blacklist{ +var pgxIgnorelist20_2 = pgxIgnorelist20_1 + +var pgxIgnorelist20_1 = blocklist{ + "v4.TestBeginIsoLevels": "We don't support isolation levels", + "v4.TestQueryEncodeError": "This test checks the exact error message", +} +var pgxIgnorelist19_2 = blocklist{ 
"v4.TestBeginIsoLevels": "We don't support isolation levels", "v4.TestQueryEncodeError": "This test checks the exact error message", } diff --git a/pkg/cmd/roachtest/psycopg.go b/pkg/cmd/roachtest/psycopg.go index 8bb2271d9ebc..5f2ea8d04168 100644 --- a/pkg/cmd/roachtest/psycopg.go +++ b/pkg/cmd/roachtest/psycopg.go @@ -16,9 +16,9 @@ import ( ) var psycopgReleaseTagRegex = regexp.MustCompile(`^(?P\d+)(?:_(?P\d+)(?:_(?P\d+)(?:_(?P\d+))?)?)?$`) +var supportedPsycopgTag = "2_8_6" // This test runs psycopg full test suite against a single cockroach node. - func registerPsycopg(r *testRegistry) { runPsycopg := func( ctx context.Context, @@ -48,6 +48,7 @@ func registerPsycopg(r *testRegistry) { t.Fatal(err) } c.l.Printf("Latest Psycopg release is %s.", latestTag) + c.l.Printf("Supported Psycopg release is %s.", supportedPsycopgTag) if err := repeatRunE( ctx, c, node, "update apt-get", `sudo apt-get -qq update`, @@ -77,7 +78,7 @@ func registerPsycopg(r *testRegistry) { c, "https://github.com/psycopg/psycopg2.git", "/mnt/data1/psycopg", - latestTag, + supportedPsycopgTag, node, ); err != nil { t.Fatal(err) @@ -90,15 +91,15 @@ func registerPsycopg(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailures, ignoredlistName, ignoredlist := psycopgBlacklists.getLists(version) + blocklistName, expectedFailures, ignoredlistName, ignoredlist := psycopgBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No psycopg blacklist defined for cockroach version %s", version) + t.Fatalf("No psycopg blocklist defined for cockroach version %s", version) } if ignoredlist == nil { t.Fatalf("No psycopg ignorelist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignoredlist %s", - version, blacklistName, ignoredlistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignoredlist %s", + version, blocklistName, ignoredlistName) t.Status("running psycopg test suite") // Note that this is expected to return an error, since the test suite @@ -119,8 +120,8 @@ func registerPsycopg(r *testRegistry) { results := newORMTestsResults() results.parsePythonUnitTestOutput(rawResults, expectedFailures, ignoredlist) results.summarizeAll( - t, "psycopg" /* ormName */, blacklistName, expectedFailures, - version, latestTag, + t, "psycopg" /* ormName */, blocklistName, expectedFailures, + version, supportedPsycopgTag, ) } @@ -128,7 +129,7 @@ func registerPsycopg(r *testRegistry) { Name: "psycopg", Owner: OwnerAppDev, Cluster: makeClusterSpec(1), - MinVersion: "v19.1.0", + MinVersion: "v19.2.0", Tags: []string{`default`, `driver`}, Run: func(ctx context.Context, t *test, c *cluster) { runPsycopg(ctx, t, c) diff --git a/pkg/cmd/roachtest/psycopg_blacklist.go b/pkg/cmd/roachtest/psycopg_blacklist.go deleted file mode 100644 index d9ac0c5ba8f7..000000000000 --- a/pkg/cmd/roachtest/psycopg_blacklist.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright 2018 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package main - -var psycopgBlacklists = blacklistsForVersion{ - {"v2.2", "psycopgBlackList19_1", psycopgBlackList19_1, "psycopgIgnoreList19_1", psycopgIgnoreList19_1}, - {"v19.1", "psycopgBlackList19_1", psycopgBlackList19_1, "psycopgIgnoreList19_1", psycopgIgnoreList19_1}, - {"v19.2", "psycopgBlackList19_2", psycopgBlackList19_2, "psycopgIgnoreList19_2", psycopgIgnoreList19_2}, - {"v20.1", "psycopgBlackList20_1", psycopgBlackList20_1, "psycopgIgnoreList20_1", psycopgIgnoreList20_1}, -} - -// These are lists of known psycopg test errors and failures. -// When the psycopg test suite is run, the results are compared to this list. -// Any passed test that is not on this list is reported as PASS - expected -// Any passed test that is on this list is reported as PASS - unexpected -// Any failed test that is on this list is reported as FAIL - expected -// Any failed test that is not on this list is reported as FAIL - unexpected -// Any test on this list that is not run is reported as FAIL - not run -// -// Please keep these lists alphabetized for easy diffing. -// After a failed run, an updated version of this blacklist should be available -// in the test log. -var psycopgBlackList20_1 = blacklist{ - "tests.test_async.AsyncTests.test_async_callproc": "44701", - "tests.test_async.AsyncTests.test_error": "44706", - "tests.test_async.AsyncTests.test_flush_on_write": "44709", - "tests.test_async.AsyncTests.test_non_block_after_notification": "17511", - "tests.test_async.AsyncTests.test_notices": "44711", - "tests.test_async.AsyncTests.test_notify": "41522", - "tests.test_async.AsyncTests.test_poll_conn_for_notification": "41522", - "tests.test_async_keyword.CancelTests.test_async_cancel": "41335", - "tests.test_cancel.CancelTests.test_async_cancel": "41335", - "tests.test_cancel.CancelTests.test_cancel": "41335", - "tests.test_connection.AutocommitTests.test_set_session_autocommit": "35879", - "tests.test_connection.ConnectionTests.test_cleanup_on_badconn_close": "35897", - "tests.test_connection.ConnectionTests.test_encoding_name": "35882", - "tests.test_connection.ConnectionTests.test_notices": "44711", - "tests.test_connection.ConnectionTests.test_notices_consistent_order": "44711", - "tests.test_connection.ConnectionTests.test_notices_deque": "44711", - "tests.test_connection.ConnectionTests.test_notices_limited": "44711", - "tests.test_connection.ConnectionTests.test_reset": "35879", - "tests.test_connection.ConnectionTwoPhaseTests.test_recovered_xids": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_status_after_recover": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit_one_phase": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit_recovered": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_recover_non_dbapi_connection": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback_one_phase": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback_recovered": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_unparsed_roundtrip": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_construction": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_encoding": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_from_string": "22329", - 
"tests.test_connection.ConnectionTwoPhaseTests.test_xid_roundtrip": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_to_string": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_unicode": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_unicode_unparsed": "22329", - "tests.test_connection.IsolationLevelsTestCase.test_encoding": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_autocommit": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_closed": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_read_committed": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_serializable": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_abort": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_autocommit": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_default": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_int": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_invalid": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_str": "12123", - "tests.test_connection.TransactionControlTests.test_idempotence_check": "35879", - "tests.test_connection.TransactionControlTests.test_mixing_session_attribs": "35879", - "tests.test_connection.TransactionControlTests.test_set_deferrable": "31632", - "tests.test_connection.TransactionControlTests.test_set_isolation_level": "12123", - "tests.test_connection.TransactionControlTests.test_set_isolation_level_str": "12123", - "tests.test_connection.TransactionControlTests.test_setattr_deferrable": "12123", - "tests.test_copy.CopyTests.test_copy_bytes": "35882", - "tests.test_copy.CopyTests.test_copy_expert_textiobase": "35882", - "tests.test_copy.CopyTests.test_copy_from": "41608", - "tests.test_copy.CopyTests.test_copy_from_cols": "41608", - "tests.test_copy.CopyTests.test_copy_from_insane_size": "41608", - "tests.test_copy.CopyTests.test_copy_from_propagate_error": "41608", - "tests.test_copy.CopyTests.test_copy_no_column_limit": "41608", - "tests.test_copy.CopyTests.test_copy_rowcount": "41608", - "tests.test_copy.CopyTests.test_copy_rowcount_error": "41608", - "tests.test_copy.CopyTests.test_copy_text": "35882", - "tests.test_copy.CopyTests.test_copy_to": "41608", - "tests.test_copy.CopyTests.test_copy_to_propagate_error": "41608", - "tests.test_cursor.CursorTests.test_callproc_dict": "17511", - "tests.test_cursor.CursorTests.test_description_attribs": "30352", - "tests.test_cursor.CursorTests.test_description_extra_attribs": "30352", - "tests.test_cursor.CursorTests.test_external_close_async": "35897", - "tests.test_cursor.CursorTests.test_external_close_sync": "35897", - "tests.test_cursor.CursorTests.test_invalid_name": "41412", - "tests.test_cursor.CursorTests.test_iter_named_cursor_default_itersize": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_efficient": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_itersize": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_rownumber": "30352", - "tests.test_cursor.CursorTests.test_named_cursor_stealing": "30352", - 
"tests.test_cursor.CursorTests.test_named_noop_close": "30352", - "tests.test_cursor.CursorTests.test_not_scrollable": "30352", - "tests.test_cursor.CursorTests.test_scroll_named": "30352", - "tests.test_cursor.CursorTests.test_scrollable": "30352", - "tests.test_cursor.CursorTests.test_stolen_named_cursor_close": "30352", - "tests.test_cursor.CursorTests.test_withhold": "30352", - "tests.test_cursor.CursorTests.test_withhold_autocommit": "30352", - "tests.test_cursor.CursorTests.test_withhold_no_begin": "30352", - "tests.test_dates.DatetimeTests.test_adapt_datetime": "36115", - "tests.test_dates.DatetimeTests.test_adapt_infinity_tz": "36116", - "tests.test_dates.DatetimeTests.test_interval_iso_8601_not_supported": "32562", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchAll": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchMany": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchManyNoarg": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchOne": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIter": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIterRowNumber": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorNotGreedy": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchAll": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchMany": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchManyNoarg": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchOne": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIter": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIterRowNumber": "41412", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorNotGreedy": "41412", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named": "41412", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchall": "41412", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchmany": "41412", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchone": "41412", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_rownumber": "41412", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_not_greedy": "41412", - "tests.test_green.CallbackErrorTestCase.test_errors_named_cursor": "30352", - "tests.test_green.GreenTestCase.test_non_block_after_notification": "17511", - "tests.test_lobject.LargeObject64Tests.test_seek_tell_truncate_greater_than_2gb": "35902", - "tests.test_lobject.LargeObjectTests.test_close": "35902", - "tests.test_lobject.LargeObjectTests.test_close_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_close_connection_gone": "243", - "tests.test_lobject.LargeObjectTests.test_close_twice": "243", - "tests.test_lobject.LargeObjectTests.test_create": "243", - "tests.test_lobject.LargeObjectTests.test_create_with_existing_oid": "243", - "tests.test_lobject.LargeObjectTests.test_create_with_oid": "243", - "tests.test_lobject.LargeObjectTests.test_export": "243", - 
"tests.test_lobject.LargeObjectTests.test_export_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_export_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_factory": "243", - "tests.test_lobject.LargeObjectTests.test_import": "243", - "tests.test_lobject.LargeObjectTests.test_mode_defaults": "243", - "tests.test_lobject.LargeObjectTests.test_open_existing": "243", - "tests.test_lobject.LargeObjectTests.test_open_for_write": "243", - "tests.test_lobject.LargeObjectTests.test_open_mode_n": "243", - "tests.test_lobject.LargeObjectTests.test_open_non_existent": "243", - "tests.test_lobject.LargeObjectTests.test_read": "243", - "tests.test_lobject.LargeObjectTests.test_read_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_read_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_read_binary": "243", - "tests.test_lobject.LargeObjectTests.test_read_large": "243", - "tests.test_lobject.LargeObjectTests.test_read_text": "243", - "tests.test_lobject.LargeObjectTests.test_seek_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_seek_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_seek_tell": "243", - "tests.test_lobject.LargeObjectTests.test_tell_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_tell_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_unlink": "243", - "tests.test_lobject.LargeObjectTests.test_unlink_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_unlink_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_write": "243", - "tests.test_lobject.LargeObjectTests.test_write_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_write_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_write_large": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate_after_close": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate_after_commit": "243", - "tests.test_module.ExceptionsTestCase.test_9_3_diagnostics": "44714", - "tests.test_module.ExceptionsTestCase.test_diagnostics_copy": "41608", - "tests.test_module.ExceptionsTestCase.test_diagnostics_from_commit": "31632", - "tests.test_notify.NotifiesTests.test_many_notifies": "6130", - "tests.test_notify.NotifiesTests.test_notifies_received_on_execute": "6130", - "tests.test_notify.NotifiesTests.test_notifies_received_on_poll": "6130", - "tests.test_notify.NotifiesTests.test_notify_attributes": "6130", - "tests.test_notify.NotifiesTests.test_notify_deque": "6130", - "tests.test_notify.NotifiesTests.test_notify_noappend": "6130", - "tests.test_notify.NotifiesTests.test_notify_object": "6130", - "tests.test_notify.NotifiesTests.test_notify_payload": "6130", - "tests.test_quote.QuotingTestCase.test_koi8": "35882", - "tests.test_quote.QuotingTestCase.test_latin1": "35882", - "tests.test_sql.SqlFormatTests.test_copy": "41608", - "tests.test_transaction.DeadlockSerializationTests.test_deadlock": "6583", - "tests.test_transaction.TransactionTests.test_commit": "31632", - "tests.test_transaction.TransactionTests.test_failed_commit": "31632", - "tests.test_transaction.TransactionTests.test_rollback": "31632", - "tests.test_types_basic.TypesBasicTests.testArray": "32552", - "tests.test_types_basic.TypesBasicTests.testArrayOfNulls": "32552", - "tests.test_types_basic.TypesBasicTests.testEmptyArrayRegression": "36179", - "tests.test_types_basic.TypesBasicTests.testNestedArrays": "32552", - 
"tests.test_types_basic.TypesBasicTests.testNestedEmptyArray": "32552", - "tests.test_types_basic.TypesBasicTests.testNetworkArray": "18846", - "tests.test_types_extras.AdaptTypeTestCase.test_cast_composite": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_cast_nested": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_composite_array": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_composite_namespace": "26443", - "tests.test_types_extras.AdaptTypeTestCase.test_empty_string": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_from_tables": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_non_dbapi_connection": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_globally": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_on_connection": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_on_cursor": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_subclass": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_wrong_schema": "27793", - "tests.test_types_extras.JsonTestCase.test_default_cast": "23468", - "tests.test_types_extras.JsonTestCase.test_register_default": "41653", - "tests.test_types_extras.JsonTestCase.test_scs": "36215", - "tests.test_types_extras.JsonbTestCase.test_default_cast": "23468", - "tests.test_types_extras.JsonbTestCase.test_loads": "23468", - "tests.test_types_extras.JsonbTestCase.test_null": "23468", - "tests.test_types_extras.JsonbTestCase.test_register_default": "23468", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_date_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_number_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_numeric_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_date": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_empty": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_inf": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_null": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_numbers": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_timestamp": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_timestamptz": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_range_escaping": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_register_range_adapter": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_schema_range": "26443", - "tests.test_with.WithCursorTestCase.test_exception_swallow": "30352", - "tests.test_with.WithCursorTestCase.test_named_with_noop": "30352", -} - -var psycopgBlackList19_2 = blacklist{ - "tests.test_async.AsyncTests.test_async_after_async": "5807", - "tests.test_async.AsyncTests.test_async_callproc": "5807", - "tests.test_async.AsyncTests.test_async_connection_error_message": "5807", - "tests.test_async.AsyncTests.test_async_cursor_gone": "5807", - "tests.test_async.AsyncTests.test_async_dont_read_all": "5807", - "tests.test_async.AsyncTests.test_async_executemany": "5807", - "tests.test_async.AsyncTests.test_async_fetch_wrong_cursor": "5807", - "tests.test_async.AsyncTests.test_async_iter": "5807", - "tests.test_async.AsyncTests.test_async_named_cursor": "5807", - "tests.test_async.AsyncTests.test_async_scroll": "5807", - "tests.test_async.AsyncTests.test_async_select": "5807", - "tests.test_async.AsyncTests.test_async_subclass": "5807", - 
"tests.test_async.AsyncTests.test_commit_while_async": "5807", - "tests.test_async.AsyncTests.test_connection_setup": "5807", - "tests.test_async.AsyncTests.test_copy_no_hang": "5807", - "tests.test_async.AsyncTests.test_copy_while_async": "5807", - "tests.test_async.AsyncTests.test_error": "5807", - "tests.test_async.AsyncTests.test_error_two_cursors": "5807", - "tests.test_async.AsyncTests.test_fetch_after_async": "5807", - "tests.test_async.AsyncTests.test_flush_on_write": "5807", - "tests.test_async.AsyncTests.test_lobject_while_async": "5807", - "tests.test_async.AsyncTests.test_non_block_after_notification": "5807", - "tests.test_async.AsyncTests.test_notices": "5807", - "tests.test_async.AsyncTests.test_notify": "5807", - "tests.test_async.AsyncTests.test_poll_conn_for_notification": "5807", - "tests.test_async.AsyncTests.test_poll_noop": "5807", - "tests.test_async.AsyncTests.test_reset_while_async": "5807", - "tests.test_async.AsyncTests.test_rollback_while_async": "5807", - "tests.test_async.AsyncTests.test_scroll": "5807", - "tests.test_async.AsyncTests.test_set_parameters_while_async": "5807", - "tests.test_async.AsyncTests.test_stop_on_first_error": "5807", - "tests.test_async.AsyncTests.test_sync_poll": "5807", - "tests.test_async_keyword.AsyncTests.test_async_connection_error_message": "5807", - "tests.test_async_keyword.AsyncTests.test_async_subclass": "5807", - "tests.test_async_keyword.AsyncTests.test_connection_setup": "5807", - "tests.test_async_keyword.CancelTests.test_async_cancel": "5807", - "tests.test_async_keyword.CancelTests.test_async_connection_cancel": "5807", - "tests.test_cancel.CancelTests.test_async_cancel": "5807", - "tests.test_cancel.CancelTests.test_async_connection_cancel": "5807", - "tests.test_cancel.CancelTests.test_cancel": "5807", - "tests.test_cancel.CancelTests.test_empty_cancel": "5807", - "tests.test_connection.AutocommitTests.test_set_session_autocommit": "35879", - "tests.test_connection.ConnectionTests.test_cleanup_on_badconn_close": "35897", - "tests.test_connection.ConnectionTests.test_encoding_name": "35882", - "tests.test_connection.ConnectionTests.test_notices": "5807", - "tests.test_connection.ConnectionTests.test_notices_consistent_order": "5807", - "tests.test_connection.ConnectionTests.test_notices_deque": "5807", - "tests.test_connection.ConnectionTests.test_notices_limited": "5807", - "tests.test_connection.ConnectionTests.test_notices_noappend": "5807", - "tests.test_connection.ConnectionTests.test_reset": "35879", - "tests.test_connection.ConnectionTwoPhaseTests.test_recovered_xids": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_status_after_recover": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit_one_phase": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit_recovered": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_recover_non_dbapi_connection": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback_one_phase": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback_recovered": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_unparsed_roundtrip": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_construction": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_encoding": "22329", - 
"tests.test_connection.ConnectionTwoPhaseTests.test_xid_from_string": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_roundtrip": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_to_string": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_unicode": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_unicode_unparsed": "22329", - "tests.test_connection.IsolationLevelsTestCase.test_encoding": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_autocommit": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_closed": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_read_committed": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_serializable": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_abort": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_autocommit": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_default": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_int": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_invalid": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_str": "12123", - "tests.test_connection.TransactionControlTests.test_idempotence_check": "35879", - "tests.test_connection.TransactionControlTests.test_mixing_session_attribs": "35879", - "tests.test_connection.TransactionControlTests.test_set_deferrable": "31632", - "tests.test_connection.TransactionControlTests.test_set_isolation_level": "12123", - "tests.test_connection.TransactionControlTests.test_set_isolation_level_str": "12123", - "tests.test_connection.TransactionControlTests.test_setattr_deferrable": "12123", - "tests.test_copy.CopyTests.test_copy_bytes": "5807", - "tests.test_copy.CopyTests.test_copy_expert_file_refcount": "5807", - "tests.test_copy.CopyTests.test_copy_expert_textiobase": "5807", - "tests.test_copy.CopyTests.test_copy_from": "5807", - "tests.test_copy.CopyTests.test_copy_from_cols": "5807", - "tests.test_copy.CopyTests.test_copy_from_cols_err": "5807", - "tests.test_copy.CopyTests.test_copy_from_insane_size": "5807", - "tests.test_copy.CopyTests.test_copy_from_propagate_error": "5807", - "tests.test_copy.CopyTests.test_copy_from_segfault": "5807", - "tests.test_copy.CopyTests.test_copy_no_column_limit": "5807", - "tests.test_copy.CopyTests.test_copy_rowcount": "5807", - "tests.test_copy.CopyTests.test_copy_rowcount_error": "5807", - "tests.test_copy.CopyTests.test_copy_text": "5807", - "tests.test_copy.CopyTests.test_copy_to": "5807", - "tests.test_copy.CopyTests.test_copy_to_propagate_error": "5807", - "tests.test_copy.CopyTests.test_copy_to_segfault": "5807", - "tests.test_cursor.CursorTests.test_callproc_dict": "17511", - "tests.test_cursor.CursorTests.test_description_attribs": "30352", - "tests.test_cursor.CursorTests.test_description_extra_attribs": "30352", - "tests.test_cursor.CursorTests.test_executemany_propagate_exceptions": "5807", - "tests.test_cursor.CursorTests.test_external_close_async": "35897", - "tests.test_cursor.CursorTests.test_external_close_sync": "35897", - "tests.test_cursor.CursorTests.test_invalid_name": 
"5807", - "tests.test_cursor.CursorTests.test_iter_named_cursor_default_itersize": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_efficient": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_itersize": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_rownumber": "30352", - "tests.test_cursor.CursorTests.test_named_cursor_stealing": "30352", - "tests.test_cursor.CursorTests.test_named_noop_close": "30352", - "tests.test_cursor.CursorTests.test_not_scrollable": "30352", - "tests.test_cursor.CursorTests.test_scroll_named": "30352", - "tests.test_cursor.CursorTests.test_scrollable": "30352", - "tests.test_cursor.CursorTests.test_stolen_named_cursor_close": "30352", - "tests.test_cursor.CursorTests.test_withhold": "30352", - "tests.test_cursor.CursorTests.test_withhold_autocommit": "30352", - "tests.test_cursor.CursorTests.test_withhold_no_begin": "30352", - "tests.test_dates.DatetimeTests.test_adapt_datetime": "36115", - "tests.test_dates.DatetimeTests.test_adapt_infinity_tz": "36116", - "tests.test_dates.DatetimeTests.test_adapt_negative_timedelta": "35807", - "tests.test_dates.DatetimeTests.test_adapt_timedelta": "35807", - "tests.test_dates.DatetimeTests.test_interval_iso_8601_not_supported": "32562", - "tests.test_dates.DatetimeTests.test_time_24": "36118", - "tests.test_dates.DatetimeTests.test_type_roundtrip_timetz": "26097", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorNotGreedy": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealIter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testPickleRealDictRow": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testRealMeansReal": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_iter_methods_2": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_mod": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_order": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_order_iter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_pop": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictConnCursorArgs": "5807", - 
"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorNotGreedy": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorIter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testPickleDictRow": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testUpdateRow": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.test_iter_methods_2": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.test_order": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.test_order_iter": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_bad_col_names": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_cache": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_cursor_args": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_executemany": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchall": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchmany": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchmany_noarg": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchone": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_iter": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_max_cache": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_minimal_generation": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchall": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchmany": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchone": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_rownumber": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_no_result_no_surprise": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_not_greedy": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_record_updated": "5807", - "tests.test_green.CallbackErrorTestCase.test_errors_named_cursor": "30352", - "tests.test_green.GreenTestCase.test_non_block_after_notification": "5807", - "tests.test_lobject.LargeObject64Tests.test_seek_tell_truncate_greater_than_2gb": "35902", - 
"tests.test_lobject.LargeObjectTests.test_close": "35902", - "tests.test_lobject.LargeObjectTests.test_close_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_close_connection_gone": "243", - "tests.test_lobject.LargeObjectTests.test_close_twice": "243", - "tests.test_lobject.LargeObjectTests.test_create": "243", - "tests.test_lobject.LargeObjectTests.test_create_with_existing_oid": "243", - "tests.test_lobject.LargeObjectTests.test_create_with_oid": "243", - "tests.test_lobject.LargeObjectTests.test_export": "243", - "tests.test_lobject.LargeObjectTests.test_export_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_export_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_factory": "243", - "tests.test_lobject.LargeObjectTests.test_import": "243", - "tests.test_lobject.LargeObjectTests.test_mode_defaults": "243", - "tests.test_lobject.LargeObjectTests.test_open_existing": "243", - "tests.test_lobject.LargeObjectTests.test_open_for_write": "243", - "tests.test_lobject.LargeObjectTests.test_open_mode_n": "243", - "tests.test_lobject.LargeObjectTests.test_open_non_existent": "243", - "tests.test_lobject.LargeObjectTests.test_read": "243", - "tests.test_lobject.LargeObjectTests.test_read_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_read_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_read_binary": "243", - "tests.test_lobject.LargeObjectTests.test_read_large": "243", - "tests.test_lobject.LargeObjectTests.test_read_text": "243", - "tests.test_lobject.LargeObjectTests.test_seek_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_seek_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_seek_tell": "243", - "tests.test_lobject.LargeObjectTests.test_tell_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_tell_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_unlink": "243", - "tests.test_lobject.LargeObjectTests.test_unlink_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_unlink_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_write": "243", - "tests.test_lobject.LargeObjectTests.test_write_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_write_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_write_large": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate_after_close": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate_after_commit": "243", - "tests.test_module.ExceptionsTestCase.test_9_3_diagnostics": "5807", - "tests.test_module.ExceptionsTestCase.test_diagnostics_copy": "5807", - "tests.test_module.ExceptionsTestCase.test_diagnostics_from_commit": "5807", - "tests.test_notify.NotifiesTests.test_many_notifies": "6130", - "tests.test_notify.NotifiesTests.test_notifies_received_on_execute": "6130", - "tests.test_notify.NotifiesTests.test_notifies_received_on_poll": "6130", - "tests.test_notify.NotifiesTests.test_notify_attributes": "6130", - "tests.test_notify.NotifiesTests.test_notify_deque": "6130", - "tests.test_notify.NotifiesTests.test_notify_noappend": "6130", - "tests.test_notify.NotifiesTests.test_notify_object": "6130", - "tests.test_notify.NotifiesTests.test_notify_payload": "6130", - "tests.test_quote.QuotingTestCase.test_koi8": "35882", - "tests.test_quote.QuotingTestCase.test_latin1": "35882", - "tests.test_sql.SqlFormatTests.test_copy": "5807", - 
"tests.test_transaction.DeadlockSerializationTests.test_deadlock": "6583", - "tests.test_transaction.TransactionTests.test_commit": "5807", - "tests.test_transaction.TransactionTests.test_failed_commit": "5807", - "tests.test_transaction.TransactionTests.test_rollback": "5807", - "tests.test_types_basic.TypesBasicTests.testArray": "32552", - "tests.test_types_basic.TypesBasicTests.testArrayOfNulls": "32552", - "tests.test_types_basic.TypesBasicTests.testEmptyArray": "23299", - "tests.test_types_basic.TypesBasicTests.testEmptyArrayRegression": "36179", - "tests.test_types_basic.TypesBasicTests.testNestedArrays": "32552", - "tests.test_types_basic.TypesBasicTests.testNestedEmptyArray": "32552", - "tests.test_types_basic.TypesBasicTests.testNetworkArray": "18846", - "tests.test_types_extras.AdaptTypeTestCase.test_cast_composite": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_cast_nested": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_composite_array": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_composite_namespace": "26443", - "tests.test_types_extras.AdaptTypeTestCase.test_empty_string": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_from_tables": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_non_dbapi_connection": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_globally": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_on_connection": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_on_cursor": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_subclass": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_wrong_schema": "27793", - "tests.test_types_extras.JsonTestCase.test_default_cast": "23468", - "tests.test_types_extras.JsonTestCase.test_register_default": "41653", - "tests.test_types_extras.JsonTestCase.test_scs": "36215", - "tests.test_types_extras.JsonbTestCase.test_default_cast": "23468", - "tests.test_types_extras.JsonbTestCase.test_loads": "23468", - "tests.test_types_extras.JsonbTestCase.test_null": "23468", - "tests.test_types_extras.JsonbTestCase.test_register_default": "23468", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_date_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_number_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_numeric_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_date": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_empty": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_inf": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_null": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_numbers": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_timestamp": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_timestamptz": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_range_escaping": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_register_range_adapter": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_schema_range": "26443", - "tests.test_with.WithCursorTestCase.test_exception_swallow": "30352", - "tests.test_with.WithCursorTestCase.test_named_with_noop": "30352", -} - -var psycopgBlackList19_1 = blacklist{ - "tests.test_async.AsyncTests.test_async_after_async": "5807", - "tests.test_async.AsyncTests.test_async_callproc": "5807", - 
"tests.test_async.AsyncTests.test_async_connection_error_message": "5807", - "tests.test_async.AsyncTests.test_async_cursor_gone": "5807", - "tests.test_async.AsyncTests.test_async_dont_read_all": "5807", - "tests.test_async.AsyncTests.test_async_executemany": "5807", - "tests.test_async.AsyncTests.test_async_fetch_wrong_cursor": "5807", - "tests.test_async.AsyncTests.test_async_iter": "5807", - "tests.test_async.AsyncTests.test_async_named_cursor": "5807", - "tests.test_async.AsyncTests.test_async_scroll": "5807", - "tests.test_async.AsyncTests.test_async_select": "5807", - "tests.test_async.AsyncTests.test_async_subclass": "5807", - "tests.test_async.AsyncTests.test_commit_while_async": "5807", - "tests.test_async.AsyncTests.test_connection_setup": "5807", - "tests.test_async.AsyncTests.test_copy_no_hang": "5807", - "tests.test_async.AsyncTests.test_copy_while_async": "5807", - "tests.test_async.AsyncTests.test_error": "5807", - "tests.test_async.AsyncTests.test_error_two_cursors": "5807", - "tests.test_async.AsyncTests.test_fetch_after_async": "5807", - "tests.test_async.AsyncTests.test_flush_on_write": "5807", - "tests.test_async.AsyncTests.test_lobject_while_async": "5807", - "tests.test_async.AsyncTests.test_non_block_after_notification": "5807", - "tests.test_async.AsyncTests.test_notices": "5807", - "tests.test_async.AsyncTests.test_notify": "5807", - "tests.test_async.AsyncTests.test_poll_conn_for_notification": "5807", - "tests.test_async.AsyncTests.test_poll_noop": "5807", - "tests.test_async.AsyncTests.test_reset_while_async": "5807", - "tests.test_async.AsyncTests.test_rollback_while_async": "5807", - "tests.test_async.AsyncTests.test_scroll": "5807", - "tests.test_async.AsyncTests.test_set_parameters_while_async": "5807", - "tests.test_async.AsyncTests.test_stop_on_first_error": "5807", - "tests.test_async.AsyncTests.test_sync_poll": "5807", - "tests.test_async_keyword.AsyncTests.test_async_connection_error_message": "5807", - "tests.test_async_keyword.AsyncTests.test_async_subclass": "5807", - "tests.test_async_keyword.AsyncTests.test_connection_setup": "5807", - "tests.test_async_keyword.CancelTests.test_async_cancel": "5807", - "tests.test_async_keyword.CancelTests.test_async_connection_cancel": "5807", - "tests.test_cancel.CancelTests.test_async_cancel": "5807", - "tests.test_cancel.CancelTests.test_async_connection_cancel": "5807", - "tests.test_cancel.CancelTests.test_cancel": "5807", - "tests.test_cancel.CancelTests.test_empty_cancel": "5807", - "tests.test_connection.AutocommitTests.test_set_session_autocommit": "35879", - "tests.test_connection.ConnectionTests.test_cleanup_on_badconn_close": "35897", - "tests.test_connection.ConnectionTests.test_encoding_name": "35882", - "tests.test_connection.ConnectionTests.test_notices": "5807", - "tests.test_connection.ConnectionTests.test_notices_consistent_order": "5807", - "tests.test_connection.ConnectionTests.test_notices_deque": "5807", - "tests.test_connection.ConnectionTests.test_notices_limited": "5807", - "tests.test_connection.ConnectionTests.test_notices_noappend": "5807", - "tests.test_connection.ConnectionTests.test_reset": "35879", - "tests.test_connection.ConnectionTwoPhaseTests.test_cancel_fails_prepared": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_recovered_xids": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_status_after_recover": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit": "22329", - 
"tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit_one_phase": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_commit_recovered": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_recover_non_dbapi_connection": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback_one_phase": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_tpc_rollback_recovered": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_unparsed_roundtrip": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_construction": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_encoding": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_from_string": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_roundtrip": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_to_string": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_unicode": "22329", - "tests.test_connection.ConnectionTwoPhaseTests.test_xid_unicode_unparsed": "22329", - "tests.test_connection.IsolationLevelsTestCase.test_encoding": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_autocommit": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_closed": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_read_committed": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_isolation_level_serializable": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_abort": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_autocommit": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_set_isolation_level_default": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_int": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_invalid": "12123", - "tests.test_connection.IsolationLevelsTestCase.test_setattr_isolation_level_str": "12123", - "tests.test_connection.TransactionControlTests.test_idempotence_check": "35879", - "tests.test_connection.TransactionControlTests.test_mixing_session_attribs": "35879", - "tests.test_connection.TransactionControlTests.test_set_deferrable": "31632", - "tests.test_connection.TransactionControlTests.test_set_isolation_level": "12123", - "tests.test_connection.TransactionControlTests.test_set_isolation_level_str": "12123", - "tests.test_connection.TransactionControlTests.test_setattr_deferrable": "12123", - "tests.test_copy.CopyTests.test_copy_bytes": "5807", - "tests.test_copy.CopyTests.test_copy_expert_file_refcount": "5807", - "tests.test_copy.CopyTests.test_copy_expert_textiobase": "5807", - "tests.test_copy.CopyTests.test_copy_from": "5807", - "tests.test_copy.CopyTests.test_copy_from_cols": "5807", - "tests.test_copy.CopyTests.test_copy_from_cols_err": "5807", - "tests.test_copy.CopyTests.test_copy_from_insane_size": "5807", - "tests.test_copy.CopyTests.test_copy_from_propagate_error": "5807", - "tests.test_copy.CopyTests.test_copy_from_segfault": "5807", - "tests.test_copy.CopyTests.test_copy_no_column_limit": "5807", - "tests.test_copy.CopyTests.test_copy_rowcount": "5807", - 
"tests.test_copy.CopyTests.test_copy_rowcount_error": "5807", - "tests.test_copy.CopyTests.test_copy_text": "5807", - "tests.test_copy.CopyTests.test_copy_to": "5807", - "tests.test_copy.CopyTests.test_copy_to_propagate_error": "5807", - "tests.test_copy.CopyTests.test_copy_to_segfault": "5807", - "tests.test_cursor.CursorTests.test_callproc_dict": "17511", - "tests.test_cursor.CursorTests.test_description_attribs": "30352", - "tests.test_cursor.CursorTests.test_description_extra_attribs": "30352", - "tests.test_cursor.CursorTests.test_executemany_propagate_exceptions": "5807", - "tests.test_cursor.CursorTests.test_external_close_async": "35897", - "tests.test_cursor.CursorTests.test_external_close_sync": "35897", - "tests.test_cursor.CursorTests.test_invalid_name": "5807", - "tests.test_cursor.CursorTests.test_iter_named_cursor_default_itersize": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_efficient": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_itersize": "30352", - "tests.test_cursor.CursorTests.test_iter_named_cursor_rownumber": "30352", - "tests.test_cursor.CursorTests.test_named_cursor_stealing": "30352", - "tests.test_cursor.CursorTests.test_named_noop_close": "30352", - "tests.test_cursor.CursorTests.test_not_scrollable": "30352", - "tests.test_cursor.CursorTests.test_scroll_named": "30352", - "tests.test_cursor.CursorTests.test_scrollable": "30352", - "tests.test_cursor.CursorTests.test_stolen_named_cursor_close": "30352", - "tests.test_cursor.CursorTests.test_withhold": "30352", - "tests.test_cursor.CursorTests.test_withhold_autocommit": "30352", - "tests.test_cursor.CursorTests.test_withhold_no_begin": "30352", - "tests.test_dates.DatetimeTests.test_adapt_datetime": "36115", - "tests.test_dates.DatetimeTests.test_adapt_infinity_tz": "36116", - "tests.test_dates.DatetimeTests.test_adapt_negative_timedelta": "35807", - "tests.test_dates.DatetimeTests.test_adapt_timedelta": "35807", - "tests.test_dates.DatetimeTests.test_interval_iso_8601_not_supported": "32562", - "tests.test_dates.DatetimeTests.test_time_24": "36118", - "tests.test_dates.DatetimeTests.test_type_roundtrip_timetz": "26097", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorNotGreedy": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealIter": "5807", - 
"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testPickleRealDictRow": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testRealMeansReal": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_iter_methods_2": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_mod": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_order": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_order_iter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_pop": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictConnCursorArgs": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorNotGreedy": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchAll": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchMany": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchManyNoarg": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchOne": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorIter": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorIterRowNumber": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testPickleDictRow": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.testUpdateRow": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.test_iter_methods_2": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.test_order": "5807", - "tests.test_extras_dictcursor.ExtrasDictCursorTests.test_order_iter": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_bad_col_names": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_cache": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_cursor_args": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_executemany": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchall": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchmany": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchmany_noarg": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchone": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_iter": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_max_cache": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_minimal_generation": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchall": "5807", - 
"tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchmany": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchone": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_rownumber": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_no_result_no_surprise": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_not_greedy": "5807", - "tests.test_extras_dictcursor.NamedTupleCursorTest.test_record_updated": "5807", - "tests.test_green.CallbackErrorTestCase.test_errors_named_cursor": "30352", - "tests.test_green.GreenTestCase.test_non_block_after_notification": "5807", - "tests.test_lobject.LargeObject64Tests.test_seek_tell_truncate_greater_than_2gb": "35902", - "tests.test_lobject.LargeObjectTests.test_close": "35902", - "tests.test_lobject.LargeObjectTests.test_close_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_close_connection_gone": "243", - "tests.test_lobject.LargeObjectTests.test_close_twice": "243", - "tests.test_lobject.LargeObjectTests.test_create": "243", - "tests.test_lobject.LargeObjectTests.test_create_with_existing_oid": "243", - "tests.test_lobject.LargeObjectTests.test_create_with_oid": "243", - "tests.test_lobject.LargeObjectTests.test_export": "243", - "tests.test_lobject.LargeObjectTests.test_export_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_export_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_factory": "243", - "tests.test_lobject.LargeObjectTests.test_import": "243", - "tests.test_lobject.LargeObjectTests.test_mode_defaults": "243", - "tests.test_lobject.LargeObjectTests.test_open_existing": "243", - "tests.test_lobject.LargeObjectTests.test_open_for_write": "243", - "tests.test_lobject.LargeObjectTests.test_open_mode_n": "243", - "tests.test_lobject.LargeObjectTests.test_open_non_existent": "243", - "tests.test_lobject.LargeObjectTests.test_read": "243", - "tests.test_lobject.LargeObjectTests.test_read_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_read_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_read_binary": "243", - "tests.test_lobject.LargeObjectTests.test_read_large": "243", - "tests.test_lobject.LargeObjectTests.test_read_text": "243", - "tests.test_lobject.LargeObjectTests.test_seek_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_seek_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_seek_tell": "243", - "tests.test_lobject.LargeObjectTests.test_tell_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_tell_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_unlink": "243", - "tests.test_lobject.LargeObjectTests.test_unlink_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_unlink_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_write": "243", - "tests.test_lobject.LargeObjectTests.test_write_after_close": "243", - "tests.test_lobject.LargeObjectTests.test_write_after_commit": "243", - "tests.test_lobject.LargeObjectTests.test_write_large": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate_after_close": "243", - "tests.test_lobject.LargeObjectTruncateTests.test_truncate_after_commit": "243", - "tests.test_module.ExceptionsTestCase.test_9_3_diagnostics": "5807", - "tests.test_module.ExceptionsTestCase.test_diagnostics_copy": "5807", - "tests.test_module.ExceptionsTestCase.test_diagnostics_from_commit": 
"5807", - "tests.test_notify.NotifiesTests.test_many_notifies": "6130", - "tests.test_notify.NotifiesTests.test_notifies_received_on_execute": "6130", - "tests.test_notify.NotifiesTests.test_notifies_received_on_poll": "6130", - "tests.test_notify.NotifiesTests.test_notify_attributes": "6130", - "tests.test_notify.NotifiesTests.test_notify_deque": "6130", - "tests.test_notify.NotifiesTests.test_notify_noappend": "6130", - "tests.test_notify.NotifiesTests.test_notify_object": "6130", - "tests.test_notify.NotifiesTests.test_notify_payload": "6130", - "tests.test_quote.QuotingTestCase.test_koi8": "35882", - "tests.test_quote.QuotingTestCase.test_latin1": "35882", - "tests.test_sql.SqlFormatTests.test_copy": "5807", - "tests.test_transaction.DeadlockSerializationTests.test_deadlock": "6583", - "tests.test_transaction.TransactionTests.test_commit": "5807", - "tests.test_transaction.TransactionTests.test_failed_commit": "5807", - "tests.test_transaction.TransactionTests.test_rollback": "5807", - "tests.test_types_basic.TypesBasicTests.testArray": "32552", - "tests.test_types_basic.TypesBasicTests.testArrayOfNulls": "32552", - "tests.test_types_basic.TypesBasicTests.testEmptyArray": "23299", - "tests.test_types_basic.TypesBasicTests.testEmptyArrayRegression": "36179", - "tests.test_types_basic.TypesBasicTests.testGenericArrayNull": "25123", - "tests.test_types_basic.TypesBasicTests.testNestedArrays": "32552", - "tests.test_types_basic.TypesBasicTests.testNestedEmptyArray": "32552", - "tests.test_types_basic.TypesBasicTests.testNetworkArray": "18846", - "tests.test_types_extras.AdaptTypeTestCase.test_cast_composite": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_cast_nested": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_composite_array": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_composite_namespace": "26443", - "tests.test_types_extras.AdaptTypeTestCase.test_empty_string": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_from_tables": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_non_dbapi_connection": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_globally": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_on_connection": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_register_on_cursor": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_subclass": "27793", - "tests.test_types_extras.AdaptTypeTestCase.test_wrong_schema": "27793", - "tests.test_types_extras.JsonTestCase.test_default_cast": "23468", - "tests.test_types_extras.JsonTestCase.test_register_default": "41653", - "tests.test_types_extras.JsonTestCase.test_scs": "36215", - "tests.test_types_extras.JsonbTestCase.test_default_cast": "23468", - "tests.test_types_extras.JsonbTestCase.test_loads": "23468", - "tests.test_types_extras.JsonbTestCase.test_null": "23468", - "tests.test_types_extras.JsonbTestCase.test_register_default": "23468", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_date_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_number_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_adapt_numeric_range": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_date": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_empty": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_inf": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_null": "27791", - 
"tests.test_types_extras.RangeCasterTestCase.test_cast_numbers": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_timestamp": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_cast_timestamptz": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_range_escaping": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_register_range_adapter": "27791", - "tests.test_types_extras.RangeCasterTestCase.test_schema_range": "26443", - "tests.test_with.WithCursorTestCase.test_exception_swallow": "30352", - "tests.test_with.WithCursorTestCase.test_named_with_noop": "30352", -} - -var psycopgIgnoreList20_1 = psycopgIgnoreList19_2 - -var psycopgIgnoreList19_2 = psycopgIgnoreList19_1 - -var psycopgIgnoreList19_1 = blacklist{ - "tests.test_green.GreenTestCase.test_flush_on_write": "flakey", - "tests.test_connection.TestConnectionInfo.test_backend_pid": "we return -1 for pg_backend_pid()", -} diff --git a/pkg/cmd/roachtest/psycopg_blocklist.go b/pkg/cmd/roachtest/psycopg_blocklist.go new file mode 100644 index 000000000000..2f87381ba9dc --- /dev/null +++ b/pkg/cmd/roachtest/psycopg_blocklist.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +var psycopgBlocklists = blocklistsForVersion{ + {"v19.2", "psycopgBlockList19_2", psycopgBlockList19_2, "psycopgIgnoreList19_2", psycopgIgnoreList19_2}, + {"v20.1", "psycopgBlockList20_1", psycopgBlockList20_1, "psycopgIgnoreList20_1", psycopgIgnoreList20_1}, + {"v20.2", "psycopgBlockList20_2", psycopgBlockList20_2, "psycopgIgnoreList20_2", psycopgIgnoreList20_2}, +} + +// These are lists of known psycopg test errors and failures. +// When the psycopg test suite is run, the results are compared to this list. +// Any passed test that is not on this list is reported as PASS - expected +// Any passed test that is on this list is reported as PASS - unexpected +// Any failed test that is on this list is reported as FAIL - expected +// Any failed test that is not on this list is reported as FAIL - unexpected +// Any test on this list that is not run is reported as FAIL - not run +// +// Please keep these lists alphabetized for easy diffing. +// After a failed run, an updated version of this blocklist should be available +// in the test log. 
+var psycopgBlockList20_2 = blocklist{
+	"tests.test_async_keyword.CancelTests.test_async_cancel": "41335",
+}
+
+var psycopgBlockList20_1 = blocklist{
+	"tests.test_async_keyword.CancelTests.test_async_cancel": "41335",
+}
+
+var psycopgBlockList19_2 = blocklist{
+	"tests.test_async.AsyncTests.test_async_after_async": "5807",
+	"tests.test_async.AsyncTests.test_async_callproc": "5807",
+	"tests.test_async.AsyncTests.test_async_connection_error_message": "5807",
+	"tests.test_async.AsyncTests.test_async_cursor_gone": "5807",
+	"tests.test_async.AsyncTests.test_async_dont_read_all": "5807",
+	"tests.test_async.AsyncTests.test_async_executemany": "5807",
+	"tests.test_async.AsyncTests.test_async_fetch_wrong_cursor": "5807",
+	"tests.test_async.AsyncTests.test_async_iter": "5807",
+	"tests.test_async.AsyncTests.test_async_named_cursor": "5807",
+	"tests.test_async.AsyncTests.test_async_scroll": "5807",
+	"tests.test_async.AsyncTests.test_async_select": "5807",
+	"tests.test_async.AsyncTests.test_async_subclass": "5807",
+	"tests.test_async.AsyncTests.test_close": "unknown",
+	"tests.test_async.AsyncTests.test_commit_while_async": "5807",
+	"tests.test_async.AsyncTests.test_connection_setup": "5807",
+	"tests.test_async.AsyncTests.test_copy_no_hang": "5807",
+	"tests.test_async.AsyncTests.test_copy_while_async": "5807",
+	"tests.test_async.AsyncTests.test_error": "5807",
+	"tests.test_async.AsyncTests.test_error_two_cursors": "5807",
+	"tests.test_async.AsyncTests.test_fetch_after_async": "5807",
+	"tests.test_async.AsyncTests.test_lobject_while_async": "5807",
+	"tests.test_async.AsyncTests.test_non_block_after_notification": "5807",
+	"tests.test_async.AsyncTests.test_notices": "5807",
+	"tests.test_async.AsyncTests.test_notify": "5807",
+	"tests.test_async.AsyncTests.test_poll_conn_for_notification": "5807",
+	"tests.test_async.AsyncTests.test_poll_noop": "5807",
+	"tests.test_async.AsyncTests.test_reset_while_async": "5807",
+	"tests.test_async.AsyncTests.test_rollback_while_async": "5807",
+	"tests.test_async.AsyncTests.test_scroll": "5807",
+	"tests.test_async.AsyncTests.test_set_parameters_while_async": "5807",
+	"tests.test_async.AsyncTests.test_stop_on_first_error": "5807",
+	"tests.test_async.AsyncTests.test_sync_poll": "5807",
+	"tests.test_async_keyword.AsyncTests.test_async_connection_error_message": "5807",
+	"tests.test_async_keyword.AsyncTests.test_async_subclass": "5807",
+	"tests.test_async_keyword.AsyncTests.test_connection_setup": "5807",
+	"tests.test_async_keyword.CancelTests.test_async_cancel": "5807",
+	"tests.test_async_keyword.CancelTests.test_async_connection_cancel": "5807",
+	"tests.test_dates.DatetimeTests.test_adapt_negative_timedelta": "35807",
+	"tests.test_dates.DatetimeTests.test_adapt_timedelta": "35807",
+	"tests.test_dates.DatetimeTests.test_time_24": "36118",
+	"tests.test_dates.DatetimeTests.test_type_roundtrip_timetz": "26097",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchAll": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchMany": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchManyNoarg": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorFetchOne": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIter": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorIterRowNumber": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorRealWithNamedCursorNotGreedy": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchAll": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchMany": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchManyNoarg": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealFetchOne": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealIter": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testDictCursorWithPlainCursorRealIterRowNumber": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testPickleRealDictRow": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.testRealMeansReal": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_copy": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_iter_methods_2": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_mod": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_order": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_order_iter": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorRealTests.test_pop": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictConnCursorArgs": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchAll": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchMany": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchManyNoarg": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorFetchOne": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIter": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorIterRowNumber": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithNamedCursorNotGreedy": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchAll": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchMany": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchManyNoarg": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorFetchOne": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorIter": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testDictCursorWithPlainCursorIterRowNumber": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testPickleDictRow": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.testUpdateRow": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.test_copy": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.test_iter_methods_2": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.test_order": "5807",
+	"tests.test_extras_dictcursor.ExtrasDictCursorTests.test_order_iter": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_bad_col_names": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_cache": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_cursor_args": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_executemany": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchall": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchmany": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchmany_noarg": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_fetchone": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_iter": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_max_cache": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_minimal_generation": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_named": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchall": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchmany": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_fetchone": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_named_rownumber": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_no_result_no_surprise": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_not_greedy": "5807",
+	"tests.test_extras_dictcursor.NamedTupleCursorTest.test_record_updated": "5807",
+	"tests.test_types_basic.TypesBasicTests.testEmptyArray": "23299",
+}
+
+var psycopgIgnoreList20_2 = psycopgIgnoreList20_1
+
+var psycopgIgnoreList20_1 = psycopgIgnoreList19_2
+
+var psycopgIgnoreList19_2 = blocklist{
+	"tests.test_async.AsyncTests.test_flush_on_write": "44709",
+	"tests.test_green.GreenTestCase.test_flush_on_write": "flakey",
+	"tests.test_connection.TestConnectionInfo.test_backend_pid": "we return -1 for pg_backend_pid()",
+}
(?P[^'"]*?)(?: u?['"](?P.*)['"])?$`) func (r *ormTestsResults) parsePythonUnitTestOutput( - input []byte, expectedFailures blacklist, ignoredList blacklist, + input []byte, expectedFailures blocklist, ignoredList blocklist, ) { scanner := bufio.NewScanner(bytes.NewReader(input)) for scanner.Scan() { @@ -31,11 +31,12 @@ func (r *ormTestsResults) parsePythonUnitTestOutput( groups[pythonUnitTestOutputRegex.SubexpNames()[i]] = name } test := fmt.Sprintf("%s.%s", groups["class"], groups["name"]) - var skipReason string - if groups["result"] == "skipped" { + skipped := groups["result"] == "skipped" || groups["result"] == "expected failure" + skipReason := "" + if skipped { skipReason = groups["reason"] } - pass := groups["result"] == "ok" + pass := groups["result"] == "ok" || groups["result"] == "unexpected success" r.allTests = append(r.allTests, test) ignoredIssue, expectedIgnored := ignoredList[test] @@ -44,10 +45,10 @@ func (r *ormTestsResults) parsePythonUnitTestOutput( case expectedIgnored: r.results[test] = fmt.Sprintf("--- SKIP: %s due to %s (expected)", test, ignoredIssue) r.ignoredCount++ - case len(skipReason) > 0 && expectedFailure: + case skipped && expectedFailure: r.results[test] = fmt.Sprintf("--- SKIP: %s due to %s (unexpected)", test, skipReason) r.unexpectedSkipCount++ - case len(skipReason) > 0: + case skipped: r.results[test] = fmt.Sprintf("--- SKIP: %s due to %s (expected)", test, skipReason) r.skipCount++ case pass && !expectedFailure: diff --git a/pkg/cmd/roachtest/quit.go b/pkg/cmd/roachtest/quit.go new file mode 100644 index 000000000000..01b014ddbd12 --- /dev/null +++ b/pkg/cmd/roachtest/quit.go @@ -0,0 +1,463 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/contextutil" + "github.com/cockroachdb/cockroach/pkg/util/retry" + "github.com/cockroachdb/errors" + "github.com/kr/pretty" +) + +type quitTest struct { + t *test + c *cluster + args option +} + +// runQuitTransfersLeases performs rolling restarts on a +// 3-node cluster and ascertains that each node shutting down +// transfers all its leases reliably to other nodes prior to +// terminating. +func runQuitTransfersLeases( + ctx context.Context, + t *test, + c *cluster, + methodName string, + method func(ctx context.Context, t *test, c *cluster, nodeID int), +) { + q := quitTest{t: t, c: c} + q.init(ctx) + q.runTest(ctx, method) +} + +func (q *quitTest) init(ctx context.Context) { + q.args = startArgs( + "--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms", // iterate fast for rebalancing + "-a", "--vmodule=store=1,replica=1,replica_proposal=1", // verbosity to troubleshoot drains + ) + q.c.Put(ctx, cockroach, "./cockroach") + q.c.Start(ctx, q.t, q.args) +} + +func (q *quitTest) Fatal(args ...interface{}) { + q.t.Fatal(args...) +} + +func (q *quitTest) Fatalf(format string, args ...interface{}) { + q.t.Fatalf(format, args...) 
diff --git a/pkg/cmd/roachtest/quit.go b/pkg/cmd/roachtest/quit.go
new file mode 100644
index 000000000000..01b014ddbd12
--- /dev/null
+++ b/pkg/cmd/roachtest/quit.go
@@ -0,0 +1,463 @@
+// Copyright 2020 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/cockroachdb/cockroach/pkg/testutils"
+	"github.com/cockroachdb/cockroach/pkg/util/contextutil"
+	"github.com/cockroachdb/cockroach/pkg/util/retry"
+	"github.com/cockroachdb/errors"
+	"github.com/kr/pretty"
+)
+
+type quitTest struct {
+	t    *test
+	c    *cluster
+	args option
+}
+
+// runQuitTransfersLeases performs rolling restarts on a
+// 3-node cluster and ascertains that each node shutting down
+// transfers all its leases reliably to other nodes prior to
+// terminating.
+func runQuitTransfersLeases(
+	ctx context.Context,
+	t *test,
+	c *cluster,
+	methodName string,
+	method func(ctx context.Context, t *test, c *cluster, nodeID int),
+) {
+	q := quitTest{t: t, c: c}
+	q.init(ctx)
+	q.runTest(ctx, method)
+}
+
+func (q *quitTest) init(ctx context.Context) {
+	q.args = startArgs(
+		"--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms", // iterate fast for rebalancing
+		"-a", "--vmodule=store=1,replica=1,replica_proposal=1", // verbosity to troubleshoot drains
+	)
+	q.c.Put(ctx, cockroach, "./cockroach")
+	q.c.Start(ctx, q.t, q.args)
+}
+
+func (q *quitTest) Fatal(args ...interface{}) {
+	q.t.Fatal(args...)
+}
+
+func (q *quitTest) Fatalf(format string, args ...interface{}) {
+	q.t.Fatalf(format, args...)
+}
+
+func (q *quitTest) runTest(
+	ctx context.Context, method func(ctx context.Context, t *test, c *cluster, nodeID int),
+) {
+	q.waitForUpReplication(ctx)
+	q.createRanges(ctx)
+	q.setupIncrementalDrain(ctx)
+
+	// runTest iterates through the cluster three times and restarts each
+	// node in turn. After each node shutdown it verifies that there are
+	// no leases held by the down node. (See the comments inside
+	// checkNoLeases() for details.)
+	//
+	// The shutdown method is passed in via the 'method' parameter, used
+	// below.
+	q.t.l.Printf("now running restart loop\n")
+	for i := 0; i < 3; i++ {
+		q.t.l.Printf("iteration %d\n", i)
+		for nodeID := 1; nodeID <= q.c.spec.NodeCount; nodeID++ {
+			q.t.l.Printf("stopping node %d\n", nodeID)
+			q.runWithTimeout(ctx, func(ctx context.Context) { method(ctx, q.t, q.c, nodeID) })
+			q.runWithTimeout(ctx, func(ctx context.Context) { q.checkNoLeases(ctx, nodeID) })
+			q.t.l.Printf("restarting node %d\n", nodeID)
+			q.runWithTimeout(ctx, func(ctx context.Context) { q.restartNode(ctx, nodeID) })
+		}
+	}
+}
+
+// restartNode restarts one node and waits until it's up and ready to
+// accept clients.
+func (q *quitTest) restartNode(ctx context.Context, nodeID int) {
+	q.c.Start(ctx, q.t, q.args, q.c.Node(nodeID))
+
+	q.t.l.Printf("waiting for readiness of node %d\n", nodeID)
+	// Now perform a SQL query. This achieves two goals:
+	// - it waits until the server is ready.
+	// - the particular query forces a cluster-wide RPC, which
+	//   forces any circuit breaker to trip and re-establish
+	//   the RPC connection if needed.
+	db := q.c.Conn(ctx, nodeID)
+	defer db.Close()
+	if _, err := db.ExecContext(ctx, `TABLE crdb_internal.cluster_sessions`); err != nil {
+		q.Fatal(err)
+	}
+}
+
+func (q *quitTest) waitForUpReplication(ctx context.Context) {
+	db := q.c.Conn(ctx, 1)
+	defer db.Close()
+
+	// We'll want rebalancing to be a bit faster than normal, so
+	// that the up-replication does not take ages.
+	if _, err := db.ExecContext(ctx, `SET CLUSTER SETTING kv.snapshot_rebalance.max_rate = '128MiB'`); err != nil {
+		q.Fatal(err)
+	}
+
+	err := retry.ForDuration(30*time.Second, func() error {
+		q.t.l.Printf("waiting for up-replication\n")
+		row := db.QueryRowContext(ctx, `SELECT min(array_length(replicas, 1)) FROM crdb_internal.ranges_no_leases`)
+		minReplicas := 0
+		if err := row.Scan(&minReplicas); err != nil {
+			q.Fatal(err)
+		}
+		if minReplicas < 3 {
+			time.Sleep(time.Second)
+			return errors.Newf("some ranges not up-replicated yet")
+		}
+		return nil
+	})
+	if err != nil {
+		q.Fatalf("cluster did not up-replicate: %v", err)
+	}
+}
+
+// runWithTimeout runs a command with a 1-minute timeout.
+func (q *quitTest) runWithTimeout(ctx context.Context, fn func(ctx context.Context)) {
+	if err := contextutil.RunWithTimeout(ctx, "do", time.Minute, func(ctx context.Context) error {
+		fn(ctx)
+		return nil
+	}); err != nil {
+		q.Fatal(err)
+	}
+}
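runWithTimeout delegates to CockroachDB's contextutil.RunWithTimeout helper. As a rough stdlib-only approximation of the pattern (ignoring the helper's error decoration, which this sketch does not reproduce), it is just a derived context with a deadline:

package main

import (
	"context"
	"fmt"
	"time"
)

// Approximation of the pattern: run fn under a context that expires after
// timeout. The real helper also wraps timeout errors with the operation name.
func runWithTimeout(ctx context.Context, timeout time.Duration, fn func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return fn(ctx)
}

func main() {
	err := runWithTimeout(context.Background(), time.Minute, func(ctx context.Context) error {
		fmt.Println("work bounded by a 1-minute deadline")
		return ctx.Err() // nil unless the deadline already expired
	})
	fmt.Println(err) // <nil>
}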
+
+// setupIncrementalDrain simulates requiring more than one Drain round
+// to transfer all leases. This way, we exercise the iterating code in
+// quit/node drain.
+func (q *quitTest) setupIncrementalDrain(ctx context.Context) {
+	db := q.c.Conn(ctx, 1)
+	defer db.Close()
+	if _, err := db.ExecContext(ctx, `
+SET CLUSTER SETTING server.shutdown.lease_transfer_wait = '10ms'`); err != nil {
+		if strings.Contains(err.Error(), "unknown cluster setting") {
+			// old version; ok
+		} else {
+			q.Fatal(err)
+		}
+	}
+}
+
+// createRanges creates a bunch of ranges on the test cluster.
+func (q *quitTest) createRanges(ctx context.Context) {
+	const numRanges = 500
+
+	db := q.c.Conn(ctx, 1)
+	defer db.Close()
+	if _, err := db.ExecContext(ctx, fmt.Sprintf(`
+CREATE TABLE t(x, y, PRIMARY KEY(x)) AS SELECT @1, 1 FROM generate_series(1,%[1]d)`,
+		numRanges)); err != nil {
+		q.Fatal(err)
+	}
+	// We split them from right-to-left so we're peeling at most 1
+	// row each time on the right.
+	//
+	// Also we do it a hundred at a time, so as to be able to see the
+	// progress when watching the roachtest progress interactively.
+	for i := numRanges; i > 1; i -= 100 {
+		q.t.l.Printf("creating %d ranges (%d-%d)...\n", numRanges, i, i-99)
+		if _, err := db.ExecContext(ctx, fmt.Sprintf(`
+ALTER TABLE t SPLIT AT TABLE generate_series(%[1]d,%[1]d-99,-1)`, i)); err != nil {
+			q.Fatal(err)
+		}
+	}
+}
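For a feel of what the split loop above emits: with numRanges = 500 it issues five statements, each carrying 100 descending split points (the %[1]d-99 arithmetic is left for the SQL layer to evaluate). A quick sketch mirroring the loop:

package main

import "fmt"

// Mirrors the loop in createRanges to show the generated statements.
func main() {
	const numRanges = 500
	for i := numRanges; i > 1; i -= 100 {
		fmt.Printf("ALTER TABLE t SPLIT AT TABLE generate_series(%[1]d,%[1]d-99,-1)\n", i)
	}
	// Output:
	//   ALTER TABLE t SPLIT AT TABLE generate_series(500,500-99,-1)
	//   ALTER TABLE t SPLIT AT TABLE generate_series(400,400-99,-1)
	//   ... down to generate_series(100,100-99,-1)
}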
+
+// checkNoLeases verifies that no range has a lease on the node
+// that's just been shut down.
+func (q *quitTest) checkNoLeases(ctx context.Context, nodeID int) {
+	// We need to use SQL against a node that's not the one we're
+	// shutting down.
+	otherNodeID := 1 + nodeID%q.c.spec.NodeCount
+
+	// Now we're going to check two things:
+	//
+	// 1) *immediately*, that every range in the cluster has a lease
+	//    some other place than nodeID.
+	//
+	//    Note that with this condition, it is possible that _some_
+	//    replica of any given range thinks that the leaseholder is
+	//    nodeID, even though _another_ replica has become leaseholder
+	//    already. That's because followers can lag behind and
+	//    drain does not wait for followers to catch up.
+	//    https://github.com/cockroachdb/cockroach/issues/47100
+	//
+	// 2) *eventually* that every node other than nodeID has no range
+	//    replica whose lease refers to nodeID, i.e. the followers
+	//    have all caught up.
+	//    Note: when issue #47100 is fixed, this 2nd condition
+	//    must be true immediately -- drain is then able to wait
+	//    for all followers to learn who the new leaseholder is.
+
+	if err := testutils.SucceedsSoonError(func() error {
+		// To achieve that, we ask each node in turn for its ranges
+		// report.
+		//
+		// For condition (1) we accumulate all the known ranges in
+		// knownRanges, and assign them the node ID of their leaseholder
+		// whenever it is not nodeID. Then at the end we check that every
+		// entry in the map has a non-zero value.
+		knownRanges := map[string]int{}
+		//
+		// For condition (2) we accumulate the unwanted leases in
+		// invLeaseMap, then check at the end that the map is empty.
+		invLeaseMap := map[int][]string{}
+		for i := 1; i <= q.c.spec.NodeCount; i++ {
+			if i == nodeID {
+				// Can't request this node. Ignore.
+				continue
+			}
+
+			q.t.l.Printf("retrieving ranges for node %d\n", i)
+			// Get the report via HTTP.
+			// Flag -s is to remove progress on stderr, so that the buffer
+			// contains the JSON of the response and nothing else.
+			buf, err := q.c.RunWithBuffer(ctx, q.t.l, q.c.Node(otherNodeID),
+				"curl", "-s", fmt.Sprintf("http://%s/_status/ranges/%d",
+					q.c.InternalAdminUIAddr(ctx, q.c.Node(otherNodeID))[0], i))
+			if err != nil {
+				q.Fatal(err)
+			}
+			// We need just a subset of the response. Make an ad-hoc
+			// struct with just the bits of interest.
+			type jsonOutput struct {
+				Ranges []struct {
+					State struct {
+						State struct {
+							Desc struct {
+								RangeID string `json:"rangeId"`
+							} `json:"desc"`
+							Lease struct {
+								Replica struct {
+									NodeID int `json:"nodeId"`
+								} `json:"replica"`
+							} `json:"lease"`
+						} `json:"state"`
+					} `json:"state"`
+				} `json:"ranges"`
+			}
+			var details jsonOutput
+			if err := json.Unmarshal(buf, &details); err != nil {
+				q.Fatal(err)
+			}
+			// Some sanity check.
+			if len(details.Ranges) == 0 {
+				q.Fatal("expected some ranges from RPC, got none")
+			}
+			// Is there any range whose lease refers to nodeID?
+			var invalidLeases []string
+			for _, r := range details.Ranges {
+				// Another sanity check.
+				if r.State.State.Lease.Replica.NodeID == 0 {
+					q.Fatalf("expected a valid lease state, got %# v", pretty.Formatter(r))
+				}
+				curLeaseHolder := knownRanges[r.State.State.Desc.RangeID]
+				if r.State.State.Lease.Replica.NodeID == nodeID {
+					// As per condition (2) above we want to know which ranges
+					// have an unexpected left over lease on nodeID.
+					invalidLeases = append(invalidLeases, r.State.State.Desc.RangeID)
+				} else {
+					// As per condition (1) above we track in knownRanges if there
+					// is at least one node other than nodeID that thinks that
+					// the lease has been transferred.
+					curLeaseHolder = r.State.State.Lease.Replica.NodeID
+				}
+				knownRanges[r.State.State.Desc.RangeID] = curLeaseHolder
+			}
+			if len(invalidLeases) > 0 {
+				invLeaseMap[i] = invalidLeases
+			}
+		}
+		// (1): is there a range with no lease outside of nodeID?
+		var leftOver []string
+		for r, n := range knownRanges {
+			if n == 0 {
+				leftOver = append(leftOver, r)
+			}
+		}
+		if len(leftOver) > 0 {
+			q.Fatalf("(1) ranges with no lease outside of node %d: %# v", nodeID, pretty.Formatter(leftOver))
+		}
+		// (2): is there a range with leftover leases on nodeID?
+		//
+		// TODO(knz): Eventually we want this condition to be always
+		// true, i.e. fail the test immediately if found to be false
+		// instead of waiting. (#47100)
+		if len(invLeaseMap) > 0 {
+			err := errors.Newf(
+				"(2) ranges with remaining leases on node %d, per node: %# v",
+				nodeID, pretty.Formatter(invLeaseMap))
+			q.t.l.Printf("condition failed: %v\n", err)
+			q.t.l.Printf("retrying until SucceedsSoon has enough...\n")
+			return err
+		}
+		return nil
+	}); err != nil {
+		q.Fatal(err)
+	}
+
+	db := q.c.Conn(ctx, otherNodeID)
+	defer db.Close()
+	// For good measure, also write to the table. This ensures it
+	// remains available.
+	if _, err := db.ExecContext(ctx, `UPDATE t SET y = y + 1`); err != nil {
+		q.Fatal(err)
+	}
+}
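The ad-hoc jsonOutput struct above is the usual encoding/json trick of declaring only the fields you care about and letting the decoder drop the rest. A self-contained sketch of the same technique against a fabricated payload (the real /_status/ranges response carries far more fields):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Fabricated for illustration; shaped like the subset checkNoLeases reads.
	payload := []byte(`{
		"ranges": [
			{"state": {"state": {
				"desc": {"rangeId": "17"},
				"lease": {"replica": {"nodeId": 2}}
			}}}
		]
	}`)

	// Declare only the fields of interest; unknown JSON keys are ignored.
	var details struct {
		Ranges []struct {
			State struct {
				State struct {
					Desc struct {
						RangeID string `json:"rangeId"`
					} `json:"desc"`
					Lease struct {
						Replica struct {
							NodeID int `json:"nodeId"`
						} `json:"replica"`
					} `json:"lease"`
				} `json:"state"`
			} `json:"state"`
		} `json:"ranges"`
	}
	if err := json.Unmarshal(payload, &details); err != nil {
		panic(err)
	}
	r := details.Ranges[0].State.State
	fmt.Println(r.Desc.RangeID, r.Lease.Replica.NodeID) // 17 2
}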
+ registerTest("quit", "v19.2.0", func(ctx context.Context, t *test, c *cluster, nodeID int) { + _ = runQuit(ctx, t, c, nodeID) + }) + + // Uses 'cockroach drain', followed by a non-graceful process + // kill. If the drain is successful, the leases are transferred + // successfully even if if the process terminates non-gracefully. + registerTest("drain", "v20.1.0", func(ctx context.Context, t *test, c *cluster, nodeID int) { + buf, err := c.RunWithBuffer(ctx, t.l, c.Node(nodeID), + "./cockroach", "node", "drain", "--insecure", "--logtostderr=INFO", + fmt.Sprintf("--port={pgport:%d}", nodeID), + ) + t.l.Printf("cockroach node drain:\n%s\n", buf) + if err != nil { + t.Fatal(err) + } + // Send first SIGHUP to the process to force it to flush its logs + // before terminating. Otherwise the SIGKILL below will truncate + // the log. + c.Stop(ctx, c.Node(nodeID), + roachprodArgOption{"--sig", "1"}, + ) + // We use SIGKILL to terminate nodes here. Of course, an operator + // should not do this and instead terminate with SIGTERM even + // after a complete graceful drain. However, what this test is + // asserting is that a graceful drain is *sufficient* to make + // everything look smooth from the perspective of other nodes, + // even if the node goes "kaput" after the drain. + // + // (This also ensures that the test exercises separate code; if we + // used SIGTERM here we'd be combining the graceful drain by 'node + // drain' with the graceful drain by the signal handler. If either + // becomes broken, the test wouldn't help identify which one needs + // attention.) + c.Stop(ctx, c.Node(nodeID), + roachprodArgOption{"--sig", "9", "--wait"}) + }) +} + +func runQuit(ctx context.Context, t *test, c *cluster, nodeID int, extraArgs ...string) []byte { + args := append([]string{ + "./cockroach", "quit", "--insecure", "--logtostderr=INFO", + fmt.Sprintf("--port={pgport:%d}", nodeID)}, + extraArgs...) + buf, err := c.RunWithBuffer(ctx, t.l, c.Node(nodeID), args...) + t.l.Printf("cockroach quit:\n%s\n", buf) + if err != nil { + t.Fatal(err) + } + c.Stop(ctx, c.Node(nodeID), + roachprodArgOption{"--sig", "0", "--wait"}, // no shutdown, just wait for exit + ) + return buf +} + +func registerQuitAllNodes(r *testRegistry) { + // This test verifies that 'cockroach quit' can terminate all nodes + // in the cluster: normally as long as there's quorum, then with a + // short --drain-wait for the remaining nodes under quorum. + r.Add(testSpec{ + Name: "quit-all-nodes", + Owner: OwnerKV, + Cluster: makeClusterSpec(5), + MinVersion: "v20.1.0", + Run: func(ctx context.Context, t *test, c *cluster) { + q := quitTest{t: t, c: c} + + // Start the cluster. + q.init(ctx) + // Wait for up-replication so that the cluster expects 1 ranges + // everywhere for system ranges. + q.waitForUpReplication(ctx) + + // Shut one nodes down gracefully with a very long wait (longer + // than the test timeout). This is guaranteed to work - we still + // have quorum at that point. + q.runWithTimeout(ctx, func(ctx context.Context) { _ = runQuit(ctx, q.t, q.c, 5, "--drain-wait=1h") }) + + // Now shut down the remaining 4 nodes less gracefully, with a + // short wait. + + // For the next two nodes, we may or may not observe that + // the graceful shutdown succeed. It may succeed if every + // range has enough quorum on the last 2 nodes (shut down later below). + // It may fail if some ranges have a quorum composed of n3, n4, n5. 
+
+func registerQuitAllNodes(r *testRegistry) {
+	// This test verifies that 'cockroach quit' can terminate all nodes
+	// in the cluster: normally as long as there's quorum, then with a
+	// short --drain-wait for the remaining nodes under quorum.
+	r.Add(testSpec{
+		Name:       "quit-all-nodes",
+		Owner:      OwnerKV,
+		Cluster:    makeClusterSpec(5),
+		MinVersion: "v20.1.0",
+		Run: func(ctx context.Context, t *test, c *cluster) {
+			q := quitTest{t: t, c: c}
+
+			// Start the cluster.
+			q.init(ctx)
+			// Wait for up-replication so that every range, including the
+			// system ranges, is replicated at least three ways.
+			q.waitForUpReplication(ctx)
+
+			// Shut one node down gracefully with a very long wait (longer
+			// than the test timeout). This is guaranteed to work - we still
+			// have quorum at that point.
+			q.runWithTimeout(ctx, func(ctx context.Context) { _ = runQuit(ctx, q.t, q.c, 5, "--drain-wait=1h") })
+
+			// Now shut down the remaining 4 nodes less gracefully, with a
+			// short wait.
+
+			// For the next two nodes, we may or may not observe that
+			// the graceful shutdown succeeds. It may succeed if every
+			// range has enough quorum on the last 2 nodes (shut down later below).
+			// It may fail if some ranges have a quorum composed of n3, n4, n5.
+			// See: https://github.com/cockroachdb/cockroach/issues/48339
+			q.runWithTimeout(ctx, func(ctx context.Context) { _ = runQuit(ctx, q.t, q.c, 4, "--drain-wait=4s") })
+			q.runWithTimeout(ctx, func(ctx context.Context) { _ = runQuit(ctx, q.t, q.c, 3, "--drain-wait=4s") })
+
+			// For the last two nodes, we are always under quorum. In this
+			// case we can expect `quit` to always report a hard shutdown
+			// was required.
+			q.runWithTimeout(ctx, func(ctx context.Context) { expectHardShutdown(ctx, q.t, runQuit(ctx, q.t, q.c, 2, "--drain-wait=4s")) })
+			q.runWithTimeout(ctx, func(ctx context.Context) { expectHardShutdown(ctx, q.t, runQuit(ctx, q.t, q.c, 1, "--drain-wait=4s")) })
+
+			// At the end, restart all nodes. We do this to check that
+			// the cluster can indeed restart, and also to please
+			// the dead node detection check at the end of each test.
+			q.c.Start(ctx, q.t, q.args)
+		},
+	})
+}
+
+// expectHardShutdown expects a "drain did not complete successfully" message.
+func expectHardShutdown(ctx context.Context, t *test, cmdOut []byte) {
+	if !strings.Contains(string(cmdOut), "drain did not complete successfully") {
+		t.Fatalf("expected 'drain did not complete successfully' in quit output, got:\n%s", cmdOut)
+	}
+}
diff --git a/pkg/cmd/roachtest/rapid_restart.go b/pkg/cmd/roachtest/rapid_restart.go
index 21965849df2c..303dab829016 100644
--- a/pkg/cmd/roachtest/rapid_restart.go
+++ b/pkg/cmd/roachtest/rapid_restart.go
@@ -20,7 +20,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/util/httputil"
 	"github.com/cockroachdb/cockroach/pkg/util/sysutil"
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
-	"github.com/pkg/errors"
+	"github.com/cockroachdb/errors"
 )
 
 func runRapidRestart(ctx context.Context, t *test, c *cluster) {
@@ -47,7 +47,7 @@ func runRapidRestart(ctx context.Context, t *test, c *cluster) {
 		exitCh := make(chan error, 1)
 		go func() {
 			err := c.RunE(ctx, nodes,
-				`mkdir -p {log-dir} && ./cockroach start --insecure --store={store-dir} `+
+				`mkdir -p {log-dir} && ./cockroach start-single-node --insecure --store={store-dir} `+
 					`--log-dir={log-dir} --cache=10% --max-sql-memory=10% `+
 					`--listen-addr=:{pgport:1} --http-port=$[{pgport:1}+1] `+
 					`> {log-dir}/cockroach.stdout 2> {log-dir}/cockroach.stderr`)
@@ -75,13 +75,12 @@ func runRapidRestart(ctx context.Context, t *test, c *cluster) {
 				t.l.Printf("no exit status yet, killing again")
 			}
 		}
-		cause := errors.Cause(err)
-		if exitErr, ok := cause.(*exec.ExitError); ok {
+		if exitErr := (*exec.ExitError)(nil); errors.As(err, &exitErr) {
 			switch status := sysutil.ExitStatus(exitErr); status {
 			case -1:
 				// Received SIGINT before setting up our own signal handlers or
 				// SIGKILL.
-			case 30:
+			case 20:
 				// Exit code from a SIGINT received by our signal handlers.
default: t.Fatalf("unexpected exit status %d", status) diff --git a/pkg/cmd/roachtest/rebalance_load.go b/pkg/cmd/roachtest/rebalance_load.go index d7d2a414ecf6..860e7669b817 100644 --- a/pkg/cmd/roachtest/rebalance_load.go +++ b/pkg/cmd/roachtest/rebalance_load.go @@ -19,6 +19,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" "golang.org/x/sync/errgroup" ) @@ -73,7 +74,7 @@ func registerRebalanceLoad(r *testRegistry) { "./workload run kv --read-percent=95 --tolerate-errors --concurrency=%d "+ "--duration=%v {pgurl:1-%d}", concurrency, maxDuration, len(roachNodes))) - if ctx.Err() == context.Canceled { + if errors.Is(ctx.Err(), context.Canceled) { // We got canceled either because lease balance was achieved or the // other worker hit an error. In either case, it's not this worker's // fault. @@ -125,7 +126,7 @@ func registerRebalanceLoad(r *testRegistry) { concurrency := 128 r.Add(testSpec{ - Name: `rebalance-leases-by-load`, + Name: `rebalance/by-load/leases`, Owner: OwnerKV, Cluster: makeClusterSpec(4), // the last node is just used to generate load MinVersion: "v2.1.0", @@ -138,7 +139,7 @@ func registerRebalanceLoad(r *testRegistry) { }, }) r.Add(testSpec{ - Name: `rebalance-replicas-by-load`, + Name: `rebalance/by-load/replicas`, Owner: OwnerKV, Cluster: makeClusterSpec(7), // the last node is just used to generate load MinVersion: "v2.1.0", diff --git a/pkg/cmd/roachtest/registry.go b/pkg/cmd/roachtest/registry.go index 9d75217f72a5..c25cdf3d8736 100644 --- a/pkg/cmd/roachtest/registry.go +++ b/pkg/cmd/roachtest/registry.go @@ -16,8 +16,10 @@ func registerTests(r *testRegistry) { // grep -h -E 'func register[^(]+\(.*testRegistry\) {' pkg/cmd/roachtest/*.go | grep -E -o 'register[^(]+' | grep -E -v '^register(Tests|Benchmarks)$' | grep -v '^\w*Bench$' | sort -f | awk '{printf "\t%s(r)\n", $0}' registerAcceptance(r) + registerActiveRecord(r) registerAllocator(r) registerAlterPK(r) + registerAutoUpgrade(r) registerBackup(r) registerCancel(r) registerCDC(r) @@ -26,7 +28,6 @@ func registerTests(r *testRegistry) { registerClockMonotonicTests(r) registerCopy(r) registerDecommission(r) - registerDiskFull(r) registerDiskStalledDetection(r) registerDjango(r) registerDrop(r) @@ -45,6 +46,7 @@ func registerTests(r *testRegistry) { registerIndexes(r) registerInterleaved(r) registerJepsen(r) + registerJobsMixedVersions(r) registerKV(r) registerKVContention(r) registerKVQuiescenceDead(r) @@ -55,11 +57,15 @@ func registerTests(r *testRegistry) { registerLargeRange(r) registerLedger(r) registerLibPQ(r) + registerNamespaceUpgrade(r) registerNetwork(r) + registerPebble(r) registerPgjdbc(r) registerPgx(r) registerPsycopg(r) registerQueue(r) + registerQuitAllNodes(r) + registerQuitTransfersLeases(r) registerRebalanceLoad(r) registerReplicaGC(r) registerRestart(r) @@ -67,11 +73,13 @@ func registerTests(r *testRegistry) { registerRoachmart(r) registerScaleData(r) registerSchemaChangeBulkIngest(r) - registerSchemaChangeKV(r) + registerSchemaChangeDuringKV(r) registerSchemaChangeIndexTPCC100(r) registerSchemaChangeIndexTPCC1000(r) - registerMixedSchemaChangesTPCC1000(r) + registerSchemaChangeDuringTPCC1000(r) registerSchemaChangeInvertedIndex(r) + registerSchemaChangeMixedVersions(r) + registerSchemaChangeRandomLoad(r) registerScrubAllChecksTPCC(r) registerScrubIndexOnlyTPCC(r) registerSecondaryIndexesMultiVersionCluster(r) @@ -80,11 +88,11 @@ func registerTests(r *testRegistry) { registerSyncTest(r) registerSysbench(r) registerTPCC(r) 
+	registerTPCDSVec(r)
 	registerTPCHVec(r)
 	registerKVBench(r)
 	registerTypeORM(r)
 	registerLoadSplits(r)
-	registerUpgrade(r)
 	registerVersion(r)
 	registerYCSB(r)
 	registerTPCHBench(r)
diff --git a/pkg/cmd/roachtest/replicagc.go b/pkg/cmd/roachtest/replicagc.go
index cb6169bc012d..3229c3e581eb 100644
--- a/pkg/cmd/roachtest/replicagc.go
+++ b/pkg/cmd/roachtest/replicagc.go
@@ -12,31 +12,40 @@ package main
 
 import (
 	"context"
+	gosql "database/sql"
+	"fmt"
+	"strconv"
 	"time"
 
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
 )
 
 func registerReplicaGC(r *testRegistry) {
-
-	r.Add(testSpec{
-		Name:    "replicagc-changed-peers/withRestart",
-		Owner:   OwnerKV,
-		Cluster: makeClusterSpec(6),
-		Run: func(ctx context.Context, t *test, c *cluster) {
-			runReplicaGCChangedPeers(ctx, t, c, true /* withRestart */)
-		},
-	})
-	r.Add(testSpec{
-		Name:    "replicagc-changed-peers/noRestart",
-		Owner:   OwnerKV,
-		Cluster: makeClusterSpec(6),
-		Run: func(ctx context.Context, t *test, c *cluster) {
-			runReplicaGCChangedPeers(ctx, t, c, false /* withRestart */)
-		},
-	})
+	for _, restart := range []bool{true, false} {
+		restart := restart // copy the loop variable; the Run closure executes after the loop ends
+		r.Add(testSpec{
+			Name:    fmt.Sprintf("replicagc-changed-peers/restart=%t", restart),
+			Owner:   OwnerKV,
+			Cluster: makeClusterSpec(6),
+			Run: func(ctx context.Context, t *test, c *cluster) {
+				runReplicaGCChangedPeers(ctx, t, c, restart)
+			},
+		})
+	}
 }
 
+var deadNodeAttr = "deadnode"
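The restart := restart copy inside the loop above is load-bearing (the scaledata test further below does the same with app, flags := app, flags): with the Go toolchains of this era (pre-1.22 semantics), a range variable is a single variable reused across iterations, and the Run closures only execute long after registration, so without the copy both registered specs would observe the final value. A minimal reproduction of the pitfall:

package main

import "fmt"

// Demonstrates why the per-iteration copy is needed under pre-Go-1.22
// semantics: without it, every stored closure sees the final loop value.
func main() {
	var fns []func()
	for _, restart := range []bool{true, false} {
		restart := restart // delete this line and (pre-1.22) both print false
		fns = append(fns, func() { fmt.Println(restart) })
	}
	for _, fn := range fns {
		fn() // true, then false
	}
}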
+
+// runReplicaGCChangedPeers checks that when a node that has had all of its
+// replicas taken away in absentia restarts, without being able to talk to any
+// of its old peers, it will still replicaGC its (now stale) replicas quickly.
+//
+// It does so by setting up a six node cluster, but initially with just three
+// live nodes. After adding a bit of data into the system and waiting for full
+// replication, it downs a node and adds the remaining three nodes. It then
+// attempts to decommission the original three nodes in order to move the
+// replicas off of them, and after having done so, it recommissions the downed
+// node. It expects the downed node to discover the new replica placement and
+// GC its replicas.
 func runReplicaGCChangedPeers(ctx context.Context, t *test, c *cluster, withRestart bool) {
 	if c.spec.NodeCount != 6 {
 		t.Fatal("test needs to be run with 6 nodes")
@@ -47,66 +56,48 @@ func runReplicaGCChangedPeers(ctx context.Context, t *test, c *cluster, withRest
 	c.Put(ctx, workload, "./workload", c.Node(1))
 	c.Start(ctx, t, args, c.Range(1, 3))
 
+	h := &replicagcTestHelper{c: c, t: t}
+
 	t.Status("waiting for full replication")
-	func() {
-		db := c.Conn(ctx, 3)
-		defer func() {
-			_ = db.Close()
-		}()
-		for {
-			var fullReplicated bool
-			if err := db.QueryRow(
-				// Check if all ranges are fully replicated.
-				"SELECT min(array_length(replicas, 1)) >= 3 FROM crdb_internal.ranges",
-			).Scan(&fullReplicated); err != nil {
-				t.Fatal(err)
-			}
-			if fullReplicated {
-				break
-			}
-			time.Sleep(time.Second)
-		}
-	}()
+	h.waitForFullReplication(ctx)
 
-	c.Run(ctx, c.Node(1), "./workload run kv {pgurl:1} --init --max-ops=1 --splits 100")
+	// Fill in a bunch of data.
+	c.Run(ctx, c.Node(1), "./workload init kv {pgurl:1} --splits 100")
 
 	// Kill the third node so it won't know that all of its replicas are moved
-	// elsewhere. (We don't use the first because that's what roachprod will
+	// elsewhere (we don't use the first because that's what roachprod will
 	// join new nodes to).
 	c.Stop(ctx, c.Node(3))
 
 	// Start three new nodes that will take over all data.
 	c.Start(ctx, t, args, c.Range(4, 6))
 
-	if _, err := execCLI(ctx, t, c, 2, "node", "decommission", "1", "2", "3"); err != nil {
+	// Decommission n1-3, with n3 in absentia, moving the replicas to n4-6.
+	if err := h.decommission(ctx, c.Range(1, 3), 2, "--wait=none"); err != nil {
 		t.Fatal(err)
 	}
 
-	// Stop the remaining two old nodes.
-	c.Stop(ctx, c.Range(1, 2))
+	t.Status("waiting for zero replicas on n1")
+	h.waitForZeroReplicas(ctx, 1)
 
-	db4 := c.Conn(ctx, 4)
-	defer func() {
-		_ = db4.Close()
-	}()
+	t.Status("waiting for zero replicas on n2")
+	h.waitForZeroReplicas(ctx, 2)
 
-	for _, change := range []string{
-		"RANGE default", "RANGE meta", "RANGE system", "RANGE liveness", "DATABASE system", "TABLE system.jobs",
-	} {
-		stmt := `ALTER ` + change + ` CONFIGURE ZONE = 'constraints: {"-deadnode"}'`
-		c.l.Printf(stmt + "\n")
-		if _, err := db4.ExecContext(ctx, stmt); err != nil {
-			t.Fatal(err)
-		}
-	}
+	// Stop the remaining two old nodes, no replicas remaining there.
+	c.Stop(ctx, c.Range(1, 2))
+
+	// Set up zone configs to isolate nodes with the `deadNodeAttr` attribute.
+	// We'll later start n3 with this attribute so that no new replicas are
+	// placed on it.
+	h.isolateDeadNodes(ctx, 4) // Run this on n4 (it's live, that's all that matters).
 
 	// Recommission n3 so that when it starts again, it doesn't even know that
-	// it was decommissioned (being decommissioning basically lets the replica
+	// it was marked for decommissioning (which basically lets the replica
 	// GC queue run wild). We also recommission the other nodes, for if we didn't,
-	// n3 would learn that they are decommissioned and would try to perform
-	// replication changes on its ranges, which acquires the lease, which hits
-	// the eager GC path since the Raft groups get initialized.
-	if _, err := execCLI(ctx, t, c, 4, "node", "recommission", "1", "2", "3"); err != nil {
+	// n3 would learn that they were marked for decommissioning, and would try
+	// to perform replication changes on its ranges, which acquires the lease,
+	// which hits the eager GC path since the Raft groups get initialized.
+	if err := h.recommission(ctx, c.Range(1, 3), 4); err != nil {
 		t.Fatal(err)
 	}
@@ -123,43 +114,127 @@ func runReplicaGCChangedPeers(ctx context.Context, t *test, c *cluster, withRest
 	}
 
 	// Restart n3. We have to manually tell it where to find a new node or it
-	// won't be able to connect. Give it the attribute that we've used as a
-	// negative constraint for "everything" so that no new replicas are added
-	// to this node.
-	addr4 := c.InternalAddr(ctx, c.Node(4))[0]
+	// won't be able to connect. Give it the deadNodeAttr attribute that we've
+	// used as a negative constraint for "everything", which should prevent new
+	// replicas from being added to it.
 	c.Start(ctx, t, c.Node(3), startArgs(
-		"--args=--join="+addr4,
-		"--args=--attrs=deadnode",
+		"--args=--join="+c.InternalAddr(ctx, c.Node(4))[0],
+		"--args=--attrs="+deadNodeAttr,
 		"--args=--vmodule=raft=5,replicate_queue=5,allocator=5",
 		"--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms",
 	))
 
-	db3 := c.Conn(ctx, 3)
+	// Loop for two metric sample intervals (10s) to make sure n3 doesn't see any
+	// underreplicated ranges.
+	h.waitForZeroReplicas(ctx, 3)
+
+	// Restart the remaining nodes to satisfy the dead node detector.
+	c.Start(ctx, t, c.Range(1, 2))
+}
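For reference, the h.decommission(ctx, c.Range(1, 3), 2, "--wait=none") call above boils down to a plain CLI invocation assembled by the helper defined just below; a sketch of the expansion:

package main

import (
	"fmt"
	"strconv"
)

// Mirrors the argument assembly in the decommission helper: verbs first,
// then the target node IDs. The result is run via execCLI on the run node
// (n2 here), i.e. effectively `cockroach node decommission --wait=none 1 2 3`.
func main() {
	args := []string{"node", "decommission", "--wait=none"}
	for _, target := range []int{1, 2, 3} {
		args = append(args, strconv.Itoa(target))
	}
	fmt.Println(args) // [node decommission --wait=none 1 2 3]
}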
+
+type replicagcTestHelper struct {
+	t *test
+	c *cluster
+}
+
+func (h *replicagcTestHelper) waitForFullReplication(ctx context.Context) {
+	db := h.c.Conn(ctx, 1)
 	defer func() {
-		_ = db3.Close()
+		_ = db.Close()
 	}()
 
-	// Loop for two metric sample intervals (10s) to make sure n3 doesn't see any
-	// underreplicated ranges.
-	var sawNonzero bool
-	var n int
-	for tBegin := timeutil.Now(); timeutil.Since(tBegin) < 5*time.Minute; time.Sleep(time.Second) {
-		if err := db3.QueryRowContext(
-			ctx,
-			`SELECT value FROM crdb_internal.node_metrics WHERE name = 'replicas'`,
-		).Scan(&n); err != nil {
-			t.Fatal(err)
+	for {
+		var fullReplicated bool
+		if err := db.QueryRow(
+			// Check if all ranges are fully replicated.
+			"SELECT min(array_length(replicas, 1)) >= 3 FROM crdb_internal.ranges",
+		).Scan(&fullReplicated); err != nil {
+			h.t.Fatal(err)
 		}
-		c.l.Printf("%d replicas on n3\n", n)
-		if sawNonzero && n == 0 {
+		if fullReplicated {
+			break
+		}
+		time.Sleep(time.Second)
+	}
+}
+
+func (h *replicagcTestHelper) waitForZeroReplicas(ctx context.Context, targetNode int) {
+	db := h.c.Conn(ctx, targetNode)
+	defer func() {
+		_ = db.Close()
+	}()
+
+	var n = 0
+	for tBegin := timeutil.Now(); timeutil.Since(tBegin) < 5*time.Minute; time.Sleep(5 * time.Second) {
+		n = h.numReplicas(ctx, db, targetNode)
+		if n == 0 {
 			break
 		}
-		sawNonzero = true
 	}
 	if n != 0 {
-		t.Fatalf("replica count didn't drop to zero: %d", n)
+		h.t.Fatalf("replica count on n%d didn't drop to zero: %d", targetNode, n)
 	}
+}
 
-	// Restart the remaining nodes to satisfy the dead node detector.
-	c.Start(ctx, t, c.Range(1, 2))
+// numReplicas returns the number of replicas found on targetNode, given a db
+// connected to that node.
+func (h *replicagcTestHelper) numReplicas(ctx context.Context, db *gosql.DB, targetNode int) int {
+	var n int
+	if err := db.QueryRowContext(
+		ctx,
+		`SELECT value FROM crdb_internal.node_metrics WHERE name = 'replicas'`,
+	).Scan(&n); err != nil {
+		h.t.Fatal(err)
+	}
+	h.c.l.Printf("found %d replicas on n%d\n", n, targetNode)
+	return n
+}
+
+// decommission decommissions the given targetNodes, running the process
+// through the specified runNode.
+func (h *replicagcTestHelper) decommission(
+	ctx context.Context, targetNodes nodeListOption, runNode int, verbs ...string,
+) error {
+	args := []string{"node", "decommission"}
+	args = append(args, verbs...)
+
+	for _, target := range targetNodes {
+		args = append(args, strconv.Itoa(target))
+	}
+	_, err := execCLI(ctx, h.t, h.c, runNode, args...)
+	return err
+}
+
+// recommission recommissions the given targetNodes, running the process
+// through the specified runNode.
+func (h *replicagcTestHelper) recommission(
+	ctx context.Context, targetNodes nodeListOption, runNode int, verbs ...string,
+) error {
+	args := []string{"node", "recommission"}
+	args = append(args, verbs...)
+	for _, target := range targetNodes {
+		args = append(args, strconv.Itoa(target))
+	}
+	_, err := execCLI(ctx, h.t, h.c, runNode, args...)
+	return err
+}
+
+// isolateDeadNodes sets up the zone configs so as to avoid replica placement
+// on nodes started with deadNodeAttr. The attribute can then be used as a
+// negative constraint for everything.
+func (h *replicagcTestHelper) isolateDeadNodes(ctx context.Context, runNode int) { + db := h.c.Conn(ctx, runNode) + defer func() { + _ = db.Close() + }() + + for _, change := range []string{ + "RANGE default", "RANGE meta", "RANGE system", "RANGE liveness", "DATABASE system", "TABLE system.jobs", + } { + stmt := `ALTER ` + change + ` CONFIGURE ZONE = 'constraints: {"-` + deadNodeAttr + `"}'` + h.c.l.Printf(stmt + "\n") + if _, err := db.ExecContext(ctx, stmt); err != nil { + h.t.Fatal(err) + } + } } diff --git a/pkg/cmd/roachtest/restore.go b/pkg/cmd/roachtest/restore.go index 37ccf04d178f..5e898baf2ecb 100644 --- a/pkg/cmd/roachtest/restore.go +++ b/pkg/cmd/roachtest/restore.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/httputil" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // HealthChecker runs a regular check that verifies that a specified subset diff --git a/pkg/cmd/roachtest/scaledata.go b/pkg/cmd/roachtest/scaledata.go index 9fd5d9abf97e..6e9b27e50aea 100644 --- a/pkg/cmd/roachtest/scaledata.go +++ b/pkg/cmd/roachtest/scaledata.go @@ -13,11 +13,11 @@ package main import ( "context" "fmt" - "runtime" "strings" "time" "github.com/cockroachdb/cockroach/pkg/util/binfetcher" + "github.com/cockroachdb/errors" ) func registerScaleData(r *testRegistry) { @@ -31,20 +31,27 @@ func registerScaleData(r *testRegistry) { // The map provides a mapping between application name and command-line // flags unique to that application. apps := map[string]string{ - "distributed_semaphore": "", - "filesystem_simulator": "", - "jobcoordinator": "--num_jobs_per_worker=8 --job_period_scale_millis=100", + "distributed-semaphore": "", + "filesystem-simulator": "", + "job-coordinator": "--num_jobs_per_worker=8 --job_period_scale_millis=100", } for app, flags := range apps { app, flags := app, flags // copy loop iterator vars const duration = 10 * time.Minute for _, n := range []int{3, 6} { + var skip, skipDetail string + if app == "job-coordinator" { + skip = "skipping flaky scaledata/job-coordinator test" + skipDetail = "work underway to deflake https://github.com/cockroachdb/cockroach/issues/51765" + } r.Add(testSpec{ - Name: fmt.Sprintf("scaledata/%s/nodes=%d", app, n), - Owner: OwnerKV, - Timeout: 2 * duration, - Cluster: makeClusterSpec(n + 1), + Name: fmt.Sprintf("scaledata/%s/nodes=%d", app, n), + Owner: OwnerKV, + Timeout: 2 * duration, + Cluster: makeClusterSpec(n + 1), + Skip: skip, + SkipDetails: skipDetail, Run: func(ctx context.Context, t *test, c *cluster) { runSqlapp(ctx, t, c, app, flags, duration) }, @@ -58,21 +65,28 @@ func runSqlapp(ctx context.Context, t *test, c *cluster, app, flags string, dur roachNodes := c.Range(1, roachNodeCount) appNode := c.Node(c.spec.NodeCount) - if local && runtime.GOOS != "linux" { - t.Fatalf("must run on linux os, found %s", runtime.GOOS) - } - b, err := binfetcher.Download(ctx, binfetcher.Options{ - Component: "rubrik", - Binary: app, - Version: "LATEST", - GOOS: "linux", - GOARCH: "amd64", - }) - if err != nil { - t.Fatal(err) - } + if local { + appBinary, err := findBinary("", app) + if err != nil { + err = errors.WithHint(err, + "place binaries built from cockroachdb/rksql in repo root, or add to $PATH") + t.Fatal(err) + } + c.Put(ctx, appBinary, app, appNode) + } else { + b, err := binfetcher.Download(ctx, binfetcher.Options{ + Component: 
"rubrik", + Binary: app, + Version: "LATEST", + GOOS: "linux", + GOARCH: "amd64", + }) + if err != nil { + t.Fatal(err) + } - c.Put(ctx, b, app, appNode) + c.Put(ctx, b, app, appNode) + } c.Put(ctx, cockroach, "./cockroach", roachNodes) c.Start(ctx, t, roachNodes) diff --git a/pkg/cmd/roachtest/schemachange.go b/pkg/cmd/roachtest/schemachange.go index 28291a8f64c8..612173ebef00 100644 --- a/pkg/cmd/roachtest/schemachange.go +++ b/pkg/cmd/roachtest/schemachange.go @@ -17,12 +17,12 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) -func registerSchemaChangeKV(r *testRegistry) { +func registerSchemaChangeDuringKV(r *testRegistry) { r.Add(testSpec{ - Name: `schemachange/mixed/kv`, + Name: `schemachange/during/kv`, Owner: OwnerSQLSchema, Cluster: makeClusterSpec(5), Run: func(ctx context.Context, t *test, c *cluster) { @@ -303,7 +303,7 @@ func makeIndexAddTpccTest(spec clusterSpec, warehouses int, length time.Duration Warehouses: warehouses, // We limit the number of workers because the default results in a lot // of connections which can lead to OOM issues (see #40566). - Extra: fmt.Sprintf("--wait=false --tolerate-errors --workers=%d", warehouses), + ExtraRunArgs: fmt.Sprintf("--wait=false --tolerate-errors --workers=%d", warehouses), During: func(ctx context.Context) error { return runAndLogStmts(ctx, t, c, "addindex", []string{ `CREATE UNIQUE INDEX ON tpcc.order (o_entry_d, o_w_id, o_d_id, o_carrier_id, o_id);`, @@ -311,7 +311,8 @@ func makeIndexAddTpccTest(spec clusterSpec, warehouses int, length time.Duration `CREATE INDEX ON tpcc.customer (c_last, c_first);`, }) }, - Duration: length, + Duration: length, + SetupType: usingImport, }) }, MinVersion: "v19.1.0", @@ -402,13 +403,13 @@ func makeSchemaChangeBulkIngestTest(numNodes, numRows int, length time.Duration) } } -func registerMixedSchemaChangesTPCC1000(r *testRegistry) { - r.Add(makeMixedSchemaChanges(makeClusterSpec(5, cpu(16)), 1000, time.Hour*3)) +func registerSchemaChangeDuringTPCC1000(r *testRegistry) { + r.Add(makeSchemaChangeDuringTPCC(makeClusterSpec(5, cpu(16)), 1000, time.Hour*3)) } -func makeMixedSchemaChanges(spec clusterSpec, warehouses int, length time.Duration) testSpec { +func makeSchemaChangeDuringTPCC(spec clusterSpec, warehouses int, length time.Duration) testSpec { return testSpec{ - Name: "schemachange/mixed/tpcc", + Name: "schemachange/during/tpcc", Owner: OwnerSQLSchema, Cluster: spec, Timeout: length * 3, @@ -417,17 +418,17 @@ func makeMixedSchemaChanges(spec clusterSpec, warehouses int, length time.Durati Warehouses: warehouses, // We limit the number of workers because the default results in a lot // of connections which can lead to OOM issues (see #40566). - Extra: fmt.Sprintf("--wait=false --tolerate-errors --workers=%d", warehouses), + ExtraRunArgs: fmt.Sprintf("--wait=false --tolerate-errors --workers=%d", warehouses), During: func(ctx context.Context) error { if t.IsBuildVersion(`v19.2.0`) { - if err := runAndLogStmts(ctx, t, c, "mixed-schema-changes-19.2", []string{ + if err := runAndLogStmts(ctx, t, c, "during-schema-changes-19.2", []string{ // CREATE TABLE AS with a specified primary key was added in 19.2. 
`CREATE TABLE tpcc.orderpks (o_w_id, o_d_id, o_id, PRIMARY KEY(o_w_id, o_d_id, o_id)) AS select o_w_id, o_d_id, o_id FROM tpcc.order;`, }); err != nil { return err } } else { - if err := runAndLogStmts(ctx, t, c, "mixed-schema-changes-19.1", []string{ + if err := runAndLogStmts(ctx, t, c, "during-schema-changes-19.1", []string{ `CREATE TABLE tpcc.orderpks (o_w_id INT, o_d_id INT, o_id INT, PRIMARY KEY(o_w_id, o_d_id, o_id));`, // We can't populate the table with CREATE TABLE AS, so just // insert the rows. The limit exists to reduce contention. @@ -436,7 +437,7 @@ func makeMixedSchemaChanges(spec clusterSpec, warehouses int, length time.Durati return err } } - return runAndLogStmts(ctx, t, c, "mixed-schema-changes", []string{ + return runAndLogStmts(ctx, t, c, "during-schema-changes", []string{ `CREATE INDEX ON tpcc.order (o_carrier_id);`, `CREATE TABLE tpcc.customerpks (c_w_id INT, c_d_id INT, c_id INT, FOREIGN KEY (c_w_id, c_d_id, c_id) REFERENCES tpcc.customer (c_w_id, c_d_id, c_id));`, @@ -455,7 +456,8 @@ func makeMixedSchemaChanges(spec clusterSpec, warehouses int, length time.Durati `DROP TABLE tpcc.readytodrop CASCADE;`, }) }, - Duration: length, + Duration: length, + SetupType: usingImport, }) }, MinVersion: "v19.1.0", diff --git a/pkg/cmd/roachtest/schemachange_random_load.go b/pkg/cmd/roachtest/schemachange_random_load.go new file mode 100644 index 000000000000..dd996636c3f4 --- /dev/null +++ b/pkg/cmd/roachtest/schemachange_random_load.go @@ -0,0 +1,103 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "context" + "fmt" + "strings" +) + +type randomLoadBenchSpec struct { + Nodes int + Ops int + Concurrency int +} + +func registerSchemaChangeRandomLoad(r *testRegistry) { + r.Add(testSpec{ + Name: "schemachange/random-load", + Owner: OwnerSQLSchema, + Cluster: makeClusterSpec(3), + MinVersion: "v20.1.0", + Run: func(ctx context.Context, t *test, c *cluster) { + maxOps := 5000 + concurrency := 20 + if local { + maxOps = 1000 + concurrency = 2 + } + runSchemaChangeRandomLoad(ctx, t, c, maxOps, concurrency) + }, + }) + + // Run a few representative scbench specs in CI. 
+ registerRandomLoadBenchSpec(r, randomLoadBenchSpec{ + Nodes: 3, + Ops: 2000, + Concurrency: 1, + }) + + registerRandomLoadBenchSpec(r, randomLoadBenchSpec{ + Nodes: 3, + Ops: 10000, + Concurrency: 20, + }) +} + +func registerRandomLoadBenchSpec(r *testRegistry, b randomLoadBenchSpec) { + nameParts := []string{ + "scbench", + "randomload", + fmt.Sprintf("nodes=%d", b.Nodes), + fmt.Sprintf("ops=%d", b.Ops), + fmt.Sprintf("conc=%d", b.Concurrency), + } + name := strings.Join(nameParts, "/") + + r.Add(testSpec{ + Name: name, + Owner: OwnerSQLSchema, + Cluster: makeClusterSpec(b.Nodes), + MinVersion: "v20.1.0", + Run: func(ctx context.Context, t *test, c *cluster) { + runSchemaChangeRandomLoad(ctx, t, c, b.Ops, b.Concurrency) + }, + }) +} + +func runSchemaChangeRandomLoad(ctx context.Context, t *test, c *cluster, maxOps, concurrency int) { + loadNode := c.Node(1) + roachNodes := c.Range(1, c.spec.NodeCount) + t.Status("copying binaries") + c.Put(ctx, cockroach, "./cockroach", roachNodes) + c.Put(ctx, workload, "./workload", loadNode) + + t.Status("starting cockroach nodes") + c.Start(ctx, t, roachNodes) + c.Run(ctx, loadNode, "./workload init schemachange") + + runCmd := []string{ + "./workload run schemachange --verbose=1", + // The workload is still in development and occasionally discovers schema + // change errors so for now we don't fail on them but only on panics, server + // crashes, deadlocks, etc. + // TODO(spaskob): remove when https://github.com/cockroachdb/cockroach/issues/47430 + // is closed. + "--tolerate-errors=true", + // Save the histograms so that they can be reported to https://roachperf.crdb.dev/. + " --histograms=" + perfArtifactsDir + "/stats.json", + fmt.Sprintf("--max-ops %d", maxOps), + fmt.Sprintf("--concurrency %d", concurrency), + } + t.Status("running schemachange workload") + c.Run(ctx, loadNode, runCmd...) +} diff --git a/pkg/cmd/roachtest/scrub.go b/pkg/cmd/roachtest/scrub.go index ea552e37ce49..38e126ff14fb 100644 --- a/pkg/cmd/roachtest/scrub.go +++ b/pkg/cmd/roachtest/scrub.go @@ -52,8 +52,8 @@ func makeScrubTPCCTest( Cluster: makeClusterSpec(numNodes), Run: func(ctx context.Context, t *test, c *cluster) { runTPCC(ctx, t, c, tpccOptions{ - Warehouses: warehouses, - Extra: "--wait=false --tolerate-errors", + Warehouses: warehouses, + ExtraRunArgs: "--wait=false --tolerate-errors", During: func(ctx context.Context) error { if !c.isLocal() { // Wait until tpcc has been running for a few minutes to start SCRUB checks @@ -81,7 +81,8 @@ func makeScrubTPCCTest( } return nil }, - Duration: length, + Duration: length, + SetupType: usingImport, }) }, MinVersion: "v19.1.0", diff --git a/pkg/cmd/roachtest/secondary_indexes.go b/pkg/cmd/roachtest/secondary_indexes.go index bfd6feb97e2b..552fb54a2c6b 100644 --- a/pkg/cmd/roachtest/secondary_indexes.go +++ b/pkg/cmd/roachtest/secondary_indexes.go @@ -12,30 +12,79 @@ package main import ( "context" - gosql "database/sql" - "runtime" - "github.com/cockroachdb/cockroach/pkg/util/binfetcher" "github.com/stretchr/testify/require" ) -func registerSecondaryIndexesMultiVersionCluster(r *testRegistry) { - runTest := func(ctx context.Context, t *test, c *cluster) { - // Start a 3 node 19.2 cluster. 
- goos := ifLocal(runtime.GOOS, "linux") - b, err := binfetcher.Download(ctx, binfetcher.Options{ - Binary: "cockroach", - Version: "v19.2.2", - GOOS: goos, - GOARCH: "amd64", - }) - if err != nil { - t.Fatal(err) - } - c.Put(ctx, b, "./cockroach", c.All()) - c.Start(ctx, t, c.All()) - // Create a table with some data, and a secondary index. - conn := c.Conn(ctx, 1) +// runIndexUpgrade runs a test that creates an index before a version upgrade, +// and modifies it in a mixed version setting. It aims to test the changes made +// to index encodings done to allow secondary indexes to respect column families. +func runIndexUpgrade(ctx context.Context, t *test, c *cluster, predecessorVersion string) { + firstExpected := [][]int{ + {2, 3, 4}, + {6, 7, 8}, + {10, 11, 12}, + {14, 15, 17}, + } + secondExpected := [][]int{ + {2, 3, 4}, + {6, 7, 8}, + {10, 11, 12}, + {14, 15, 17}, + {21, 25, 25}, + } + + roachNodes := c.All() + // An empty string means that the cockroach binary specified by flag + // `cockroach` will be used. + const mainVersion = "" + u := newVersionUpgradeTest(c, + uploadAndStart(roachNodes, predecessorVersion), + waitForUpgradeStep(roachNodes), + + // Fill the cluster with data. + createDataStep(), + + // Upgrade one of the nodes. + binaryUpgradeStep(c.Node(1), mainVersion), + + // Modify index data from that node. + modifyData(1, + `INSERT INTO t VALUES (13, 14, 15, 16)`, + `UPDATE t SET w = 17 WHERE y = 14`, + ), + + // Ensure all nodes see valid index data. + verifyTableData(1, firstExpected), + verifyTableData(2, firstExpected), + verifyTableData(3, firstExpected), + + // Upgrade the rest of the cluster. + binaryUpgradeStep(c.Node(2), mainVersion), + binaryUpgradeStep(c.Node(3), mainVersion), + + // Finalize the upgrade. + allowAutoUpgradeStep(1), + waitForUpgradeStep(roachNodes), + + // Modify some more data now that the cluster is upgraded. + modifyData(1, + `INSERT INTO t VALUES (20, 21, 22, 23)`, + `UPDATE t SET w = 25, z = 25 WHERE y = 21`, + ), + + // Ensure all nodes see valid index data. + verifyTableData(1, secondExpected), + verifyTableData(2, secondExpected), + verifyTableData(3, secondExpected), + ) + + u.run(ctx, t) +} + +func createDataStep() versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + conn := u.conn(ctx, t, 1) if _, err := conn.Exec(` CREATE TABLE t ( x INT PRIMARY KEY, y INT, z INT, w INT, @@ -46,86 +95,52 @@ INSERT INTO t VALUES (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12); `); err != nil { t.Fatal(err) } - t.Status("created sample data") + } +} - upgradeNode := func(node int) { - if err := c.StopCockroachGracefullyOnNode(ctx, node); err != nil { +func modifyData(node int, sql ...string) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + // Write some data into the table. + conn := u.conn(ctx, t, node) + for _, s := range sql { + if _, err := conn.Exec(s); err != nil { t.Fatal(err) } - c.Put(ctx, cockroach, "./cockroach", c.Node(node)) - c.Start(ctx, t, c.Node(node)) } + } +} - // Upgrade one of the nodes to the current cockroach version. - upgradeNode(1) - t.Status("done upgrading node 1") - - // Get a connection to the new node and ensure that we can read the index fine, and - // an insert in the mixed cluster setting doesn't result in unreadable data. 
-		conn = c.Conn(ctx, 1)
-		if _, err := conn.Exec(`INSERT INTO t VALUES (13, 14, 15, 16)`); err != nil {
-			t.Fatal(err)
-		}
-		if _, err := conn.Exec(`UPDATE t SET w = 17 WHERE y = 14`); err != nil {
+func verifyTableData(node int, expected [][]int) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		conn := u.conn(ctx, t, node)
+		rows, err := conn.Query(`SELECT y, z, w FROM t@i ORDER BY y`)
+		if err != nil {
 			t.Fatal(err)
 		}
-		verifyTable := func(conn *gosql.DB, expected [][]int) {
-			rows, err := conn.Query(`SELECT y, z, w FROM t@i ORDER BY y`)
-			if err != nil {
+		var y, z, w int
+		count := 0
+		for ; rows.Next(); count++ {
+			if err := rows.Scan(&y, &z, &w); err != nil {
 				t.Fatal(err)
 			}
-			var y, z, w int
-			count := 0
-			for ; rows.Next(); count++ {
-				if err := rows.Scan(&y, &z, &w); err != nil {
-					t.Fatal(err)
-				}
-				found := []int{y, z, w}
-				require.Equal(t, found, expected[count])
-			}
-		}
-		expected := [][]int{
-			{2, 3, 4},
-			{6, 7, 8},
-			{10, 11, 12},
-			{14, 15, 17},
-		}
-		for i := 1; i <= c.spec.NodeCount; i++ {
-			verifyTable(c.Conn(ctx, i), expected)
-		}
-		t.Status("mixed version cluster passed test")
-
-		// Fully upgrade the cluster and ensure that the data is still valid.
-		for i := 2; i <= c.spec.NodeCount; i++ {
-			upgradeNode(i)
-		}
-
-		conn = c.Conn(ctx, 1)
-
-		if _, err := conn.Exec(`INSERT INTO t VALUES (20, 21, 22, 23)`); err != nil {
-			t.Fatal(err)
+			found := []int{y, z, w}
+			require.Equal(t, found, expected[count])
 		}
-		if _, err := conn.Exec(`UPDATE t SET w = 25, z = 25 WHERE y = 21`); err != nil {
-			t.Fatal(err)
-		}
-
-		expected = [][]int{
-			{2, 3, 4},
-			{6, 7, 8},
-			{10, 11, 12},
-			{14, 15, 17},
-			{21, 25, 25},
-		}
-		for i := 1; i <= c.spec.NodeCount; i++ {
-			verifyTable(c.Conn(ctx, i), expected)
-		}
-		t.Status("passed on fully upgraded cluster")
 	}
+}
+
+func registerSecondaryIndexesMultiVersionCluster(r *testRegistry) {
 	r.Add(testSpec{
-		Name:       "secondary-index-multi-version",
-		Owner:      OwnerSQLExec,
+		Name:       "schemachange/secondary-index-multi-version",
+		Owner:      OwnerSQLSchema,
 		Cluster:    makeClusterSpec(3),
 		MinVersion: "v20.1.0",
-		Run:        runTest,
+		Run: func(ctx context.Context, t *test, c *cluster) {
+			predV, err := PredecessorVersion(r.buildVersion)
+			if err != nil {
+				t.Fatal(err)
+			}
+			runIndexUpgrade(ctx, t, c, predV)
+		},
 	})
 }
diff --git a/pkg/cmd/roachtest/split.go b/pkg/cmd/roachtest/split.go
index 8b3ee07c8d8f..a9e901996dd6 100644
--- a/pkg/cmd/roachtest/split.go
+++ b/pkg/cmd/roachtest/split.go
@@ -19,9 +19,9 @@ import (
 	"time"
 
 	"github.com/cockroachdb/cockroach/pkg/util/retry"
+	"github.com/cockroachdb/errors"
 	humanize "github.com/dustin/go-humanize"
 	_ "github.com/lib/pq"
-	"github.com/pkg/errors"
 )
 
 type splitParams struct {
diff --git a/pkg/cmd/roachtest/sqlalchemy.go b/pkg/cmd/roachtest/sqlalchemy.go
index 998283a33c28..d6f65c54087a 100644
--- a/pkg/cmd/roachtest/sqlalchemy.go
+++ b/pkg/cmd/roachtest/sqlalchemy.go
@@ -19,216 +19,196 @@ import (
 	"strings"
 )
 
-var sqlAlchemyResultRegex = regexp.MustCompile(`^(?P<test>test.*::.*::[^ \[\]]*(?:\[.*])?) (?P<result>.*)$`)
+var sqlAlchemyResultRegex = regexp.MustCompile(`^(?P<test>test.*::.*::[^ \[\]]*(?:\[.*])?) (?P<result>\w+)\s+\[.+]$`)
 var sqlAlchemyReleaseTagRegex = regexp.MustCompile(`^rel_(?P<major>\d+)_(?P<minor>\d+)_(?P<point>\d+)$`)
 
+var supportedSQLAlchemyTag = "rel_1_3_17"
+
 // This test runs the SQLAlchemy dialect test suite against a single Cockroach
 // node.
func registerSQLAlchemy(r *testRegistry) { - runSQLAlchemy := func( - ctx context.Context, - t *test, - c *cluster, - ) { - if c.isLocal() { - t.Fatal("cannot be run in local mode") - } - node := c.Node(1) - t.Status("setting up cockroach") - c.Put(ctx, cockroach, "./cockroach", c.All()) - c.Start(ctx, t, c.All()) - - version, err := fetchCockroachVersion(ctx, c, node[0]) - if err != nil { - t.Fatal(err) - } + r.Add(testSpec{ + Name: "sqlalchemy", + Owner: OwnerAppDev, + Cluster: makeClusterSpec(1), + MinVersion: "v2.1.0", + Tags: []string{`default`, `orm`}, + Run: func(ctx context.Context, t *test, c *cluster) { + runSQLAlchemy(ctx, t, c) + }, + }) +} - if err := alterZoneConfigAndClusterSettings(ctx, version, c, node[0]); err != nil { - t.Fatal(err) - } +func runSQLAlchemy(ctx context.Context, t *test, c *cluster) { + if c.isLocal() { + t.Fatal("cannot be run in local mode") + } - t.Status("cloning sqlalchemy and installing prerequisites") - latestTag, err := repeatGetLatestTag(ctx, c, "sqlalchemy", "sqlalchemy", sqlAlchemyReleaseTagRegex) - if err != nil { - t.Fatal(err) - } - c.l.Printf("Latest sqlalchemy release is %s.", latestTag) - - if err := repeatRunE( - ctx, c, node, "update apt-get", - ` - sudo add-apt-repository ppa:deadsnakes/ppa && - sudo apt-get -qq update`, - ); err != nil { - t.Fatal(err) - } + node := c.Node(1) - if err := repeatRunE( - ctx, - c, - node, - "install dependencies", - `sudo apt-get -qq install make python3.7 libpq-dev python3.7-dev gcc python3-setuptools python-setuptools build-essential`, - ); err != nil { - t.Fatal(err) - } + t.Status("cloning sqlalchemy and installing prerequisites") + latestTag, err := repeatGetLatestTag(ctx, c, "sqlalchemy", "sqlalchemy", sqlAlchemyReleaseTagRegex) + if err != nil { + t.Fatal(err) + } + c.l.Printf("Latest sqlalchemy release is %s.", latestTag) + c.l.Printf("Supported sqlalchemy release is %s.", supportedSQLAlchemyTag) + + if err := repeatRunE(ctx, c, node, "update apt-get", ` + sudo add-apt-repository ppa:deadsnakes/ppa && + sudo apt-get -qq update + `); err != nil { + t.Fatal(err) + } - if err := repeatRunE( - ctx, c, node, "set python3.7 as default", ` - sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.5 1 - sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 2 - sudo update-alternatives --config python3`, - ); err != nil { - t.Fatal(err) - } + if err := repeatRunE(ctx, c, node, "install dependencies", ` + sudo apt-get -qq install make python3.7 libpq-dev python3.7-dev gcc python3-setuptools python-setuptools build-essential + `); err != nil { + t.Fatal(err) + } - if err := repeatRunE( - ctx, c, node, "install pip", - `curl https://bootstrap.pypa.io/get-pip.py | sudo -H python3.7`, - ); err != nil { - t.Fatal(err) - } + if err := repeatRunE(ctx, c, node, "set python3.7 as default", ` + sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.5 1 + sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 2 + sudo update-alternatives --config python3 + `); err != nil { + t.Fatal(err) + } - if err := repeatRunE( - ctx, - c, - node, - "install pytest", - `sudo pip3 install --upgrade --force-reinstall setuptools pytest pytest-xdist psycopg2`, - ); err != nil { - t.Fatal(err) - } + if err := repeatRunE(ctx, c, node, "install pip", ` + curl https://bootstrap.pypa.io/get-pip.py | sudo -H python3.7 + `); err != nil { + t.Fatal(err) + } - if err := repeatRunE( - ctx, c, node, "remove old cockroachdb-python", `sudo rm -rf 
/mnt/data1/cockroachdb-python`, - ); err != nil { - t.Fatal(err) - } + if err := repeatRunE(ctx, c, node, "install pytest", ` + sudo pip3 install --upgrade --force-reinstall setuptools pytest==6.0.1 pytest-xdist psycopg2 + `); err != nil { + t.Fatal(err) + } - if err := repeatGitCloneE( - ctx, - t.l, - c, - "https://github.com/cockroachdb/cockroachdb-python.git", - "/mnt/data1/cockroachdb-python", - "master", - node, - ); err != nil { - t.Fatal(err) - } + if err := repeatRunE(ctx, c, node, "remove old sqlalchemy-cockroachdb", ` + sudo rm -rf /mnt/data1/sqlalchemy-cockroachdb + `); err != nil { + t.Fatal(err) + } - t.Status("installing cockroachdb-python") - if err := repeatRunE( - ctx, c, node, "installing cockroachdb-python", - `cd /mnt/data1/cockroachdb-python && sudo python3 setup.py install`, - ); err != nil { - t.Fatal(err) - } + if err := repeatGitCloneE(ctx, t.l, c, + "https://github.com/cockroachdb/sqlalchemy-cockroachdb.git", "/mnt/data1/sqlalchemy-cockroachdb", + "master", node); err != nil { + t.Fatal(err) + } - if err := repeatRunE( - ctx, c, node, "remove old sqlalchemy", `sudo rm -rf /mnt/data1/sqlalchemy`, - ); err != nil { - t.Fatal(err) - } + t.Status("installing sqlalchemy-cockroachdb") + if err := repeatRunE(ctx, c, node, "installing sqlalchemy=cockroachdb", ` + cd /mnt/data1/sqlalchemy-cockroachdb && sudo pip3 install . + `); err != nil { + t.Fatal(err) + } - if err := repeatGitCloneE( - ctx, - t.l, - c, - "https://github.com/sqlalchemy/sqlalchemy.git", - "/mnt/data1/sqlalchemy", - latestTag, - node, - ); err != nil { - t.Fatal(err) - } + if err := repeatRunE(ctx, c, node, "remove old sqlalchemy", ` + sudo rm -rf /mnt/data1/sqlalchemy + `); err != nil { + t.Fatal(err) + } - t.Status("building sqlalchemy") - if err := repeatRunE( - ctx, c, node, "building sqlalchemy", `cd /mnt/data1/sqlalchemy && python3 setup.py build`, - ); err != nil { - t.Fatal(err) - } + if err := repeatGitCloneE(ctx, t.l, c, + "https://github.com/sqlalchemy/sqlalchemy.git", "/mnt/data1/sqlalchemy", + supportedSQLAlchemyTag, node); err != nil { + t.Fatal(err) + } - blacklistName, expectedFailures, ignoredlistName, ignoredlist := sqlAlchemyBlacklists.getLists(version) - if expectedFailures == nil { - t.Fatalf("No sqlalchemy blacklist defined for cockroach version %s", version) - } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignoredlist %s", - version, blacklistName, ignoredlistName) - - t.Status("running sqlalchemy test suite") - // Note that this is expected to return an error, since the test suite - // will fail. And it is safe to swallow it here. - rawResults, _ := c.RunWithBuffer(ctx, t.l, node, - `cd /mnt/data1/sqlalchemy/ && pytest -s --maxfail=0 `+ - `--dburi=cockroachdb://root@localhost:26257/defaultdb?sslmode=disable `+ - `test/dialect/test_suite.py`) - - t.Status("collating the test results") - c.l.Printf("Test Results: %s", rawResults) - - // Find all the failed and errored tests. 
- results := newORMTestsResults() - - scanner := bufio.NewScanner(bytes.NewReader(rawResults)) - for scanner.Scan() { - match := sqlAlchemyResultRegex.FindStringSubmatch(scanner.Text()) - if match == nil { - continue - } - test, result := match[1], match[2] - pass := result == "PASSED" || strings.Contains(result, "failed as expected") - skipped := result == "SKIPPED" - results.allTests = append(results.allTests, test) - - ignoredIssue, expectedIgnored := ignoredlist[test] - issue, expectedFailure := expectedFailures[test] - switch { - case expectedIgnored: - results.results[test] = fmt.Sprintf("--- SKIP: %s due to %s (expected)", test, ignoredIssue) - results.ignoredCount++ - case skipped && expectedFailure: - results.results[test] = fmt.Sprintf("--- SKIP: %s (unexpected)", test) - results.unexpectedSkipCount++ - case skipped: - results.results[test] = fmt.Sprintf("--- SKIP: %s (expected)", test) - results.skipCount++ - case pass && !expectedFailure: - results.results[test] = fmt.Sprintf("--- PASS: %s (expected)", test) - results.passExpectedCount++ - case pass && expectedFailure: - results.results[test] = fmt.Sprintf("--- PASS: %s - %s (unexpected)", - test, maybeAddGithubLink(issue), - ) - results.passUnexpectedCount++ - case !pass && expectedFailure: - results.results[test] = fmt.Sprintf("--- FAIL: %s - %s (expected)", - test, maybeAddGithubLink(issue), - ) - results.failExpectedCount++ - results.currentFailures = append(results.currentFailures, test) - case !pass && !expectedFailure: - results.results[test] = fmt.Sprintf("--- FAIL: %s (unexpected)", test) - results.failUnexpectedCount++ - results.currentFailures = append(results.currentFailures, test) - } - results.runTests[test] = struct{}{} - } + t.Status("building sqlalchemy") + if err := repeatRunE(ctx, c, node, "building sqlalchemy", ` + cd /mnt/data1/sqlalchemy && python3 setup.py build + `); err != nil { + t.Fatal(err) + } - results.summarizeAll( - t, "sqlalchemy" /* ormName */, blacklistName, expectedFailures, version, latestTag) + // Phew, after having set up all that, let's actually run the test. + + t.Status("setting up cockroach") + c.Put(ctx, cockroach, "./cockroach", c.All()) + c.Start(ctx, t, c.All()) + + version, err := fetchCockroachVersion(ctx, c, node[0]) + if err != nil { + t.Fatal(err) } - r.Add(testSpec{ - Name: "sqlalchemy", - Owner: OwnerAppDev, - Cluster: makeClusterSpec(1), - MinVersion: "v2.1.0", - Tags: []string{`default`, `orm`}, - Run: func(ctx context.Context, t *test, c *cluster) { - runSQLAlchemy(ctx, t, c) - }, - }) + if err := alterZoneConfigAndClusterSettings(ctx, version, c, node[0]); err != nil { + t.Fatal(err) + } + + blocklistName, expectedFailures, ignoredlistName, ignoredlist := sqlAlchemyBlocklists.getLists(version) + if expectedFailures == nil { + t.Fatalf("No sqlalchemy blocklist defined for cockroach version %s", version) + } + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignoredlist %s", + version, blocklistName, ignoredlistName) + + t.Status("running sqlalchemy test suite") + // Note that this is expected to return an error, since the test suite + // will fail. And it is safe to swallow it here.
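+ // Passing --maxfail=0 asks pytest to keep running past failures so that + // the full set of results can be collated below.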
+ rawResults, _ := c.RunWithBuffer(ctx, t.l, node, + `cd /mnt/data1/sqlalchemy/ && pytest --maxfail=0 \ + --requirements=cockroachdb.sqlalchemy.test_requirements:Requirements \ + --dburi=cockroachdb://root@localhost:26257/defaultdb?sslmode=disable \ + test/dialect/test_suite.py + `) + + t.Status("collating the test results") + c.l.Printf("Test Results: %s", rawResults) + + // Find all the failed and errored tests. + results := newORMTestsResults() + + scanner := bufio.NewScanner(bytes.NewReader(rawResults)) + for scanner.Scan() { + match := sqlAlchemyResultRegex.FindStringSubmatch(scanner.Text()) + if match == nil { + continue + } + test, result := match[1], match[2] + pass := result == "PASSED" || strings.Contains(result, "failed as expected") + skipped := result == "SKIPPED" + results.allTests = append(results.allTests, test) + + ignoredIssue, expectedIgnored := ignoredlist[test] + issue, expectedFailure := expectedFailures[test] + switch { + case expectedIgnored: + results.results[test] = fmt.Sprintf("--- SKIP: %s due to %s (expected)", test, ignoredIssue) + results.ignoredCount++ + case skipped && expectedFailure: + results.results[test] = fmt.Sprintf("--- SKIP: %s (unexpected)", test) + results.unexpectedSkipCount++ + case skipped: + results.results[test] = fmt.Sprintf("--- SKIP: %s (expected)", test) + results.skipCount++ + case pass && !expectedFailure: + results.results[test] = fmt.Sprintf("--- PASS: %s (expected)", test) + results.passExpectedCount++ + case pass && expectedFailure: + results.results[test] = fmt.Sprintf("--- PASS: %s - %s (unexpected)", + test, maybeAddGithubLink(issue), + ) + results.passUnexpectedCount++ + case !pass && expectedFailure: + results.results[test] = fmt.Sprintf("--- FAIL: %s - %s (expected)", + test, maybeAddGithubLink(issue), + ) + results.failExpectedCount++ + results.currentFailures = append(results.currentFailures, test) + case !pass && !expectedFailure: + results.results[test] = fmt.Sprintf("--- FAIL: %s (unexpected)", test) + results.failUnexpectedCount++ + results.currentFailures = append(results.currentFailures, test) + } + results.runTests[test] = struct{}{} + } + + results.summarizeAll( + t, "sqlalchemy" /* ormName */, blocklistName, expectedFailures, version, supportedSQLAlchemyTag) } diff --git a/pkg/cmd/roachtest/sqlalchemy_blacklist.go b/pkg/cmd/roachtest/sqlalchemy_blacklist.go deleted file mode 100644 index 02924befe3e7..000000000000 --- a/pkg/cmd/roachtest/sqlalchemy_blacklist.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package main - -var sqlAlchemyBlacklists = blacklistsForVersion{ - {"v2.1", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, - {"v19.1", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, - {"v19.2", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, - {"v20.1", "sqlAlchemyBlacklist20_1", sqlAlchemyBlacklist20_1, "sqlAlchemyIgnoreList20_1", sqlAlchemyIgnoreList20_1}, -} - -var sqlAlchemyBlacklist20_1 = blacklist{ - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_col": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_deprecated_get_primary_keys": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_dialect_initialize": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_check_constraints": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_check_constraints_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_columns_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_comments": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_comments_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_default_schema_name": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_key_options_ondelete": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_key_options_onupdate": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_keys": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_keys_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_indexes": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_indexes_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_inter_schema_foreign_keys": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_noncol_index_no_pk": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_noncol_index_pk": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_pk_constraint": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_pk_constraint_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_schema_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_names_fks": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_names_with_schema": 
"5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_oid": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_oid_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_tables_and_views": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_indexes": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_unique_constraints": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_view_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_view_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_unique_constraints": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_unique_constraints_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_columns_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_definition": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_definition_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_names_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_nullable_reflection": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_numeric_reflection": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_reflect_expression_based_indexes": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_varchar_reflection": "5807", - "test/dialect/test_suite.py::ComputedColumnTest_cockroachdb+psycopg2_9_5_0::test_select_all": "42418", - "test/dialect/test_suite.py::ComputedColumnTest_cockroachdb+psycopg2_9_5_0::test_select_columns": "42418", - "test/dialect/test_suite.py::DateTimeCoercedToDateTimeTest_cockroachdb+psycopg2_9_5_0::test_null_bound_comparison": "36179", - "test/dialect/test_suite.py::DateTimeCoercedToDateTimeTest_cockroachdb+psycopg2_9_5_0::test_round_trip": "36179", - "test/dialect/test_suite.py::ExpandingBoundInTest_cockroachdb+psycopg2_9_5_0::test_null_in_empty_set_is_false": "41596", - "test/dialect/test_suite.py::HasTableTest_cockroachdb+psycopg2_9_5_0::test_has_table": "26443", - "test/dialect/test_suite.py::HasTableTest_cockroachdb+psycopg2_9_5_0::test_has_table_schema": "26443", - "test/dialect/test_suite.py::LastrowidTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_on_insert": "41690", - 
"test/dialect/test_suite.py::ReturningTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_on_insert_implicit_returning": "41690", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_aliases_and_ss": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_conn_option": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_for_update_expr": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_for_update_string": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_global_expr": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_global_string": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_global_text": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_roundtrip": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_stmt_option": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_stmt_option_disabled": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_text_ss_option": "41412", - "test/dialect/test_suite.py::TableDDLTest_cockroachdb+psycopg2_9_5_0::test_create_table_schema": "unknown", -} - -var sqlAlchemyBlacklist = blacklist{ - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_col": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_deprecated_get_primary_keys": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_dialect_initialize": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_check_constraints": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_check_constraints_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_columns_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_comments": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_comments_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_default_schema_name": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_key_options_ondelete": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_key_options_onupdate": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_keys": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_foreign_keys_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_indexes": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_indexes_with_schema": "5807", - 
"test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_inter_schema_foreign_keys": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_noncol_index_no_pk": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_noncol_index_pk": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_pk_constraint": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_pk_constraint_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_schema_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_names_fks": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_names_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_oid": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_table_oid_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_tables_and_views": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_indexes": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_table_unique_constraints": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_view_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_temp_view_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_unique_constraints": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_unique_constraints_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_columns": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_columns_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_definition": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_definition_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_names": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_get_view_names_with_schema": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_nullable_reflection": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_numeric_reflection": "5807", - "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_reflect_expression_based_indexes": "5807", - 
"test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_varchar_reflection": "5807", - "test/dialect/test_suite.py::ComputedColumnTest_cockroachdb+psycopg2_9_5_0::test_select_all": "42418", - "test/dialect/test_suite.py::ComputedColumnTest_cockroachdb+psycopg2_9_5_0::test_select_columns": "42418", - "test/dialect/test_suite.py::DateTimeCoercedToDateTimeTest_cockroachdb+psycopg2_9_5_0::test_null_bound_comparison": "36179", - "test/dialect/test_suite.py::DateTimeCoercedToDateTimeTest_cockroachdb+psycopg2_9_5_0::test_round_trip": "36179", - "test/dialect/test_suite.py::ExpandingBoundInTest_cockroachdb+psycopg2_9_5_0::test_null_in_empty_set_is_false": "41596", - "test/dialect/test_suite.py::HasTableTest_cockroachdb+psycopg2_9_5_0::test_has_table": "26443", - "test/dialect/test_suite.py::HasTableTest_cockroachdb+psycopg2_9_5_0::test_has_table_schema": "26443", - "test/dialect/test_suite.py::LastrowidTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_on_insert": "41690", - "test/dialect/test_suite.py::ReturningTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_on_insert_implicit_returning": "41690", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_aliases_and_ss": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_conn_option": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_for_update_expr": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_for_update_string": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_global_expr": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_global_string": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_global_text": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_roundtrip": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_stmt_option": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_stmt_option_disabled": "41412", - "test/dialect/test_suite.py::ServerSideCursorsTest_cockroachdb+psycopg2_9_5_0::test_text_ss_option": "41412", - "test/dialect/test_suite.py::TableDDLTest_cockroachdb+psycopg2_9_5_0::test_create_table_schema": "26443", -} - -var sqlAlchemyIgnoreList20_1 = sqlAlchemyIgnoreList - -var sqlAlchemyIgnoreList = blacklist{ - "test/dialect/test_suite.py::TableDDLTest_cockroachdb+psycopg2_9_5_0::test_create_table": "flaky", -} diff --git a/pkg/cmd/roachtest/sqlalchemy_blocklist.go b/pkg/cmd/roachtest/sqlalchemy_blocklist.go new file mode 100644 index 000000000000..e82ca94f6faa --- /dev/null +++ b/pkg/cmd/roachtest/sqlalchemy_blocklist.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package main + +var sqlAlchemyBlocklists = blocklistsForVersion{ + {"v2.1", "sqlAlchemyBlocklist", sqlAlchemyBlocklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, + {"v19.1", "sqlAlchemyBlocklist", sqlAlchemyBlocklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, + {"v19.2", "sqlAlchemyBlocklist", sqlAlchemyBlocklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, + {"v20.1", "sqlAlchemyBlocklist20_1", sqlAlchemyBlocklist20_1, "sqlAlchemyIgnoreList20_1", sqlAlchemyIgnoreList20_1}, + {"v20.2", "sqlAlchemyBlocklist20_2", sqlAlchemyBlocklist20_2, "sqlAlchemyIgnoreList20_2", sqlAlchemyIgnoreList20_2}, +} + +var sqlAlchemyBlocklist20_2 = blocklist{} + +var sqlAlchemyBlocklist20_1 = blocklist{ + "test/dialect/test_suite.py::ExpandingBoundInTest_cockroachdb+psycopg2_9_5_0::test_null_in_empty_set_is_false": "41596", +} + +var sqlAlchemyBlocklist = blocklist{ + "test/dialect/test_suite.py::ExpandingBoundInTest_cockroachdb+psycopg2_9_5_0::test_null_in_empty_set_is_false": "41596", +} + +var sqlAlchemyIgnoreList20_2 = sqlAlchemyIgnoreList + +var sqlAlchemyIgnoreList20_1 = sqlAlchemyIgnoreList + +var sqlAlchemyIgnoreList = blocklist{ + "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_deprecated_get_primary_keys": "test has a bug and is getting removed", +} diff --git a/pkg/cmd/roachtest/sqlsmith.go b/pkg/cmd/roachtest/sqlsmith.go index 88ff7d4d90a2..d5e3fc3a6ad0 100644 --- a/pkg/cmd/roachtest/sqlsmith.go +++ b/pkg/cmd/roachtest/sqlsmith.go @@ -81,6 +81,9 @@ func registerSQLSmith(r *testRegistry) { c.l.Printf("seed: %d", seed) c.Put(ctx, cockroach, "./cockroach") + if err := c.PutLibraries(ctx, "./lib"); err != nil { + t.Fatalf("could not initialize libraries: %v", err) + } c.Start(ctx, t) setupFunc, ok := setups[setupName] @@ -95,6 +98,15 @@ func registerSQLSmith(r *testRegistry) { setup := setupFunc(rng) setting := settingFunc(rng) + // We will enable panic injection on this connection in the vectorized + // engine (and will ignore the injected errors) in order to test that + // the panic-catching mechanism of error propagation works as expected. + // TODO(yuzefovich): this setting is only supported on master (i.e. + // after 20.2 version), so we need to gate it, yet we can't do so + // because 21.1 version hasn't been minted yet. We skip this check for + // now. + //setup += "SET testing_vectorize_inject_panics=true;" + conn := c.Conn(ctx, 1) t.Status("executing setup") c.l.Printf("setup:\n%s", setup) @@ -193,7 +205,7 @@ func registerSQLSmith(r *testRegistry) { // NB: sqlsmith failures should never block a release. 
Owner: OwnerSQLExec, Cluster: makeClusterSpec(4), - MinVersion: "v20.1.0", + MinVersion: "v20.2.0", Timeout: time.Minute * 20, Run: func(ctx context.Context, t *test, c *cluster) { runSQLSmith(ctx, t, c, setup, setting) diff --git a/pkg/cmd/roachtest/synctest.go b/pkg/cmd/roachtest/synctest.go index e79fd5b9034a..b5915bc45ad3 100644 --- a/pkg/cmd/roachtest/synctest.go +++ b/pkg/cmd/roachtest/synctest.go @@ -28,6 +28,7 @@ fi ` r.Add(testSpec{ + Skip: "#48603: broken on Pebble", Name: "synctest", Owner: OwnerStorage, MinVersion: "v19.1.0", diff --git a/pkg/cmd/roachtest/sysbench.go b/pkg/cmd/roachtest/sysbench.go index 2222ab8ecff8..10fe27dfd2f4 100644 --- a/pkg/cmd/roachtest/sysbench.go +++ b/pkg/cmd/roachtest/sysbench.go @@ -13,6 +13,7 @@ package main import ( "context" "fmt" + "strings" "time" ) @@ -111,7 +112,13 @@ func runSysbench(ctx context.Context, t *test, c *cluster, opts sysbenchOptions) c.Run(ctx, loadNode, opts.cmd(false /* haproxy */)+" prepare") t.Status("running workload") - c.Run(ctx, loadNode, opts.cmd(true /* haproxy */)+" run") + err := c.RunE(ctx, loadNode, opts.cmd(true /* haproxy */)+" run") + // Sysbench occasionally segfaults. When that happens, don't fail the + // test. + if err != nil && !strings.Contains(err.Error(), "Segmentation fault") { + return err + } + if err != nil { + c.l.Printf("sysbench segfaulted; passing test anyway") + } + return nil }) m.Wait() diff --git a/pkg/cmd/roachtest/test.go b/pkg/cmd/roachtest/test.go index 688974c3c5bf..8e6992176172 100644 --- a/pkg/cmd/roachtest/test.go +++ b/pkg/cmd/roachtest/test.go @@ -17,12 +17,11 @@ import ( "io" // For the debug http handlers. _ "net/http/pprof" - "os/exec" - "regexp" "runtime" "strings" "time" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" @@ -234,22 +233,35 @@ func (t *test) WorkerProgress(frac float64) { t.progress(goid.Get(), frac) } -// Skip records msg into t.spec.Skip and calls runtime.Goexit() - thus -// interrupting the running of the test. -func (t *test) Skip(msg string, details string) { - t.spec.Skip = msg - t.spec.SkipDetails = details - runtime.Goexit() +var _ skip.SkippableTest = (*test)(nil) + +// Skip skips the test. The first argument, if any, is the main message. +// The remaining arguments, if any, form the details. +// This implements the skip.SkippableTest interface. +func (t *test) Skip(args ...interface{}) { + if len(args) > 0 { + t.spec.Skip = fmt.Sprint(args[0]) + args = args[1:] + } + t.spec.SkipDetails = fmt.Sprint(args...) + panic(errTestFatal) +} + +// Skipf skips the test. The formatted message becomes the skip reason. +// This implements the skip.SkippableTest interface. +func (t *test) Skipf(format string, args ...interface{}) { + t.spec.Skip = fmt.Sprintf(format, args...) + panic(errTestFatal) } // Fatal marks the test as failed, prints the args to t.l, and calls -// runtime.GoExit(). It can be called multiple times. +// panic(errTestFatal). It can be called multiple times. // // If the only argument is an error, it is formatted by "%+v", so it will show // stack traces and such. // -// ATTENTION: Since this calls runtime.GoExit(), it should only be called from a -// test's closure. The test runner itself should never call this. +// ATTENTION: Since this calls panic(errTestFatal), it should only be called +// from a test's closure. The test runner itself should never call this.
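+// (The runner's recover() hooks treat errTestFatal as a sentinel and do not +// record it as a test panic; see the recover calls in test_runner.go.)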
func (t *test) Fatal(args ...interface{}) { t.fatalfInner("" /* format */, args...) } @@ -276,7 +288,7 @@ func (t *test) fatalfInner(format string, args ...interface{}) { } else { t.printAndFail(2 /* skip */, args...) } - runtime.Goexit() + panic(errTestFatal) } // FatalIfErr calls t.Fatal() if err != nil. @@ -456,39 +468,6 @@ func teamCityNameEscape(name string) string { return strings.Replace(name, ",", "_", -1) } -// getAuthorEmail retrieves the author of a line of code. Returns the empty -// string if the author cannot be determined. Some test tags override this -// behavior and have a hardcoded author email. -func getAuthorEmail(tags []string, file string, line int) string { - for _, tag := range tags { - if tag == `orm` || tag == `driver` { - return `rafi@cockroachlabs.com` - } - } - const repo = "github.com/cockroachdb/cockroach/" - i := strings.Index(file, repo) - if i == -1 { - return "" - } - file = file[i+len(repo):] - - cmd := exec.Command(`/bin/bash`, `-c`, - fmt.Sprintf(`git blame --porcelain -L%d,+1 $(git rev-parse --show-toplevel)/%s | grep author-mail`, - line, file)) - // This command returns output such as: - // author-mail - out, err := cmd.CombinedOutput() - if err != nil { - return "" - } - re := regexp.MustCompile("author-mail <(.*)>") - matches := re.FindSubmatch(out) - if matches == nil { - return "" - } - return string(matches[1]) -} - type testWithCount struct { spec testSpec // count maintains the number of runs remaining for a test. diff --git a/pkg/cmd/roachtest/test_registry.go b/pkg/cmd/roachtest/test_registry.go index d411920602a0..b3cef0f7696b 100644 --- a/pkg/cmd/roachtest/test_registry.go +++ b/pkg/cmd/roachtest/test_registry.go @@ -20,7 +20,7 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/util/version" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // Owner is a valid entry for the Owners field of a roachtest. They should be @@ -44,23 +44,56 @@ const ( type OwnerMetadata struct { SlackRoom string ContactEmail string + // TriageColumnID is the column id of the project column the team uses to + // triage issues. Unfortunately, there appears to be no way to retrieve this + // programmatically from the API. + // + // To find the triage column for a project, run the following curl command: + // curl -u yourusername:githubaccesstoken -H "Accept: application/vnd.github.inertia-preview+json" \ + // https://api.github.com/repos/cockroachdb/cockroach/projects + // + // Then, for the project you care about, curl its columns URL, which looks + // like this: + // https://api.github.com/projects/3842382/columns + // + // Find the triage column you want, and pick its ID field. + TriageColumnID int } // roachtestOwners maps an owner in code (as specified on a roachtest spec) to // metadata used for github issue posting/slack rooms, etc.
var roachtestOwners = map[Owner]OwnerMetadata{ - OwnerAppDev: {SlackRoom: `app-dev`, ContactEmail: `rafi@cockroachlabs.com`}, - OwnerBulkIO: {SlackRoom: `bulk-io`, ContactEmail: `david@cockroachlabs.com`}, - OwnerCDC: {SlackRoom: `cdc`, ContactEmail: `ajwerner@cockroachlabs.com`}, - OwnerKV: {SlackRoom: `kv`, ContactEmail: `andrei@cockroachlabs.com`}, - OwnerPartitioning: {SlackRoom: `partitioning`, ContactEmail: `andrei@cockroachlabs.com`}, - OwnerSQLExec: {SlackRoom: `sql-execution-team`, ContactEmail: `jordan@cockroachlabs.com`}, - OwnerSQLSchema: {SlackRoom: `sql-schema`, ContactEmail: `lucy@cockroachlabs.com`}, - OwnerStorage: {SlackRoom: `storage`, ContactEmail: `peter@cockroachlabs.com`}, + OwnerAppDev: {SlackRoom: `app-dev`, ContactEmail: `rafi@cockroachlabs.com`, + TriageColumnID: 7259065, + }, + OwnerBulkIO: {SlackRoom: `bulk-io`, ContactEmail: `david@cockroachlabs.com`, + TriageColumnID: 3097123, + }, + OwnerCDC: {SlackRoom: `cdc`, ContactEmail: `ajwerner@cockroachlabs.com`, + TriageColumnID: 3570120, + }, + OwnerKV: {SlackRoom: `kv`, ContactEmail: `andrei@cockroachlabs.com`, + TriageColumnID: 3550674, + }, + OwnerPartitioning: {SlackRoom: `partitioning`, ContactEmail: `andrei@cockroachlabs.com`, + // Partitioning issues get sent to the KV triage column for now. + TriageColumnID: 3550674, + }, + OwnerSQLExec: {SlackRoom: `sql-execution-team`, ContactEmail: `alfonso@cockroachlabs.com`, + TriageColumnID: 6837155, + }, + OwnerSQLSchema: {SlackRoom: `sql-schema`, ContactEmail: `lucy@cockroachlabs.com`, + TriageColumnID: 8946818, + }, + OwnerStorage: {SlackRoom: `storage`, ContactEmail: `peter@cockroachlabs.com`, + TriageColumnID: 6668367, + }, // Only for use in roachtest package unittests. `unittest`: {}, } +const defaultTag = "default" + type testRegistry struct { m map[string]*testSpec // buildVersion is the version of the Cockroach binary that tests will run against. @@ -133,6 +166,10 @@ func (r *testRegistry) prepareSpec(spec *testSpec) error { if _, ok := roachtestOwners[spec.Owner]; !ok { return fmt.Errorf(`%s: unknown owner [%s]`, spec.Name, spec.Owner) } + if len(spec.Tags) == 0 { + spec.Tags = []string{defaultTag} + } + spec.Tags = append(spec.Tags, "owner-"+string(spec.Owner)) return nil } @@ -211,8 +248,8 @@ func newFilter(filter []string) *testFilter { } if len(tag) == 0 { - tag = []string{"default"} - rawTag = []string{"tag:default"} + tag = []string{defaultTag} + rawTag = []string{"tag:" + defaultTag} } makeRE := func(strs []string) *regexp.Regexp { diff --git a/pkg/cmd/roachtest/test_runner.go b/pkg/cmd/roachtest/test_runner.go index a6414dcf3fb7..1b7dfa382775 100644 --- a/pkg/cmd/roachtest/test_runner.go +++ b/pkg/cmd/roachtest/test_runner.go @@ -35,9 +35,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" + "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" "github.com/petermattis/goid" - "github.com/pkg/errors" ) // testRunner runs tests. @@ -370,6 +370,7 @@ func (r *testRunner) runWorker( wStatus.SetCluster(nil) if c == nil { + l.PrintfCtx(ctx, "Worker exiting; no cluster to destroy.") return } doDestroy := ctx.Err() == nil @@ -399,7 +400,7 @@ func (r *testRunner) runWorker( if _, ok := c.spec.ReusePolicy.(reusePolicyNone); ok { wStatus.SetStatus("destroying cluster") // We use a context that can't be canceled for the Destroy(). 
- c.Destroy(ctx, closeLogger, l) + c.Destroy(context.Background(), closeLogger, l) c = nil } } @@ -494,7 +495,7 @@ // On any test failure or error, we destroy the cluster. We could be // more selective, but this sounds safer. l.PrintfCtx(ctx, "destroying cluster %s because: %s", c, failureMsg) - c.Destroy(ctx, closeLogger, l) + c.Destroy(context.Background(), closeLogger, l) c = nil } @@ -593,7 +594,9 @@ func (r *testRunner) runTest( defer func() { t.end = timeutil.Now() - if err := recover(); err != nil { + // We only have to record panics if the panic'd value is not the sentinel + // produced by t.Fatal*(). + if err := recover(); err != nil && err != errTestFatal { t.mu.Lock() t.mu.failed = true t.mu.output = append(t.mu.output, t.decorate(0 /* skip */, fmt.Sprint(err))...) @@ -608,7 +611,6 @@ if t.Failed() { t.mu.Lock() output := fmt.Sprintf("test artifacts and logs in: %s\n", t.ArtifactsDir()) + string(t.mu.output) - failLoc := t.mu.failLoc t.mu.Unlock() if teamCity { @@ -627,13 +629,6 @@ shout(ctx, l, stdout, "--- FAIL: %s (%s)\n%s", t.Name(), durationStr, output) // NB: check NodeCount > 0 to avoid posting issues from this pkg's unit tests. if issues.CanPost() && t.spec.Run != nil && t.spec.Cluster.NodeCount > 0 { - authorEmail := getAuthorEmail(t.spec.Tags, failLoc.file, failLoc.line) - if authorEmail == "" { - ownerInfo, ok := roachtestOwners[t.spec.Owner] - if ok { - authorEmail = ownerInfo.ContactEmail - } - } branch := "" if b := os.Getenv("TC_BUILD_BRANCH"); b != "" { branch = b @@ -651,7 +646,6 @@ TestName: t.Name(), Message: msg, Artifacts: artifacts, - AuthorEmail: authorEmail, // Issues posted from roachtest are identifiable as such and // they are also release blockers (this label may be removed // by a human upon closer investigation). @@ -744,7 +738,9 @@ // This is the call to actually run the test. defer func() { - if r := recover(); r != nil { + // We only have to record panics if the panic'd value is not the sentinel + // produced by t.Fatal*(). + if r := recover(); r != nil && r != errTestFatal { // TODO(andreimatei): prevent the cluster from being reused. t.Fatalf("test panicked: %v", r) } @@ -753,9 +749,21 @@ t.spec.Run(runCtx, t, c) }() + teardownL, err := c.l.ChildLogger("teardown", quietStderr, quietStdout) + if err != nil { + return false, err + } select { case <-done: + s := "success" + if t.Failed() { + s = "failure" + } + c.l.Printf("tearing down after %s; see teardown.log", s) + l, c.l, t.l = teardownL, teardownL, teardownL case <-time.After(timeout): + c.l.Printf("tearing down after timeout; see teardown.log") + l, c.l, t.l = teardownL, teardownL, teardownL // Timeouts are often opaque. Improve our chances by dumping the stack // so that at least we can piece together what the test is trying to // do at this very moment. @@ -807,23 +815,35 @@ // We really shouldn't get here unless the test code somehow managed // to deadlock without blocking on anything remote - since we killed // everything. - msg := "test timed out and afterwards failed to respond to cancelation" + const msg = "test timed out and afterwards failed to respond to cancelation" t.l.PrintfCtx(ctx, msg) r.collectClusterLogs(ctx, c, t.l) // We return an error here because the test goroutine is still running, so // we want to alert the caller of this unusual situation.
- return false, fmt.Errorf(msg) + return false, errors.New(msg) } } - // Detect replica divergence (i.e. ranges in which replicas have arrived - // at the same log position with different states). - c.FailOnReplicaDivergence(ctx, t) // Detect dead nodes in an inner defer. Note that this will call // t.printfAndFail() when appropriate, which will cause the code below to // enter the t.Failed() branch. c.FailOnDeadNodes(ctx, t) + if !t.Failed() { + // Detect replica divergence (i.e. ranges in which replicas have arrived + // at the same log position with different states). + // + // We avoid trying to do this when t.Failed() (and in particular when there + // are dead nodes) because for reasons @tbg does not understand this gets + // stuck occasionally, which really ruins the roachtest run. The method + // below already uses a ctx timeout and SQL statement_timeout, but it does + // not seem to be enough. + // + // TODO(testinfra): figure out why this can still get stuck despite the + // above. + c.FailOnReplicaDivergence(ctx, t) + } + if t.Failed() { r.collectClusterLogs(ctx, c, t.l) return false, nil @@ -856,6 +876,9 @@ func (r *testRunner) collectClusterLogs(ctx context.Context, c *cluster, l *logg if err := c.CopyRoachprodState(ctx); err != nil { l.Printf("failed to copy roachprod state: %s", err) } + if err := c.FetchDiskUsage(ctx); err != nil { + l.Printf("failed to fetch disk usage summary: %s", err) + } if err := c.FetchDebugZip(ctx); err != nil { l.Printf("failed to collect zip: %s", err) } @@ -1136,9 +1159,15 @@ func PredecessorVersion(buildVersion version.Version) (string, error) { buildVersionMajorMinor := fmt.Sprintf("%d.%d", buildVersion.Major(), buildVersion.Minor()) + // NB: you can update the values in this map to point at newer patch + // releases. You will need to run acceptance/version-upgrade with the + // checkpoint option enabled to create the missing store directory fixture + // (see runVersionUpgrade). The same is true for adding a new key to this + // map. verMap := map[string]string{ - "20.1": "19.2.1", - "19.2": "19.1.5", + "20.2": "20.1.8", + "20.1": "19.2.11", + "19.2": "19.1.11", "19.1": "2.1.9", "2.2": "2.1.9", "2.1": "2.0.7", diff --git a/pkg/cmd/roachtest/toxiproxy.go b/pkg/cmd/roachtest/toxiproxy.go index b491765efe91..0f25e5220a45 100644 --- a/pkg/cmd/roachtest/toxiproxy.go +++ b/pkg/cmd/roachtest/toxiproxy.go @@ -21,7 +21,7 @@ import ( "time" toxiproxy "github.com/Shopify/toxiproxy/client" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // cockroachToxiWrapper replaces the cockroach binary. It modifies the listening port so diff --git a/pkg/cmd/roachtest/tpc_utils.go b/pkg/cmd/roachtest/tpc_utils.go new file mode 100644 index 000000000000..450a8c85232c --- /dev/null +++ b/pkg/cmd/roachtest/tpc_utils.go @@ -0,0 +1,125 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package main + +import ( + "context" + gosql "database/sql" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/errors" + "github.com/lib/pq" +) + +// loadTPCHDataset loads a TPC-H dataset for the specific benchmark spec on the +// provided roachNodes.
The function is idempotent and first checks whether a +// compatible dataset exists (compatible is defined as a tpch dataset with a +// scale factor at least as large as the provided scale factor), performing an +// expensive dataset restore only if it doesn't. +func loadTPCHDataset( + ctx context.Context, t *test, c *cluster, sf int, m *monitor, roachNodes nodeListOption, +) error { + db := c.Conn(ctx, roachNodes[0]) + defer db.Close() + + if _, err := db.ExecContext(ctx, `USE tpch`); err == nil { + t.l.Printf("found existing tpch dataset, verifying scale factor\n") + + var supplierCardinality int + if err := db.QueryRowContext( + ctx, `SELECT count(*) FROM tpch.supplier`, + ).Scan(&supplierCardinality); err != nil { + if pqErr := (*pq.Error)(nil); !(errors.As(err, &pqErr) && + string(pqErr.Code) == pgcode.UndefinedTable) { + return err + } + // Table does not exist. Set cardinality to 0. + supplierCardinality = 0 + } + + // Check if a tpch database with the required scale factor exists. + // 10000 is the number of rows in the supplier table at scale factor 1. + // supplier is the smallest table whose cardinality scales with the scale + // factor. + expectedSupplierCardinality := 10000 * sf + if supplierCardinality >= expectedSupplierCardinality { + t.l.Printf("dataset is at least of scale factor %d, continuing", sf) + return nil + } + + // If the scale factor was smaller than the required scale factor, wipe the + // cluster and restore. + m.ExpectDeaths(int32(c.spec.NodeCount)) + c.Wipe(ctx, roachNodes) + c.Start(ctx, t, roachNodes) + m.ResetDeaths() + } else if pqErr := (*pq.Error)(nil); !(errors.As(err, &pqErr) && + string(pqErr.Code) == pgcode.InvalidCatalogName) { + return err + } + + t.l.Printf("restoring tpch scale factor %d\n", sf) + tpchURL := fmt.Sprintf("gs://cockroach-fixtures/workload/tpch/scalefactor=%d/backup", sf) + query := fmt.Sprintf(`CREATE DATABASE IF NOT EXISTS tpch; RESTORE tpch.* FROM '%s' WITH into_db = 'tpch';`, tpchURL) + _, err := db.ExecContext(ctx, query) + return err +} + +// scatterTables runs "ALTER TABLE ... SCATTER" statement for every table in +// tableNames. It assumes that conn is already using the target database. If an +// error is encountered, the test is failed. +func scatterTables(t *test, conn *gosql.DB, tableNames []string) { + t.Status("scattering the data") + for _, table := range tableNames { + scatter := fmt.Sprintf("ALTER TABLE %s SCATTER;", table) + if _, err := conn.Exec(scatter); err != nil { + t.Fatal(err) + } + } +} + +// disableAutoStats disables automatic collection of statistics on the cluster. +func disableAutoStats(t *test, conn *gosql.DB) { + t.Status("disabling automatic collection of stats") + if _, err := conn.Exec( + `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false;`, + ); err != nil { + t.Fatal(err) + } +} + +// createStatsFromTables runs "CREATE STATISTICS" statement for every table in +// tableNames. It assumes that conn is already using the target database. If an +// error is encountered, the test is failed. 
+func createStatsFromTables(t *test, conn *gosql.DB, tableNames []string) { + t.Status("collecting stats") + for _, tableName := range tableNames { + t.Status(fmt.Sprintf("creating statistics from table %q", tableName)) + if _, err := conn.Exec( + fmt.Sprintf(`CREATE STATISTICS %s FROM %s;`, tableName, tableName), + ); err != nil { + t.Fatal(err) + } + } +} + +// disableVectorizeRowCountThresholdHeuristic sets the +// 'vectorize_row_count_threshold' cluster setting to zero so that the test +// uses the vectorized engine with 'vectorize=on' regardless of whether the +// stats are present (if we don't set it, then when the stats are not +// present, we fall back to the row-by-row engine even with +// `vectorize=on` set). +func disableVectorizeRowCountThresholdHeuristic(t *test, conn *gosql.DB) { + if _, err := conn.Exec("SET CLUSTER SETTING sql.defaults.vectorize_row_count_threshold=0"); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/cmd/roachtest/tpcc.go b/pkg/cmd/roachtest/tpcc.go index 3e45fcaa6b8e..1e4cec9c75d6 100644 --- a/pkg/cmd/roachtest/tpcc.go +++ b/pkg/cmd/roachtest/tpcc.go @@ -23,23 +23,33 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/binfetcher" "github.com/cockroachdb/cockroach/pkg/util/search" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/cockroachdb/cockroach/pkg/workload/histogram" "github.com/cockroachdb/cockroach/pkg/workload/tpcc" + "github.com/cockroachdb/errors" "github.com/cockroachdb/ttycolor" "github.com/lib/pq" - "github.com/pkg/errors" +) + +type tpccSetupType int + +const ( + usingImport tpccSetupType = iota + usingInit ) type tpccOptions struct { - Warehouses int - Extra string - Chaos func() Chaos // for late binding of stopper - During func(context.Context) error // for running a function during the test - Duration time.Duration + Warehouses int + ExtraRunArgs string + ExtraSetupArgs string + Chaos func() Chaos // for late binding of stopper + During func(context.Context) error // for running a function during the test + Duration time.Duration + SetupType tpccSetupType // The CockroachDB versions to deploy. The first one indicates the first node, // etc. To use the main binary, specify "". When Versions is nil, it defaults @@ -53,57 +63,37 @@ type tpccOptions struct { Versions []string } -// tpccFixturesCmd generates the command string to load tpcc data for the -// specified warehouse count into a cluster using either `fixtures import` -// or `fixtures load` depending on the cloud. -func tpccFixturesCmd(t *test, cloud string, warehouses int, extraArgs string) string { - var command string - switch cloud { - case gce: - // TODO(nvanbenschoten): We could switch to import for both clouds. - // At the moment, import is still a little unstable and load is still - // marginally faster.
- command = "./workload fixtures load" - fixtureWarehouses := -1 - for _, w := range []int{1, 10, 100, 1000, 2000, 5000, 10000} { - if w >= warehouses { - fixtureWarehouses = w - break - } - } - if fixtureWarehouses == -1 { - t.Fatalf("could not find fixture big enough for %d warehouses", warehouses) - } - warehouses = fixtureWarehouses - case aws, azure: - // For fixtures import, use the version built into the cockroach binary - // so the tpcc workload-versions match on release branches. - command = "./cockroach workload fixtures import" - default: - t.Fatalf("unknown cloud: %q", cloud) - } - return fmt.Sprintf("%s tpcc --warehouses=%d %s {pgurl:1}", - command, warehouses, extraArgs) +// tpccImportCmd generates the command string to load tpcc data for the +// specified warehouse count into a cluster. +// +// The command uses `cockroach workload` instead of `workload` so the tpcc +// workload-versions match on release branches. Similarly, the command does not +// specify pgurl to ensure that it is run on a node with a running cockroach +// instance to ensure that the workload version matches the gateway version in a +// mixed version cluster. +func tpccImportCmd(t *test, warehouses int, extraArgs string) string { + return fmt.Sprintf("./cockroach workload fixtures import tpcc --warehouses=%d %s", + warehouses, extraArgs) } func setupTPCC( - ctx context.Context, t *test, c *cluster, warehouses int, versions []string, + ctx context.Context, t *test, c *cluster, opts tpccOptions, ) (crdbNodes, workloadNode nodeListOption) { crdbNodes = c.Range(1, c.spec.NodeCount-1) workloadNode = c.Node(c.spec.NodeCount) if c.isLocal() { - warehouses = 1 + opts.Warehouses = 1 } - if n := len(versions); n == 0 { - versions = make([]string, c.spec.NodeCount-1) + if n := len(opts.Versions); n == 0 { + opts.Versions = make([]string, c.spec.NodeCount-1) } else if n != c.spec.NodeCount-1 { - t.Fatalf("must specify Versions for all %d nodes: %v", c.spec.NodeCount-1, versions) + t.Fatalf("must specify Versions for all %d nodes: %v", c.spec.NodeCount-1, opts.Versions) } { var regularNodes []option - for i, v := range versions { + for i, v := range opts.Versions { if v == "" { regularNodes = append(regularNodes, c.Node(i+1)) } else { @@ -132,8 +122,24 @@ func setupTPCC( defer db.Close() c.Start(ctx, t, crdbNodes, startArgsDontEncrypt) waitForFullReplication(t, c.Conn(ctx, crdbNodes[0])) - t.Status("loading fixture") - c.Run(ctx, workloadNode, tpccFixturesCmd(t, cloud, warehouses, "")) + switch opts.SetupType { + case usingImport: + t.Status("loading fixture") + c.Run(ctx, crdbNodes[:1], tpccImportCmd(t, opts.Warehouses, opts.ExtraSetupArgs)) + case usingInit: + t.Status("initializing tables") + extraArgs := opts.ExtraSetupArgs + if !t.buildVersion.AtLeast(version.MustParse("v20.2.0")) { + extraArgs += " --deprecated-fk-indexes" + } + cmd := fmt.Sprintf( + "./workload init tpcc --warehouses=%d %s {pgurl:1}", + opts.Warehouses, extraArgs, + ) + c.Run(ctx, workloadNode, cmd) + default: + t.Fatal("unknown tpcc setup type") + } t.Status("") }() return crdbNodes, workloadNode @@ -146,14 +152,14 @@ func runTPCC(ctx context.Context, t *test, c *cluster, opts tpccOptions) { opts.Duration = time.Minute rampDuration = 30 * time.Second } - crdbNodes, workloadNode := setupTPCC(ctx, t, c, opts.Warehouses, opts.Versions) + crdbNodes, workloadNode := setupTPCC(ctx, t, c, opts) t.Status("waiting") m := newMonitor(ctx, c, crdbNodes) m.Go(func(ctx context.Context) error { t.WorkerStatus("running tpcc") cmd := fmt.Sprintf( "./workload 
run tpcc --warehouses=%d --histograms="+perfArtifactsDir+"/stats.json "+ - opts.Extra+" --ramp=%s --duration=%s {pgurl:1-%d}", + opts.ExtraRunArgs+" --ramp=%s --duration=%s {pgurl:1-%d}", opts.Warehouses, rampDuration, opts.Duration, c.spec.NodeCount-1) c.Run(ctx, workloadNode, cmd) return nil @@ -220,11 +226,9 @@ func registerTPCC(r *testRegistry) { // w=headroom runs tpcc for a semi-extended period with some amount of // headroom, more closely mirroring a real production deployment than // running with the max supported warehouses. - Name: "tpcc/headroom/" + headroomSpec.String(), - Owner: OwnerKV, - // TODO(dan): Backfill tpccSupportedWarehouses and remove this "v2.1.0" - // minimum on gce. - MinVersion: maxVersion("v2.1.0", maybeMinVersionForFixturesImport(cloud)), + Name: "tpcc/headroom/" + headroomSpec.String(), + Owner: OwnerKV, + MinVersion: "v19.1.0", Tags: []string{`default`, `release_qualification`}, Cluster: headroomSpec, Run: func(ctx context.Context, t *test, c *cluster) { @@ -234,19 +238,20 @@ func registerTPCC(r *testRegistry) { runTPCC(ctx, t, c, tpccOptions{ Warehouses: headroomWarehouses, Duration: 120 * time.Minute, + SetupType: usingImport, }) }, }) mixedHeadroomSpec := makeClusterSpec(5, cpu(16)) + + // TODO(tbg): rewrite and extend this using the harness in versionupgrade.go. r.Add(testSpec{ // mixed-headroom is similar to w=headroom, but with an additional node // and on a mixed version cluster. It simulates a real production // deployment in the middle of the migration into a new cluster version. - Name: "tpcc/mixed-headroom/" + mixedHeadroomSpec.String(), - Owner: OwnerKV, - // TODO(dan): Backfill tpccSupportedWarehouses and remove this "v2.1.0" - // minimum on gce. - MinVersion: maxVersion("v2.1.0", maybeMinVersionForFixturesImport(cloud)), + Name: "tpcc/mixed-headroom/" + mixedHeadroomSpec.String(), + Owner: OwnerKV, + MinVersion: "v19.1.0", // TODO(tbg): add release_qualification tag once we know the test isn't // buggy. 
Tags: []string{`default`}, @@ -265,6 +270,7 @@ func registerTPCC(r *testRegistry) { Warehouses: headroomWarehouses, Duration: 120 * time.Minute, Versions: []string{oldV, "", oldV, ""}, + SetupType: usingImport, }) // TODO(tbg): run another TPCC with the final binaries here and // teach TPCC to re-use the dataset (seems easy enough) to at least @@ -274,28 +280,30 @@ func registerTPCC(r *testRegistry) { r.Add(testSpec{ Name: "tpcc-nowait/nodes=3/w=1", Owner: OwnerKV, - MinVersion: maybeMinVersionForFixturesImport(cloud), + MinVersion: "v19.1.0", Cluster: makeClusterSpec(4, cpu(16)), Run: func(ctx context.Context, t *test, c *cluster) { runTPCC(ctx, t, c, tpccOptions{ - Warehouses: 1, - Duration: 10 * time.Minute, - Extra: "--wait=false", + Warehouses: 1, + Duration: 10 * time.Minute, + ExtraRunArgs: "--wait=false", + SetupType: usingImport, }) }, }) r.Add(testSpec{ - Name: "weekly/tpcc-max", + Name: "weekly/tpcc/headroom", Owner: OwnerKV, - MinVersion: maybeMinVersionForFixturesImport(cloud), + MinVersion: "v19.1.0", Tags: []string{`weekly`}, Cluster: makeClusterSpec(4, cpu(16)), Timeout: time.Duration(6*24)*time.Hour + time.Duration(10)*time.Minute, Run: func(ctx context.Context, t *test, c *cluster) { - warehouses := 1350 + warehouses := 1000 runTPCC(ctx, t, c, tpccOptions{ Warehouses: warehouses, Duration: 6 * 24 * time.Hour, + SetupType: usingImport, }) }, }) @@ -303,14 +311,16 @@ func registerTPCC(r *testRegistry) { r.Add(testSpec{ Name: "tpcc/w=100/nodes=3/chaos=true", Owner: OwnerKV, + MinVersion: "v19.1.0", Cluster: makeClusterSpec(4), - MinVersion: maybeMinVersionForFixturesImport(cloud), Run: func(ctx context.Context, t *test, c *cluster) { duration := 30 * time.Minute runTPCC(ctx, t, c, tpccOptions{ Warehouses: 100, Duration: duration, - Extra: "--wait=false --tolerate-errors", + // For chaos tests, we don't want to use the default method because it + // involves preparing statements on all connections (see #51785). + ExtraRunArgs: "--method=simple --wait=false --tolerate-errors", Chaos: func() Chaos { return Chaos{ Timer: Periodic{ @@ -322,6 +332,27 @@ func registerTPCC(r *testRegistry) { DrainAndQuit: false, } }, + SetupType: usingImport, + }) + }, + }) + r.Add(testSpec{ + Name: "tpcc/interleaved/nodes=3/cpu=16/w=500", + Owner: OwnerSQLExec, + MinVersion: "v20.1.0", + Cluster: makeClusterSpec(4, cpu(16)), + Timeout: 6 * time.Hour, + Run: func(ctx context.Context, t *test, c *cluster) { + skip.WithIssue(t, 53886) + runTPCC(ctx, t, c, tpccOptions{ + // Currently, we do not support import on interleaved tables, which + // prohibits loading/importing a fixture. If/when this is supported, the + // number of warehouses should be increased, as we would no longer + // bottleneck on initialization, which is significantly slower than import.
+ Warehouses: 500, + Duration: time.Minute * 15, + ExtraSetupArgs: fmt.Sprintf("--interleaved=true"), + SetupType: usingInit, }) }, }) @@ -332,14 +363,14 @@ func registerTPCC(r *testRegistry) { CPUs: 4, LoadWarehouses: 1000, - EstimatedMax: gceOrAws(cloud, 400, 600), + EstimatedMax: gceOrAws(cloud, 650, 800), }) registerTPCCBenchSpec(r, tpccBenchSpec{ Nodes: 3, CPUs: 16, - LoadWarehouses: gceOrAws(cloud, 2000, 2500), - EstimatedMax: gceOrAws(cloud, 1600, 2350), + LoadWarehouses: gceOrAws(cloud, 2500, 3000), + EstimatedMax: gceOrAws(cloud, 2200, 2500), }) registerTPCCBenchSpec(r, tpccBenchSpec{ Nodes: 12, @@ -367,7 +398,7 @@ func registerTPCC(r *testRegistry) { LoadWarehouses: 5000, EstimatedMax: 3000, - MinVersion: "v19.1.0", + MinVersion: "v20.1.0", }) registerTPCCBenchSpec(r, tpccBenchSpec{ Nodes: 9, @@ -378,27 +409,10 @@ func registerTPCC(r *testRegistry) { LoadWarehouses: 2000, EstimatedMax: 900, - MinVersion: "v19.1.0", + MinVersion: "v20.1.0", }) } -func maxVersion(vers ...string) string { - var max *version.Version - for _, v := range vers { - v, err := version.Parse(v) - if err != nil { - continue - } - if max == nil || v.AtLeast(max) { - max = v - } - } - if max == nil { - return "" - } - return max.String() -} - func gceOrAws(cloud string, gce, aws int) int { if cloud == "aws" { return aws @@ -406,14 +420,6 @@ func gceOrAws(cloud string, gce, aws int) int { return gce } -func maybeMinVersionForFixturesImport(cloud string) string { - const minVersionForFixturesImport = "v19.1.0" - if cloud == "aws" { - return minVersionForFixturesImport - } - return "" -} - // tpccBenchDistribution represents a distribution of nodes in a tpccbench // cluster. type tpccBenchDistribution int @@ -557,7 +563,7 @@ func registerTPCCBenchSpec(r *testRegistry, b tpccBenchSpec) { minVersion := b.MinVersion if minVersion == "" { - minVersion = maybeMinVersionForFixturesImport(cloud) + minVersion = "v19.1.0" // needed for import } r.Add(testSpec{ @@ -601,8 +607,8 @@ func loadTPCCBench( // before restoring. c.Wipe(ctx, roachNodes) c.Start(ctx, t, append(b.startOpts(), roachNodes)...) - } else if pqErr, ok := err.(*pq.Error); !ok || - string(pqErr.Code) != pgcode.InvalidCatalogName { + } else if pqErr := (*pq.Error)(nil); !(errors.As(err, &pqErr) && + string(pqErr.Code) == pgcode.InvalidCatalogName) { return err } @@ -633,8 +639,8 @@ func loadTPCCBench( // Load the corresponding fixture. t.l.Printf("restoring tpcc fixture\n") waitForFullReplication(t, db) - cmd := tpccFixturesCmd(t, cloud, b.LoadWarehouses, loadArgs) - if err := c.RunE(ctx, loadNode, cmd); err != nil { + cmd := tpccImportCmd(t, b.LoadWarehouses, loadArgs) + if err := c.RunE(ctx, roachNodes[:1], cmd); err != nil { return err } if rebalanceWait == 0 || len(roachNodes) <= 3 { @@ -650,9 +656,15 @@ func loadTPCCBench( // Split and scatter the tables. Ramp up to the expected load in the desired // distribution. This should allow for load-based rebalancing to help // distribute load. Optionally pass some load configuration-specific flags. + method := "" + if b.Chaos { + // For chaos tests, we don't want to use the default method because it + // involves preparing statements on all connections (see #51785). 
+ method = "--method=simple" + } cmd = fmt.Sprintf("./workload run tpcc --warehouses=%d --workers=%d --max-rate=%d "+ - "--wait=false --duration=%s --scatter --tolerate-errors {pgurl%s}", - b.LoadWarehouses, b.LoadWarehouses, b.LoadWarehouses/2, rebalanceWait, roachNodes) + "--wait=false --duration=%s --scatter --tolerate-errors %s {pgurl%s}", + b.LoadWarehouses, b.LoadWarehouses, b.LoadWarehouses/2, rebalanceWait, method, roachNodes) if out, err := c.RunWithBuffer(ctx, c.l, loadNode, cmd); err != nil { return errors.Wrapf(err, "failed with output %q", string(out)) } @@ -743,11 +755,11 @@ func runTPCCBench(ctx context.Context, t *test, c *cluster, b tpccBenchSpec) { } defer func() { _ = os.RemoveAll(resultsDir) }() s := search.NewLineSearcher(1, b.LoadWarehouses, b.EstimatedMax, initStepSize, precision) + iteration := 0 if res, err := s.Search(func(warehouses int) (bool, error) { + iteration++ + t.l.Printf("initializing cluster for %d warehouses (search attempt: %d)", warehouses, iteration) m := newMonitor(ctx, c, roachNodes) - // Restart the cluster before each iteration to help eliminate - // inter-trial interactions. - m.ExpectDeaths(int32(len(roachNodes))) c.Stop(ctx, roachNodes) c.Start(ctx, t, append(b.startOpts(), roachNodes)...) time.Sleep(restartWait) @@ -804,7 +816,11 @@ func runTPCCBench(ctx context.Context, t *test, c *cluster, b tpccBenchSpec) { default: panic("unexpected") } - + if b.Chaos { + // For chaos tests, we don't want to use the default method because it + // involves preparing statements on all connections (see #51785). + extraFlags += " --method=simple" + } t.Status(fmt.Sprintf("running benchmark, warehouses=%d", warehouses)) histogramsPath := fmt.Sprintf("%s/warehouses=%d/stats.json", perfArtifactsDir, activeWarehouses) cmd := fmt.Sprintf("./workload run tpcc --warehouses=%d --active-warehouses=%d "+ @@ -845,11 +861,11 @@ func runTPCCBench(ctx context.Context, t *test, c *cluster, b tpccBenchSpec) { // Print the result. if failErr == nil { ttycolor.Stdout(ttycolor.Green) - t.l.Printf("--- PASS: tpcc %d resulted in %.1f tpmC (%.1f%% of max tpmC)\n\n", + t.l.Printf("--- SEARCH ITER PASS: TPCC %d resulted in %.1f tpmC (%.1f%% of max tpmC)\n\n", warehouses, res.TpmC(), res.Efficiency()) } else { ttycolor.Stdout(ttycolor.Red) - t.l.Printf("--- FAIL: tpcc %d resulted in %.1f tpmC and failed due to %v", + t.l.Printf("--- SEARCH ITER FAIL: TPCC %d resulted in %.1f tpmC and failed due to %v", warehouses, res.TpmC(), failErr) } ttycolor.Stdout(ttycolor.Reset) diff --git a/pkg/cmd/roachtest/tpcdsvec.go b/pkg/cmd/roachtest/tpcdsvec.go new file mode 100644 index 000000000000..5f2b9295fcbf --- /dev/null +++ b/pkg/cmd/roachtest/tpcdsvec.go @@ -0,0 +1,226 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package main + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/cockroachdb/cockroach/pkg/cmd/smithcmp/cmpconn" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/workload/tpcds" + "github.com/cockroachdb/errors" +) + +func registerTPCDSVec(r *testRegistry) { + const ( + timeout = 5 * time.Minute + withStatsSlowerWarningThreshold = 1.25 + ) + + queriesToSkip := map[int]bool{ + // The plans for these queries contain processors with + // core.LocalPlanNode which currently cannot be wrapped by the + // vectorized engine, so 'vectorize' session variable will make no + // difference. + 1: true, + 2: true, + 4: true, + 11: true, + 23: true, + 24: true, + 30: true, + 31: true, + 39: true, + 45: true, + 47: true, + 57: true, + 59: true, + 64: true, + 74: true, + 75: true, + 81: true, + 95: true, + + // These queries contain unsupported function 'rollup' (#46280). + 5: true, + 14: true, + 18: true, + 22: true, + 67: true, + 77: true, + 80: true, + } + + queriesToSkip20_1 := map[int]bool{ + // These queries do not finish in 5 minutes on 20.1 branch. + 7: true, + 13: true, + 17: true, + 19: true, + 25: true, + 26: true, + 29: true, + //45: true, + 46: true, + 48: true, + 50: true, + 61: true, + //64: true, + 66: true, + 68: true, + 72: true, + 84: true, + 85: true, + } + + tpcdsTables := []string{ + `call_center`, `catalog_page`, `catalog_returns`, `catalog_sales`, + `customer`, `customer_address`, `customer_demographics`, `date_dim`, + `dbgen_version`, `household_demographics`, `income_band`, `inventory`, + `item`, `promotion`, `reason`, `ship_mode`, `store`, `store_returns`, + `store_sales`, `time_dim`, `warehouse`, `web_page`, `web_returns`, + `web_sales`, `web_site`, + } + + runTPCDSVec := func(ctx context.Context, t *test, c *cluster) { + c.Put(ctx, cockroach, "./cockroach", c.All()) + c.Start(ctx, t) + + clusterConn := c.Conn(ctx, 1) + disableAutoStats(t, clusterConn) + disableVectorizeRowCountThresholdHeuristic(t, clusterConn) + t.Status("restoring TPCDS dataset for Scale Factor 1") + if _, err := clusterConn.Exec( + `RESTORE DATABASE tpcds FROM 'gs://cockroach-fixtures/workload/tpcds/scalefactor=1/backup';`, + ); err != nil { + t.Fatal(err) + } + + if _, err := clusterConn.Exec("USE tpcds;"); err != nil { + t.Fatal(err) + } + scatterTables(t, clusterConn, tpcdsTables) + t.Status("waiting for full replication") + waitForFullReplication(t, clusterConn) + versionString, err := fetchCockroachVersion(ctx, c, c.Node(1)[0]) + if err != nil { + t.Fatal(err) + } + + // TODO(yuzefovich): it seems like if cmpconn.CompareConns hits a + // timeout, the query actually keeps on going and the connection + // becomes kinda stale. To go around it, we set a statement timeout + // variable on the connections and pass in 3 x timeout into + // CompareConns hoping that the session variable is better respected. + // We additionally open fresh connections for each query. 
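Note the shape of the guard rails being set up here: the session's statement_timeout enforces the 1x budget on the server, while the 3x deadline passed to CompareConns is only a client-side backstop. A minimal sketch of that two-layer pattern, assuming nothing beyond database/sql (the helper name is illustrative):

package sketch

import (
	"context"
	gosql "database/sql"
	"fmt"
	"time"
)

// runWithGuards applies the two-layer timeout pattern used above: a
// server-side statement_timeout session variable (1x timeout) plus a
// larger client-side deadline (3x timeout), so a stuck query is killed
// by the server instead of leaving the connection in a stale state.
func runWithGuards(ctx context.Context, db *gosql.DB, query string, timeout time.Duration) error {
	conn, err := db.Conn(ctx) // pin one session so SET applies to it
	if err != nil {
		return err
	}
	defer conn.Close()
	if _, err := conn.ExecContext(ctx, fmt.Sprintf("SET statement_timeout='%s';", timeout)); err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(ctx, 3*timeout)
	defer cancel()
	_, err = conn.ExecContext(ctx, query)
	return err
}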
+ setStmtTimeout := fmt.Sprintf("SET statement_timeout='%s';", timeout) + firstNode := c.Node(1) + firstNodeURL := c.ExternalPGUrl(ctx, firstNode)[0] + openNewConnections := func() (map[string]*cmpconn.Conn, func()) { + conns := map[string]*cmpconn.Conn{} + vecOffConn, err := cmpconn.NewConn( + firstNodeURL, nil, nil, setStmtTimeout+"SET vectorize=off; USE tpcds;", + ) + if err != nil { + t.Fatal(err) + } + conns["vectorize=OFF"] = vecOffConn + vecOnConn, err := cmpconn.NewConn( + firstNodeURL, nil, nil, setStmtTimeout+"SET vectorize=on; USE tpcds;", + ) + if err != nil { + t.Fatal(err) + } + conns["vectorize=ON"] = vecOnConn + // A sanity check that we have different values of 'vectorize' + // session variable on two connections and that the comparator will + // emit an error because of that difference. + if err := cmpconn.CompareConns(ctx, timeout, conns, "", "SHOW vectorize;"); err == nil { + t.Fatal("unexpectedly SHOW vectorize didn't trigger an error on comparison") + } + return conns, func() { + vecOffConn.Close() + vecOnConn.Close() + } + } + + noStatsRunTimes := make(map[int]float64) + var errToReport error + // We will run all queries in two scenarios: without stats and with + // auto stats. The idea is that the plans are likely to be different, + // so we will be testing different execution scenarios. We additionally + // will compare the queries' run times in both scenarios and print out + // warnings when in presence of stats we seem to be choosing worse + // plans. + for _, haveStats := range []bool{false, true} { + for queryNum := 1; queryNum <= tpcds.NumQueries; queryNum++ { + if _, toSkip := queriesToSkip[queryNum]; toSkip { + continue + } + if strings.HasPrefix(versionString, "v20.1") { + if _, toSkip := queriesToSkip20_1[queryNum]; toSkip { + continue + } + } + query, ok := tpcds.QueriesByNumber[queryNum] + if !ok { + continue + } + t.Status(fmt.Sprintf("running query %d\n", queryNum)) + // We will be opening fresh connections for every query to go + // around issues with cancellation. 
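The run-time bookkeeping that follows feeds a simple comparison: once stats exist, a query that got meaningfully slower suggests the optimizer chose a worse plan. Condensed into a sketch, with the 1.25 factor being withStatsSlowerWarningThreshold from above:

package main

import "fmt"

// suboptimalWithStats mirrors the warning condition below: the
// with-stats run counts as suspicious when it takes more than 1.25x the
// no-stats run time for the same query.
func suboptimalWithStats(noStatsSec, withStatsSec float64) bool {
	const withStatsSlowerWarningThreshold = 1.25
	return noStatsSec*withStatsSlowerWarningThreshold < withStatsSec
}

func main() {
	fmt.Println(suboptimalWithStats(10.0, 13.0)) // true: 13s > 1.25 * 10s
}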
+ conns, cleanup := openNewConnections() + start := timeutil.Now() + if err := cmpconn.CompareConns( + ctx, 3*timeout, conns, "", query); err != nil { + t.Status(fmt.Sprintf("encountered an error: %s\n", err)) + errToReport = errors.CombineErrors(errToReport, err) + } else { + runTimeInSeconds := timeutil.Since(start).Seconds() + t.Status( + fmt.Sprintf("[q%d] took about %.2fs to run on both configs", + queryNum, runTimeInSeconds), + ) + if haveStats { + noStatsRunTime, ok := noStatsRunTimes[queryNum] + if ok && noStatsRunTime*withStatsSlowerWarningThreshold < runTimeInSeconds { + t.Status(fmt.Sprintf("WARNING: suboptimal plan when stats are present\n"+ + "no stats: %.2fs\twith stats: %.2fs", noStatsRunTime, runTimeInSeconds)) + } + } else { + noStatsRunTimes[queryNum] = runTimeInSeconds + } + } + cleanup() + } + + if !haveStats { + createStatsFromTables(t, clusterConn, tpcdsTables) + } + } + if errToReport != nil { + t.Fatal(errToReport) + } + } + + r.Add(testSpec{ + Name: "tpcdsvec", + Owner: OwnerSQLExec, + Cluster: makeClusterSpec(3), + MinVersion: "v20.1.0", + Run: func(ctx context.Context, t *test, c *cluster) { + runTPCDSVec(ctx, t, c) + }, + }) +} diff --git a/pkg/cmd/roachtest/tpchbench.go b/pkg/cmd/roachtest/tpchbench.go index 14fb8052dca4..ea3b9252c608 100644 --- a/pkg/cmd/roachtest/tpchbench.go +++ b/pkg/cmd/roachtest/tpchbench.go @@ -19,36 +19,19 @@ import ( "strings" "time" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/util/httputil" "github.com/cockroachdb/cockroach/pkg/workload/querybench" - "github.com/lib/pq" ) -// tpchBench is a benchmark run on tpch data. There are different groups of -// queries we run against tpch data, represented by different tpchBench values. -type tpchBench int - -//go:generate stringer -type=tpchBench - -const ( - sql20 tpchBench = iota - tpch -) - -var urlMap = map[tpchBench]string{ - sql20: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/2.1-sql-20`, - tpch: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/tpch-queries`, -} - type tpchBenchSpec struct { Nodes int CPUs int ScaleFactor int - benchType tpchBench + benchType string + url string numRunsPerQuery int // minVersion specifies the minimum version of CRDB nodes. If omitted, it - // will default to maybeMinVersionForFixturesImport. + // will default to v19.1.0. minVersion string // maxLatency is the expected maximum time that a query will take to execute // needed to correctly initialize histograms. 
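getNumQueriesInFile's implementation is not part of this diff; a plausible sketch, assuming querybench files keep one query per line and that '#' marks comment lines (both are assumptions, not verified here):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// countQueries is a guess at what getNumQueriesInFile does: count the
// non-empty, non-comment lines of a one-query-per-line file.
func countQueries(path string) (int, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	n := 0
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line != "" && !strings.HasPrefix(line, "#") {
			n++
		}
	}
	return n, sc.Err()
}

func main() {
	n, err := countQueries("tpch-queries") // hypothetical local copy
	fmt.Println(n, err)
}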
@@ -73,10 +56,9 @@ func runTPCHBench(ctx context.Context, t *test, c *cluster, b tpchBenchSpec) { c.Put(ctx, cockroach, "./cockroach", roachNodes) c.Put(ctx, workload, "./workload", loadNode) - url := urlMap[b.benchType] - filename := b.benchType.String() - t.Status(fmt.Sprintf("downloading %s query file from %s", filename, url)) - if err := c.RunE(ctx, loadNode, fmt.Sprintf("curl %s > %s", url, filename)); err != nil { + filename := b.benchType + t.Status(fmt.Sprintf("downloading %s query file from %s", filename, b.url)) + if err := c.RunE(ctx, loadNode, fmt.Sprintf("curl %s > %s", b.url, filename)); err != nil { t.Fatal(err) } @@ -86,14 +68,14 @@ func runTPCHBench(ctx context.Context, t *test, c *cluster, b tpchBenchSpec) { m := newMonitor(ctx, c, roachNodes) m.Go(func(ctx context.Context) error { t.Status("setting up dataset") - err := loadTPCHBench(ctx, t, c, b, m, roachNodes, loadNode) + err := loadTPCHDataset(ctx, t, c, b.ScaleFactor, m, roachNodes) if err != nil { return err } t.l.Printf("running %s benchmark on tpch scale-factor=%d", filename, b.ScaleFactor) - numQueries, err := getNumQueriesInFile(filename, url) + numQueries, err := getNumQueriesInFile(filename, b.url) if err != nil { t.Fatal(err) } @@ -164,66 +146,10 @@ func downloadFile(filename string, url string) (*os.File, error) { return out, err } -// loadTPCHBench loads a TPC-H dataset for the specific benchmark spec. The -// function is idempotent and first checks whether a compatible dataset exists, -// performing an expensive dataset restore only if it doesn't. -func loadTPCHBench( - ctx context.Context, - t *test, - c *cluster, - b tpchBenchSpec, - m *monitor, - roachNodes, loadNode nodeListOption, -) error { - db := c.Conn(ctx, roachNodes[0]) - defer db.Close() - - if _, err := db.ExecContext(ctx, `USE tpch`); err == nil { - t.l.Printf("found existing tpch dataset, verifying scale factor\n") - - var supplierCardinality int - if err := db.QueryRowContext( - ctx, `SELECT count(*) FROM tpch.supplier`, - ).Scan(&supplierCardinality); err != nil { - if pqErr, ok := err.(*pq.Error); !(ok && pqErr.Code == pgcode.UndefinedTable) { - return err - } - // Table does not exist. Set cardinality to 0. - supplierCardinality = 0 - } - - // Check if a tpch database with the required scale factor exists. - // 10000 is the number of rows in the supplier table at scale factor 1. - // supplier is the smallest table whose cardinality scales with the scale - // factor. - expectedSupplierCardinality := 10000 * b.ScaleFactor - if supplierCardinality >= expectedSupplierCardinality { - t.l.Printf("dataset is at least of scale factor %d, continuing", b.ScaleFactor) - return nil - } - - // If the scale factor was smaller than the required scale factor, wipe the - // cluster and restore. 
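For context, the idempotency check being deleted below boils down to a single cardinality probe, which the replacement loadTPCHDataset presumably retains in some form. As a sketch:

package sketch

import gosql "database/sql"

// supplierHasScaleFactor reproduces the check from the deleted
// loadTPCHBench: supplier holds 10000 rows per scale factor and is the
// smallest table whose cardinality scales, so an existing tpch database
// is reusable iff its supplier cardinality reaches 10000*sf.
func supplierHasScaleFactor(db *gosql.DB, sf int) (bool, error) {
	var n int
	if err := db.QueryRow(`SELECT count(*) FROM tpch.supplier`).Scan(&n); err != nil {
		return false, err
	}
	return n >= 10000*sf, nil
}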
- m.ExpectDeaths(int32(c.spec.NodeCount)) - c.Wipe(ctx, roachNodes) - c.Start(ctx, t, roachNodes) - m.ResetDeaths() - } else if pqErr, ok := err.(*pq.Error); !ok || - string(pqErr.Code) != pgcode.InvalidCatalogName { - return err - } - - t.l.Printf("restoring tpch scale factor %d\n", b.ScaleFactor) - tpchURL := fmt.Sprintf("gs://cockroach-fixtures/workload/tpch/scalefactor=%d/backup", b.ScaleFactor) - query := fmt.Sprintf(`CREATE DATABASE IF NOT EXISTS tpch; RESTORE tpch.* FROM '%s' WITH into_db = 'tpch';`, tpchURL) - _, err := db.ExecContext(ctx, query) - return err -} - func registerTPCHBenchSpec(r *testRegistry, b tpchBenchSpec) { nameParts := []string{ "tpchbench", - b.benchType.String(), + b.benchType, fmt.Sprintf("nodes=%d", b.Nodes), fmt.Sprintf("cpu=%d", b.CPUs), fmt.Sprintf("sf=%d", b.ScaleFactor), @@ -233,7 +159,7 @@ func registerTPCHBenchSpec(r *testRegistry, b tpchBenchSpec) { numNodes := b.Nodes + 1 minVersion := b.minVersion if minVersion == `` { - minVersion = maybeMinVersionForFixturesImport(cloud) + minVersion = "v19.1.0" // needed for import } r.Add(testSpec{ @@ -253,7 +179,8 @@ func registerTPCHBench(r *testRegistry) { Nodes: 3, CPUs: 4, ScaleFactor: 1, - benchType: sql20, + benchType: `sql20`, + url: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/2.1-sql-20`, numRunsPerQuery: 3, maxLatency: 100 * time.Second, }, @@ -261,7 +188,8 @@ func registerTPCHBench(r *testRegistry) { Nodes: 3, CPUs: 4, ScaleFactor: 1, - benchType: tpch, + benchType: `tpch`, + url: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/tpch-queries`, numRunsPerQuery: 3, minVersion: `v19.2.0`, maxLatency: 500 * time.Second, diff --git a/pkg/cmd/roachtest/tpchbench_string.go b/pkg/cmd/roachtest/tpchbench_string.go deleted file mode 100644 index 3a1d01e3f6ec..000000000000 --- a/pkg/cmd/roachtest/tpchbench_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=tpchBench"; DO NOT EDIT. - -package main - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[sql20-0] - _ = x[tpch-1] -} - -const _tpchBench_name = "sql20tpch" - -var _tpchBench_index = [...]uint8{0, 5, 9} - -func (i tpchBench) String() string { - if i < 0 || i >= tpchBench(len(_tpchBench_index)-1) { - return "tpchBench(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tpchBench_name[_tpchBench_index[i]:_tpchBench_index[i+1]] -} diff --git a/pkg/cmd/roachtest/tpchvec.go b/pkg/cmd/roachtest/tpchvec.go index 8b4ac07f1984..bf0ebd25ece5 100644 --- a/pkg/cmd/roachtest/tpchvec.go +++ b/pkg/cmd/roachtest/tpchvec.go @@ -14,660 +14,706 @@ import ( "bufio" "bytes" "context" + gosql "database/sql" "fmt" + "math" + "math/rand" "regexp" + "runtime" "sort" "strconv" "strings" + "github.com/cockroachdb/cockroach/pkg/util/binfetcher" "github.com/cockroachdb/cockroach/pkg/util/randutil" + "github.com/cockroachdb/cockroach/pkg/workload/tpch" "github.com/cockroachdb/errors" ) -func registerTPCHVec(r *testRegistry) { - const ( - nodeCount = 3 - numTPCHQueries = 22 - ) +type crdbVersion int - type crdbVersion int - const ( - version19_2 crdbVersion = iota - version20_1 - ) - toCRDBVersion := func(v string) (crdbVersion, error) { - if strings.HasPrefix(v, "v19.2") { - return version19_2, nil - } else if strings.HasPrefix(v, "v20.1") { - return version20_1, nil - } else { - return 0, errors.Errorf("unrecognized version: %s", v) - } +const ( + tpchVecVersion19_2 crdbVersion = iota + tpchVecVersion20_1 + tpchVecVersion20_2 +) + +func toCRDBVersion(v string) (crdbVersion, error) { + if strings.HasPrefix(v, "v19.2") { + return tpchVecVersion19_2, nil + } else if strings.HasPrefix(v, "v20.1") { + return tpchVecVersion20_1, nil + } else if strings.HasPrefix(v, "v20.2") { + return tpchVecVersion20_2, nil + } else { + return 0, errors.Errorf("unrecognized version: %s", v) } +} - // queriesToSkipByVersion is a map from crdbVersion to another map that - // contains query numbers to be skipped (as well as the reasons for why - // they are skipped). - queriesToSkipByVersion := make(map[crdbVersion]map[int]string) - queriesToSkipByVersion[version19_2] = map[int]string{ +func vectorizeOptionToSetting(vectorize bool, version crdbVersion) string { + if !vectorize { + return "off" + } + switch version { + case tpchVecVersion19_2: + return "experimental_on" + default: + return "on" + } +} + +// queriesToSkipByVersion is a map keyed by version that contains query numbers +// to be skipped for the given version (as well as the reasons for why they are +// skipped). +var queriesToSkipByVersion = map[crdbVersion]map[int]string{ + tpchVecVersion19_2: { 5: "can cause OOM", 7: "can cause OOM", 8: "can cause OOM", 9: "can cause OOM", 19: "can cause OOM", - } - vectorizeOnOptionByVersion := map[crdbVersion]string{ - version19_2: "experimental_on", - version20_1: "on", - } - // slownessThreshold describes the threshold at which we fail the test - // if vec ON is slower that vec OFF, meaning that if - // vec_on_time > vecOnSlowerFailFactor * vec_off_time, the test is failed. - // This will help catch any regressions. + }, +} + +// getSlownessThreshold returns the threshold at which we fail the test if vec +// ON is slower that vec OFF, meaning that if +// vec_on_time >= slownessThreshold * vec_off_time +// the test is failed. This will help catch any regressions. +func getSlownessThreshold(version crdbVersion) float64 { + switch version { // Note that for 19.2 version the threshold is higher in order to reduce // the noise. 
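Plugged into the median comparison it guards, the threshold works as in this condensed sketch (not the perf test's exact code, which also validates run counts):

package sketch

import "sort"

// vecOnRegressed condenses the per-query check in postTestRunHook
// further below: compare median run times and report a regression when
// vec ON is at least threshold times slower than vec OFF.
func vecOnRegressed(vecOnTimes, vecOffTimes []float64, threshold float64) bool {
	median := func(times []float64) float64 {
		s := append([]float64(nil), times...)
		sort.Float64s(s)
		return s[len(s)/2]
	}
	return median(vecOnTimes) >= threshold*median(vecOffTimes)
}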
- slownessThresholdByVersion := map[crdbVersion]float64{ - version19_2: 1.5, - version20_1: 1.15, + case tpchVecVersion19_2: + return 1.5 + default: + return 1.2 } +} - TPCHTables := []string{ - "nation", "region", "part", "supplier", - "partsupp", "customer", "orders", "lineitem", - } - TPCHTableStatsInjection := []string{ - `ALTER TABLE region INJECT STATISTICS '[ - { - "columns": ["r_regionkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 5, - "distinct_count": 5 - }, - { - "columns": ["r_name"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 5, - "distinct_count": 5 - }, - { - "columns": ["r_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 5, - "distinct_count": 5 - } - ]';`, - `ALTER TABLE nation INJECT STATISTICS '[ - { - "columns": ["n_nationkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 25, - "distinct_count": 25 - }, - { - "columns": ["n_name"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 25, - "distinct_count": 25 - }, - { - "columns": ["n_regionkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 25, - "distinct_count": 5 - }, - { - "columns": ["n_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 25, - "distinct_count": 25 - } - ]';`, - `ALTER TABLE supplier INJECT STATISTICS '[ - { - "columns": ["s_suppkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 10000, - "distinct_count": 10000 - }, - { - "columns": ["s_name"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 10000, - "distinct_count": 10000 - }, - { - "columns": ["s_address"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 10000, - "distinct_count": 10000 - }, - { - "columns": ["s_nationkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 10000, - "distinct_count": 25 - }, - { - "columns": ["s_phone"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 10000, - "distinct_count": 10000 - }, - { - "columns": ["s_acctbal"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 10000, - "distinct_count": 10000 - }, - { - "columns": ["s_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 10000, - "distinct_count": 10000 - } - ]';`, - `ALTER TABLE public.part INJECT STATISTICS '[ - { - "columns": ["p_partkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 200000 - }, - { - "columns": ["p_name"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 200000 - }, - { - "columns": ["p_mfgr"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 5 - }, - { - "columns": ["p_brand"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 25 - }, - { - "columns": ["p_type"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 150 - }, - { - "columns": ["p_size"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 50 - }, - { - "columns": ["p_container"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 40 - }, - { - "columns": ["p_retailprice"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 20000 - }, - { - "columns": ["p_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 200000, - "distinct_count": 
130000 - } - ]';`, - `ALTER TABLE partsupp INJECT STATISTICS '[ - { - "columns": ["ps_partkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 800000, - "distinct_count": 200000 - }, - { - "columns": ["ps_suppkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 800000, - "distinct_count": 10000 - }, - { - "columns": ["ps_availqty"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 800000, - "distinct_count": 10000 - }, - { - "columns": ["ps_supplycost"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 800000, - "distinct_count": 100000 - }, - { - "columns": ["ps_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 800000, - "distinct_count": 800000 - } - ]';`, - `ALTER TABLE customer INJECT STATISTICS '[ - { - "columns": ["c_custkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 150000 - }, - { - "columns": ["c_name"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 150000 - }, - { - "columns": ["c_address"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 150000 - }, - { - "columns": ["c_nationkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 25 - }, - { - "columns": ["c_phone"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 150000 - }, - { - "columns": ["c_acctbal"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 150000 - }, - { - "columns": ["c_mktsegment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 5 - }, - { - "columns": ["c_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 150000, - "distinct_count": 150000 - } - ]';`, - `ALTER TABLE orders INJECT STATISTICS '[ - { - "columns": ["o_orderkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 1500000 - }, - { - "columns": ["o_custkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 100000 - }, - { - "columns": ["o_orderstatus"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 3 - }, - { - "columns": ["o_totalprice"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 1500000 - }, - { - "columns": ["o_orderdate"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 2500 - }, - { - "columns": ["o_orderpriority"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 5 - }, - { - "columns": ["o_clerk"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 1000 - }, - { - "columns": ["o_shippriority"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 1 - }, - { - "columns": ["o_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 1500000, - "distinct_count": 1500000 - } - ]';`, - `ALTER TABLE lineitem INJECT STATISTICS '[ - { - "columns": ["l_orderkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 1500000 - }, - { - "columns": ["l_partkey"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 200000 - }, - { - "columns": ["l_suppkey"], - 
"created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 10000 - }, - { - "columns": ["l_linenumber"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 7 - }, - { - "columns": ["l_quantity"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 50 - }, - { - "columns": ["l_extendedprice"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 1000000 - }, - { - "columns": ["l_discount"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 11 - }, - { - "columns": ["l_tax"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 9 - }, - { - "columns": ["l_returnflag"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 3 - }, - { - "columns": ["l_linestatus"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 2 - }, - { - "columns": ["l_shipdate"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 2500 - }, - { - "columns": ["l_commitdate"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 2500 - }, - { - "columns": ["l_receiptdate"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 2500 - }, - { - "columns": ["l_shipinstruct"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 4 - }, - { - "columns": ["l_shipmode"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 7 - }, - { - "columns": ["l_comment"], - "created_at": "2018-01-01 1:00:00.00000+00:00", - "row_count": 6001215, - "distinct_count": 4500000 - } - ]';`, +var tpchTables = []string{ + "nation", "region", "part", "supplier", + "partsupp", "customer", "orders", "lineitem", +} + +// tpchVecTestRunConfig specifies the configuration of a tpchvec test run. +type tpchVecTestRunConfig struct { + // numRunsPerQuery determines how many time a single query runs, set to 1 + // by default. + numRunsPerQuery int + // queriesToRun specifies the number of queries to run (in [1, + // tpch.NumQueries] range). + queriesToRun []int + // clusterSetups specifies all cluster setup queries that need to be + // executed before running any of the TPCH queries. First dimension + // determines the number of different clusterSetups a tpchvec test is run + // with, and every clusterSetups[i] specifies all queries for setup with + // index i. + // Note: these are expected to modify cluster-wide settings. + clusterSetups [][]string + // setupNames contains 1-to-1 mapping with clusterSetups to provide + // user-friendly names for the setups. + setupNames []string +} + +// performClusterSetup executes all queries in clusterSetup on conn. +func performClusterSetup(t *test, conn *gosql.DB, clusterSetup []string) { + for _, query := range clusterSetup { + if _, err := conn.Exec(query); err != nil { + t.Fatal(err) + } } +} - type runOption int - const ( - // perf configuration is meant to be used to check the correctness of - // the vectorized engine and compare the queries' runtimes against - // row-by-row engine. - perf runOption = iota - // stressDiskSpilling configuration is meant to stress disk spilling of - // the vectorized engine. There is no comparison of the runtimes. 
- stressDiskSpilling - ) - type runConfig struct { - vectorizeOptions []bool - stressDiskSpilling bool - numRunsPerQuery int +type tpchVecTestCase interface { + // getRunConfig returns the configuration of tpchvec test run. + getRunConfig(version crdbVersion, queriesToSkip map[int]string) tpchVecTestRunConfig + // preTestRunHook is called before any tpch query is run. Can be used to + // perform any setup that cannot be expressed as a modification to + // cluster-wide settings (those should go into tpchVecTestRunConfig). + preTestRunHook(ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion, clusterSetup []string) + // postQueryRunHook is called after each tpch query is run with the output and + // the index of the setup it was run in. + postQueryRunHook(t *test, output []byte, setupIdx int) + // postTestRunHook is called after all tpch queries are run. Can be used to + // perform teardown or general validation. + postTestRunHook(ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion) +} + +// tpchVecTestCaseBase is a default tpchVecTestCase implementation that can be +// embedded and extended. +type tpchVecTestCaseBase struct{} + +func (b tpchVecTestCaseBase) getRunConfig( + version crdbVersion, queriesToSkip map[int]string, +) tpchVecTestRunConfig { + runConfig := tpchVecTestRunConfig{ + numRunsPerQuery: 1, + clusterSetups: [][]string{{ + "RESET CLUSTER SETTING sql.distsql.temp_storage.workmem", + fmt.Sprintf("SET CLUSTER SETTING sql.defaults.vectorize=%s", + vectorizeOptionToSetting(true, version)), + }}, + setupNames: []string{"default"}, } - runConfigs := make(map[runOption]runConfig) - const ( - // These correspond to "perf" run configuration below. - vecOnConfig = 0 - vecOffConfig = 1 - ) - runConfigs[perf] = runConfig{ - vectorizeOptions: []bool{true, false}, - stressDiskSpilling: false, - numRunsPerQuery: 3, + if version != tpchVecVersion19_2 { + runConfig.clusterSetups[0] = append(runConfig.clusterSetups[0], + "RESET CLUSTER SETTING sql.testing.vectorize.batch_size", + ) } - runConfigs[stressDiskSpilling] = runConfig{ - vectorizeOptions: []bool{true}, - stressDiskSpilling: true, - numRunsPerQuery: 1, + for queryNum := 1; queryNum <= tpch.NumQueries; queryNum++ { + if _, shouldSkip := queriesToSkip[queryNum]; !shouldSkip { + runConfig.queriesToRun = append(runConfig.queriesToRun, queryNum) + } } + return runConfig +} - runTPCHVec := func(ctx context.Context, t *test, c *cluster, option runOption) { - firstNode := c.Node(1) - c.Put(ctx, cockroach, "./cockroach", c.All()) - c.Put(ctx, workload, "./workload", firstNode) - c.Start(ctx, t) - - conn := c.Conn(ctx, 1) - t.Status("restoring TPCH dataset for Scale Factor 1") - setup := ` -CREATE DATABASE tpch; -RESTORE tpch.* FROM 'gs://cockroach-fixtures/workload/tpch/scalefactor=1/backup' WITH into_db = 'tpch'; -` - if _, err := conn.Exec(setup); err != nil { - t.Fatal(err) - } +func (b tpchVecTestCaseBase) preTestRunHook( + _ context.Context, + t *test, + _ *cluster, + conn *gosql.DB, + version crdbVersion, + clusterSetup []string, +) { + performClusterSetup(t, conn, clusterSetup) +} - t.Status("scattering the data") - if _, err := conn.Exec("USE tpch;"); err != nil { - t.Fatal(err) - } - for _, table := range TPCHTables { - scatter := fmt.Sprintf("ALTER TABLE %s SCATTER;", table) - if _, err := conn.Exec(scatter); err != nil { - t.Fatal(err) +func (b tpchVecTestCaseBase) postQueryRunHook(*test, []byte, int) {} + +func (b tpchVecTestCaseBase) postTestRunHook( + context.Context, *test, 
*cluster, *gosql.DB, crdbVersion, +) { +} + +type tpchVecPerfHelper struct { + timeByQueryNum []map[int][]float64 +} + +func newTpchVecPerfHelper(numSetups int) *tpchVecPerfHelper { + timeByQueryNum := make([]map[int][]float64, numSetups) + for i := range timeByQueryNum { + timeByQueryNum[i] = make(map[int][]float64) + } + return &tpchVecPerfHelper{ + timeByQueryNum: timeByQueryNum, + } +} + +func (h *tpchVecPerfHelper) parseQueryOutput(t *test, output []byte, setupIdx int) { + runtimeRegex := regexp.MustCompile(`.*\[q([\d]+)\] returned \d+ rows after ([\d]+\.[\d]+) seconds.*`) + scanner := bufio.NewScanner(bytes.NewReader(output)) + for scanner.Scan() { + line := scanner.Bytes() + match := runtimeRegex.FindSubmatch(line) + if match != nil { + queryNum, err := strconv.Atoi(string(match[1])) + if err != nil { + t.Fatalf("failed parsing %q as int with %s", match[1], err) } - } - t.Status("waiting for full replication") - waitForFullReplication(t, conn) - t.Status("injecting stats") - for _, injectStats := range TPCHTableStatsInjection { - if _, err := conn.Exec(injectStats); err != nil { - t.Fatal(err) + queryTime, err := strconv.ParseFloat(string(match[2]), 64) + if err != nil { + t.Fatalf("failed parsing %q as float with %s", match[2], err) } + h.timeByQueryNum[setupIdx][queryNum] = append(h.timeByQueryNum[setupIdx][queryNum], queryTime) } - versionString, err := fetchCockroachVersion(ctx, c, c.Node(1)[0]) - if err != nil { - t.Fatal(err) + } +} + +const ( + tpchPerfTestVecOnConfigIdx = 1 + tpchPerfTestVecOffConfigIdx = 0 +) + +type tpchVecPerfTest struct { + tpchVecTestCaseBase + *tpchVecPerfHelper + + disableStatsCreation bool +} + +var _ tpchVecTestCase = &tpchVecPerfTest{} + +func newTpchVecPerfTest(disableStatsCreation bool) *tpchVecPerfTest { + return &tpchVecPerfTest{ + tpchVecPerfHelper: newTpchVecPerfHelper(2 /* numSetups */), + disableStatsCreation: disableStatsCreation, + } +} + +func (p tpchVecPerfTest) getRunConfig(version crdbVersion, _ map[int]string) tpchVecTestRunConfig { + var queriesToSkip map[int]string + if p.disableStatsCreation { + queriesToSkip = map[int]string{ + 9: "takes too long without stats", } - version, err := toCRDBVersion(versionString) - if err != nil { - t.Fatal(err) + } else { + queriesToSkip = queriesToSkipByVersion[version] + } + runConfig := p.tpchVecTestCaseBase.getRunConfig(version, queriesToSkip) + runConfig.numRunsPerQuery = 3 + // Make a copy of the default configuration setup and add different + // vectorize setting updates. Note that it's ok that the default setup + // sets vectorize cluster setting to 'on' - we will override it with + // queries below. 
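One subtlety in deriving per-setup slices from a shared default: the bench variant further below caps the default slice's capacity with a full slice expression, defaultSetup[:len(defaultSetup):len(defaultSetup)], so that appends allocate fresh backing arrays instead of scribbling over a sibling setup. A toy program showing the failure mode that prevents:

package main

import "fmt"

func main() {
	base := make([]string, 0, 4)
	base = append(base, "RESET a", "SET b")

	// Without capping capacity, both appends write into base's spare
	// capacity: the second append clobbers the first one's element.
	s1 := append(base, "SET vectorize=off")
	s2 := append(base, "SET vectorize=on")
	fmt.Println(s1[2], s2[2]) // both print "SET vectorize=on"

	// The full slice expression caps capacity at the current length,
	// forcing each append to allocate a fresh backing array.
	capped := base[:len(base):len(base)]
	t1 := append(capped, "SET vectorize=off")
	t2 := append(capped, "SET vectorize=on")
	fmt.Println(t1[2], t2[2]) // "SET vectorize=off" "SET vectorize=on"
}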
+ defaultSetup := runConfig.clusterSetups[0] + runConfig.clusterSetups = append(runConfig.clusterSetups, make([]string, len(defaultSetup))) + copy(runConfig.clusterSetups[1], defaultSetup) + runConfig.clusterSetups[tpchPerfTestVecOffConfigIdx] = append(runConfig.clusterSetups[tpchPerfTestVecOffConfigIdx], + fmt.Sprintf("SET CLUSTER SETTING sql.defaults.vectorize=%s", vectorizeOptionToSetting(false, version))) + runConfig.clusterSetups[tpchPerfTestVecOnConfigIdx] = append(runConfig.clusterSetups[tpchPerfTestVecOnConfigIdx], + fmt.Sprintf("SET CLUSTER SETTING sql.defaults.vectorize=%s", vectorizeOptionToSetting(true, version))) + runConfig.setupNames = make([]string, 2) + runConfig.setupNames[tpchPerfTestVecOffConfigIdx] = "off" + runConfig.setupNames[tpchPerfTestVecOnConfigIdx] = "on" + return runConfig +} + +func (p tpchVecPerfTest) preTestRunHook( + ctx context.Context, + t *test, + c *cluster, + conn *gosql.DB, + version crdbVersion, + clusterSetup []string, +) { + p.tpchVecTestCaseBase.preTestRunHook(ctx, t, c, conn, version, clusterSetup) + if !p.disableStatsCreation { + createStatsFromTables(t, conn, tpchTables) + } +} + +func (p *tpchVecPerfTest) postQueryRunHook(t *test, output []byte, setupIdx int) { + p.parseQueryOutput(t, output, setupIdx) +} + +func (p *tpchVecPerfTest) postTestRunHook( + ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion, +) { + runConfig := p.getRunConfig(version, queriesToSkipByVersion[version]) + t.Status("comparing the runtimes (only median values for each query are compared)") + for _, queryNum := range runConfig.queriesToRun { + findMedian := func(times []float64) float64 { + sort.Float64s(times) + return times[len(times)/2] } - queriesToSkip := queriesToSkipByVersion[version] - runConfig := runConfigs[option] - rng, _ := randutil.NewPseudoRand() - if runConfig.stressDiskSpilling { - // In order to stress the disk spilling of the vectorized - // engine, we will set workmem limit to a random value in range - // [16KiB, 256KiB). - workmemInKiB := 16 + rng.Intn(240) - workmem := fmt.Sprintf("%dKiB", workmemInKiB) - t.Status(fmt.Sprintf("setting workmem='%s'", workmem)) - if _, err := conn.Exec(fmt.Sprintf("SET CLUSTER SETTING sql.distsql.temp_storage.workmem='%s'", workmem)); err != nil { - t.Fatal(err) - } + vecOnTimes := p.timeByQueryNum[tpchPerfTestVecOnConfigIdx][queryNum] + vecOffTimes := p.timeByQueryNum[tpchPerfTestVecOffConfigIdx][queryNum] + if len(vecOnTimes) != runConfig.numRunsPerQuery { + t.Fatal(fmt.Sprintf("[q%d] unexpectedly wrong number of run times "+ + "recorded with vec ON config: %v", queryNum, vecOnTimes)) + } + if len(vecOffTimes) != runConfig.numRunsPerQuery { + t.Fatal(fmt.Sprintf("[q%d] unexpectedly wrong number of run times "+ + "recorded with vec OFF config: %v", queryNum, vecOffTimes)) + } + vecOnTime := findMedian(vecOnTimes) + vecOffTime := findMedian(vecOffTimes) + if vecOffTime < vecOnTime { + t.l.Printf( + fmt.Sprintf("[q%d] vec OFF was faster by %.2f%%: "+ + "%.2fs ON vs %.2fs OFF --- WARNING\n"+ + "vec ON times: %v\t vec OFF times: %v", + queryNum, 100*(vecOnTime-vecOffTime)/vecOffTime, + vecOnTime, vecOffTime, vecOnTimes, vecOffTimes)) } else { - // We are interested in the performance comparison between - // vectorized and row-by-row engines, so we will reset workmem - // limit to the default value. 
- t.Status("resetting workmem to default") - if _, err := conn.Exec("RESET CLUSTER SETTING sql.distsql.temp_storage.workmem"); err != nil { - t.Fatal(err) - } + t.l.Printf( + fmt.Sprintf("[q%d] vec ON was faster by %.2f%%: "+ + "%.2fs ON vs %.2fs OFF\n"+ + "vec ON times: %v\t vec OFF times: %v", + queryNum, 100*(vecOffTime-vecOnTime)/vecOnTime, + vecOnTime, vecOffTime, vecOnTimes, vecOffTimes)) } - timeByQueryNum := []map[int][]float64{make(map[int][]float64), make(map[int][]float64)} - for queryNum := 1; queryNum <= numTPCHQueries; queryNum++ { - for configIdx, vectorize := range runConfig.vectorizeOptions { - if reason, skip := queriesToSkip[queryNum]; skip { - t.Status(fmt.Sprintf("skipping q%d because of %q", queryNum, reason)) - continue - } - vectorizeSetting := "off" - if vectorize { - vectorizeSetting = vectorizeOnOptionByVersion[version] - } - cmd := fmt.Sprintf("./workload run tpch --concurrency=1 --db=tpch "+ - "--max-ops=%d --queries=%d --vectorize=%s {pgurl:1-%d}", - runConfig.numRunsPerQuery, queryNum, vectorizeSetting, nodeCount) - workloadOutput, err := c.RunWithBuffer(ctx, t.l, firstNode, cmd) - t.l.Printf("\n" + string(workloadOutput)) - if err != nil { - // Note: if you see an error like "exit status 1", it is likely caused - // by the erroneous output of the query. + if vecOnTime >= getSlownessThreshold(version)*vecOffTime { + // For some reason, the vectorized engine executed the query a lot + // slower than the row-by-row engine which is unexpected. In order + // to understand where the slowness comes from, we will run EXPLAIN + // ANALYZE (DEBUG) of the query with all `vectorize` options + // tpchPerfTestNumRunsPerQuery times (hoping at least one will + // "catch" the slowness). + for setupIdx, setup := range runConfig.clusterSetups { + performClusterSetup(t, conn, setup) + // performClusterSetup has changed the cluster settings; + // however, the session variables might contain the old values, + // so we will open up new connections for each of the setups in + // order to get the correct cluster setup on each. + tempConn := c.Conn(ctx, 1) + defer tempConn.Close() + if _, err := tempConn.Exec("USE tpch;"); err != nil { t.Fatal(err) } - parseOutput := func(output []byte, timeByQueryNum map[int][]float64) { - runtimeRegex := regexp.MustCompile(`.*\[q([\d]+)\] returned \d+ rows after ([\d]+\.[\d]+) seconds.*`) - scanner := bufio.NewScanner(bytes.NewReader(output)) - for scanner.Scan() { - line := scanner.Bytes() - match := runtimeRegex.FindSubmatch(line) - if match != nil { - queryNum, err := strconv.Atoi(string(match[1])) - if err != nil { - t.Fatalf("failed parsing %q as int with %s", match[1], err) - } - queryTime, err := strconv.ParseFloat(string(match[2]), 64) - if err != nil { - t.Fatalf("failed parsing %q as float with %s", match[2], err) - } - timeByQueryNum[queryNum] = append(timeByQueryNum[queryNum], queryTime) + for i := 0; i < runConfig.numRunsPerQuery; i++ { + t.Status(fmt.Sprintf("\nRunning EXPLAIN ANALYZE (DEBUG) for setup=%s\n", runConfig.setupNames[setupIdx])) + rows, err := tempConn.Query(fmt.Sprintf( + "EXPLAIN ANALYZE (DEBUG) %s;", tpch.QueriesByNumber[queryNum], + )) + if err != nil { + t.Fatal(err) + } + // The output of the command looks like: + // Statement diagnostics bundle generated. Download from the Admin UI (Advanced + // Debug -> Statement Diagnostics History), via the direct link below, or using + // the command line. 
+ // Admin UI: http://Yahors-MacBook-Pro.local:8081 + // Direct link: http://Yahors-MacBook-Pro.local:8081/_admin/v1/stmtbundle/574364979110641665 + // Command line: cockroach statement-diag list / download + // We are interested in the line that contains the url that + // we will curl below. + directLinkPrefix := "Direct link: " + var line, url, debugOutput string + for rows.Next() { + if err = rows.Scan(&line); err != nil { + t.Fatal(err) + } + debugOutput += line + "\n" + if strings.HasPrefix(line, directLinkPrefix) { + url = line[len(directLinkPrefix):] + break } } - } - if option == perf { - // We only need to parse the output with 'perf' run option. - parseOutput(workloadOutput, timeByQueryNum[configIdx]) + if err = rows.Close(); err != nil { + t.Fatal(err) + } + if url == "" { + t.Fatal(fmt.Sprintf("unexpectedly didn't find a line "+ + "with %q prefix in EXPLAIN ANALYZE (DEBUG) output\n%s", + directLinkPrefix, debugOutput)) + } + // We will curl into the logs folder so that test runner + // retrieves the bundle together with the log files. + curlCmd := fmt.Sprintf( + "curl %s > logs/bundle_%s_%d.zip", url, runConfig.setupNames[setupIdx], i, + ) + if err = c.RunL(ctx, t.l, c.Node(1), curlCmd); err != nil { + t.Fatal(err) + } } } + t.Fatal(fmt.Sprintf( + "[q%d] vec ON is slower by %.2f%% than vec OFF\n"+ + "vec ON times: %v\nvec OFF times: %v", + queryNum, 100*(vecOnTime-vecOffTime)/vecOffTime, vecOnTimes, vecOffTimes)) } - if option == perf { - // We are only interested in comparison with 'perf' run option. - t.Status("comparing the runtimes (only median values for each query are compared)") - for queryNum := 1; queryNum <= numTPCHQueries; queryNum++ { - if _, skipped := queriesToSkip[queryNum]; skipped { - continue - } - findMedian := func(times []float64) float64 { - sort.Float64s(times) - return times[len(times)/2] - } - vecOnTimes := timeByQueryNum[vecOnConfig][queryNum] - vecOffTimes := timeByQueryNum[vecOffConfig][queryNum] - if len(vecOnTimes) != runConfig.numRunsPerQuery { - t.Fatal(fmt.Sprintf("[q%d] unexpectedly wrong number of run times "+ - "recorded with vec ON config: %v", queryNum, vecOnTimes)) - } - if len(vecOffTimes) != runConfig.numRunsPerQuery { - t.Fatal(fmt.Sprintf("[q%d] unexpectedly wrong number of run times "+ - "recorded with vec OFF config: %v", queryNum, vecOffTimes)) - } - vecOnTime := findMedian(vecOnTimes) - vecOffTime := findMedian(vecOffTimes) - if vecOffTime < vecOnTime { - t.l.Printf( - fmt.Sprintf("[q%d] vec OFF was faster by %.2f%%: "+ - "%.2fs ON vs %.2fs OFF --- WARNING\n"+ - "vec ON times: %v\t vec OFF times: %v", - queryNum, 100*(vecOnTime-vecOffTime)/vecOffTime, - vecOnTime, vecOffTime, vecOnTimes, vecOffTimes)) - } else { - t.l.Printf( - fmt.Sprintf("[q%d] vec ON was faster by %.2f%%: "+ - "%.2fs ON vs %.2fs OFF\n"+ - "vec ON times: %v\t vec OFF times: %v", - queryNum, 100*(vecOffTime-vecOnTime)/vecOnTime, - vecOnTime, vecOffTime, vecOnTimes, vecOffTimes)) - } - if vecOnTime >= slownessThresholdByVersion[version]*vecOffTime { - t.Fatal(fmt.Sprintf( - "[q%d] vec ON is slower by %.2f%% than vec OFF\n"+ - "vec ON times: %v\nvec OFF times: %v", - queryNum, 100*(vecOnTime-vecOffTime)/vecOffTime, vecOnTimes, vecOffTimes)) - } + } +} + +type tpchVecBenchTest struct { + tpchVecTestCaseBase + *tpchVecPerfHelper + + numRunsPerQuery int + queriesToRun []int + clusterSetups [][]string + setupNames []string +} + +var _ tpchVecTestCase = &tpchVecBenchTest{} + +func (b tpchVecBenchTest) getRunConfig(version crdbVersion, _ map[int]string) 
tpchVecTestRunConfig { + runConfig := b.tpchVecTestCaseBase.getRunConfig(version, queriesToSkipByVersion[version]) + runConfig.numRunsPerQuery = b.numRunsPerQuery + if b.queriesToRun != nil { + runConfig.queriesToRun = b.queriesToRun + } + defaultSetup := runConfig.clusterSetups[0] + // We slice up defaultSetup to make sure that new slices are allocated in + // appends below. + defaultSetup = defaultSetup[:len(defaultSetup):len(defaultSetup)] + runConfig.clusterSetups = make([][]string, len(b.clusterSetups)) + runConfig.setupNames = b.setupNames + for setupIdx, configSetup := range b.clusterSetups { + runConfig.clusterSetups[setupIdx] = append(defaultSetup, configSetup...) + } + return runConfig +} + +func (b *tpchVecBenchTest) postQueryRunHook(t *test, output []byte, setupIdx int) { + b.tpchVecPerfHelper.parseQueryOutput(t, output, setupIdx) +} + +func (b *tpchVecBenchTest) postTestRunHook( + ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion, +) { + runConfig := b.getRunConfig(version, queriesToSkipByVersion[version]) + t.Status("comparing the runtimes (average of values (excluding best and worst) for each query are compared)") + // A score for a single query is calculated as + // / , + // and then all query scores are summed. So the lower the total score, the + // better the config is. + scores := make([]float64, len(runConfig.setupNames)) + for _, queryNum := range runConfig.queriesToRun { + // findAvgTime finds the average of times excluding best and worst as + // possible outliers. It expects that len(times) >= 3. + findAvgTime := func(times []float64) float64 { + if len(times) < 3 { + t.Fatal(fmt.Sprintf("unexpectedly query %d ran %d times on one of the setups", queryNum, len(times))) + } + sort.Float64s(times) + sum, count := 0.0, 0 + for _, time := range times[1 : len(times)-1] { + sum += time + count++ } + return sum / float64(count) + } + bestTime := math.MaxFloat64 + var bestSetupIdx int + for setupIdx := range runConfig.setupNames { + setupTime := findAvgTime(b.timeByQueryNum[setupIdx][queryNum]) + if setupTime < bestTime { + bestTime = setupTime + bestSetupIdx = setupIdx + } + } + t.l.Printf(fmt.Sprintf("[q%d] best setup is %s", queryNum, runConfig.setupNames[bestSetupIdx])) + for setupIdx, setupName := range runConfig.setupNames { + setupTime := findAvgTime(b.timeByQueryNum[setupIdx][queryNum]) + scores[setupIdx] += setupTime / bestTime + t.l.Printf(fmt.Sprintf("[q%d] setup %s took %.2fs", queryNum, setupName, setupTime)) } } + t.Status("----- scores of the setups -----") + bestScore := math.MaxFloat64 + var bestSetupIdx int + for setupIdx, setupName := range runConfig.setupNames { + score := scores[setupIdx] + t.l.Printf(fmt.Sprintf("score of %s is %.2f", setupName, score)) + if bestScore > score { + bestScore = score + bestSetupIdx = setupIdx + } + } + t.Status(fmt.Sprintf("----- best setup is %s -----", runConfig.setupNames[bestSetupIdx])) +} +type tpchVecDiskTest struct { + tpchVecTestCaseBase +} + +func (d tpchVecDiskTest) preTestRunHook( + ctx context.Context, + t *test, + c *cluster, + conn *gosql.DB, + version crdbVersion, + clusterSetup []string, +) { + d.tpchVecTestCaseBase.preTestRunHook(ctx, t, c, conn, version, clusterSetup) + createStatsFromTables(t, conn, tpchTables) + // In order to stress the disk spilling of the vectorized + // engine, we will set workmem limit to a random value in range + // [16KiB, 256KiB). 
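The randomized workmem setup that follows, condensed into a standalone helper for reference (same bounds and cluster setting; the helper name is illustrative):

package sketch

import (
	gosql "database/sql"
	"fmt"
	"math/rand"
)

// setRandomWorkmem picks a workmem limit in [16KiB, 256KiB) and applies
// it cluster-wide, forcing the vectorized engine to spill to disk, as
// the disk test's preTestRunHook below does inline.
func setRandomWorkmem(conn *gosql.DB, rng *rand.Rand) (string, error) {
	workmem := fmt.Sprintf("%dKiB", 16+rng.Intn(240))
	_, err := conn.Exec(fmt.Sprintf(
		"SET CLUSTER SETTING sql.distsql.temp_storage.workmem='%s'", workmem))
	return workmem, err
}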
+ rng, _ := randutil.NewPseudoRand() + workmemInKiB := 16 + rng.Intn(240) + workmem := fmt.Sprintf("%dKiB", workmemInKiB) + t.Status(fmt.Sprintf("setting workmem='%s'", workmem)) + if _, err := conn.Exec(fmt.Sprintf("SET CLUSTER SETTING sql.distsql.temp_storage.workmem='%s'", workmem)); err != nil { + t.Fatal(err) + } +} + +// setSmallBatchSize sets a cluster setting to override the batch size to be in +// [1, 5) range. +func setSmallBatchSize(t *test, conn *gosql.DB, rng *rand.Rand) { + batchSize := 1 + rng.Intn(4) + t.Status(fmt.Sprintf("setting sql.testing.vectorize.batch_size to %d", batchSize)) + if _, err := conn.Exec(fmt.Sprintf("SET CLUSTER SETTING sql.testing.vectorize.batch_size=%d", batchSize)); err != nil { + t.Fatal(err) + } +} + +type tpchVecSmallBatchSizeTest struct { + tpchVecTestCaseBase +} + +func (b tpchVecSmallBatchSizeTest) preTestRunHook( + ctx context.Context, + t *test, + c *cluster, + conn *gosql.DB, + version crdbVersion, + clusterSetup []string, +) { + b.tpchVecTestCaseBase.preTestRunHook(ctx, t, c, conn, version, clusterSetup) + createStatsFromTables(t, conn, tpchTables) + rng, _ := randutil.NewPseudoRand() + setSmallBatchSize(t, conn, rng) +} + +func baseTestRun( + ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion, tc tpchVecTestCase, +) { + firstNode := c.Node(1) + runConfig := tc.getRunConfig(version, queriesToSkipByVersion[version]) + for setupIdx, setup := range runConfig.clusterSetups { + t.Status(fmt.Sprintf("running setup=%s", runConfig.setupNames[setupIdx])) + tc.preTestRunHook(ctx, t, c, conn, version, setup) + for _, queryNum := range runConfig.queriesToRun { + // Note that we use --default-vectorize flag which tells tpch + // workload to use the current cluster setting + // sql.defaults.vectorize which must have been set correctly in + // preTestRunHook. + cmd := fmt.Sprintf("./workload run tpch --concurrency=1 --db=tpch "+ + "--default-vectorize --max-ops=%d --queries=%d {pgurl:1}", + runConfig.numRunsPerQuery, queryNum) + workloadOutput, err := c.RunWithBuffer(ctx, t.l, firstNode, cmd) + t.l.Printf("\n" + string(workloadOutput)) + if err != nil { + // Note: if you see an error like "exit status 1", it is likely caused + // by the erroneous output of the query. + t.Fatal(err) + } + tc.postQueryRunHook(t, workloadOutput, setupIdx) + } + } +} + +type tpchVecSmithcmpTest struct { + tpchVecTestCaseBase +} + +const tpchVecSmithcmp = "smithcmp" + +func (s tpchVecSmithcmpTest) preTestRunHook( + ctx context.Context, + t *test, + c *cluster, + conn *gosql.DB, + version crdbVersion, + clusterSetup []string, +) { + s.tpchVecTestCaseBase.preTestRunHook(ctx, t, c, conn, version, clusterSetup) + createStatsFromTables(t, conn, tpchTables) + const smithcmpSHA = "a3f41f5ba9273249c5ecfa6348ea8ee3ac4b77e3" + node := c.Node(1) + if local && runtime.GOOS != "linux" { + t.Fatalf("must run on linux os, found %s", runtime.GOOS) + } + // This binary has been manually compiled using + // './build/builder.sh go build ./pkg/cmd/smithcmp' and uploaded to S3 + // bucket at cockroach/smithcmp. The binary shouldn't change much, so it is + // acceptable. + smithcmp, err := binfetcher.Download(ctx, binfetcher.Options{ + Component: tpchVecSmithcmp, + Binary: tpchVecSmithcmp, + Version: smithcmpSHA, + GOOS: "linux", + GOARCH: "amd64", + }) + if err != nil { + t.Fatal(err) + } + c.Put(ctx, smithcmp, "./"+tpchVecSmithcmp, node) + // To increase test coverage, we will be randomizing the batch size in 50% + // of the runs. 
+ rng, _ := randutil.NewPseudoRand() + if rng.Float64() < 0.5 { + setSmallBatchSize(t, conn, rng) + } +} + +func smithcmpTestRun( + ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion, tc tpchVecTestCase, +) { + runConfig := tc.getRunConfig(version, queriesToSkipByVersion[version]) + tc.preTestRunHook(ctx, t, c, conn, version, runConfig.clusterSetups[0]) + const ( + configFile = `tpchvec_smithcmp.toml` + configURL = `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/` + configFile + ) + firstNode := c.Node(1) + if err := c.RunE(ctx, firstNode, fmt.Sprintf("curl %s > %s", configURL, configFile)); err != nil { + t.Fatal(err) + } + cmd := fmt.Sprintf("./%s %s", tpchVecSmithcmp, configFile) + if err := c.RunE(ctx, firstNode, cmd); err != nil { + t.Fatal(err) + } +} + +func runTPCHVec( + ctx context.Context, + t *test, + c *cluster, + testCase tpchVecTestCase, + testRun func(ctx context.Context, t *test, c *cluster, conn *gosql.DB, version crdbVersion, tc tpchVecTestCase), +) { + firstNode := c.Node(1) + c.Put(ctx, cockroach, "./cockroach", c.All()) + c.Put(ctx, workload, "./workload", firstNode) + c.Start(ctx, t) + + conn := c.Conn(ctx, 1) + disableAutoStats(t, conn) + disableVectorizeRowCountThresholdHeuristic(t, conn) + t.Status("restoring TPCH dataset for Scale Factor 1") + if err := loadTPCHDataset(ctx, t, c, 1 /* sf */, newMonitor(ctx, c), c.All()); err != nil { + t.Fatal(err) + } + + if _, err := conn.Exec("USE tpch;"); err != nil { + t.Fatal(err) + } + scatterTables(t, conn, tpchTables) + t.Status("waiting for full replication") + waitForFullReplication(t, conn) + versionString, err := fetchCockroachVersion(ctx, c, c.Node(1)[0]) + if err != nil { + t.Fatal(err) + } + version, err := toCRDBVersion(versionString) + if err != nil { + t.Fatal(err) + } + + testRun(ctx, t, c, conn, version, testCase) + testCase.postTestRunHook(ctx, t, c, conn, version) +} + +const tpchVecNodeCount = 3 + +func registerTPCHVec(r *testRegistry) { r.Add(testSpec{ Name: "tpchvec/perf", Owner: OwnerSQLExec, - Cluster: makeClusterSpec(nodeCount), + Cluster: makeClusterSpec(tpchVecNodeCount), MinVersion: "v19.2.0", Run: func(ctx context.Context, t *test, c *cluster) { - runTPCHVec(ctx, t, c, perf) + runTPCHVec(ctx, t, c, newTpchVecPerfTest(false /* disableStatsCreation */), baseTestRun) }, }) + r.Add(testSpec{ Name: "tpchvec/disk", Owner: OwnerSQLExec, - Cluster: makeClusterSpec(nodeCount), + Cluster: makeClusterSpec(tpchVecNodeCount), // 19.2 version doesn't have disk spilling nor memory monitoring, so // there is no point in running this config on that version. MinVersion: "v20.1.0", Run: func(ctx context.Context, t *test, c *cluster) { - runTPCHVec(ctx, t, c, stressDiskSpilling) + runTPCHVec(ctx, t, c, tpchVecDiskTest{}, baseTestRun) + }, + }) + + r.Add(testSpec{ + Name: "tpchvec/smallbatchsize", + Owner: OwnerSQLExec, + Cluster: makeClusterSpec(tpchVecNodeCount), + // 19.2 version doesn't have the testing cluster setting to change the batch + // size, so only run on versions >= 20.1.0. 
+ MinVersion: "v20.1.0", + Run: func(ctx context.Context, t *test, c *cluster) { + runTPCHVec(ctx, t, c, tpchVecSmallBatchSizeTest{}, baseTestRun) + }, + }) + + r.Add(testSpec{ + Name: "tpchvec/smithcmp", + Owner: OwnerSQLExec, + Cluster: makeClusterSpec(tpchVecNodeCount), + MinVersion: "v20.1.0", + Run: func(ctx context.Context, t *test, c *cluster) { + runTPCHVec(ctx, t, c, tpchVecSmithcmpTest{}, smithcmpTestRun) }, }) } diff --git a/pkg/cmd/roachtest/tpchvec_smithcmp.toml b/pkg/cmd/roachtest/tpchvec_smithcmp.toml new file mode 100644 index 000000000000..951103a22d53 --- /dev/null +++ b/pkg/cmd/roachtest/tpchvec_smithcmp.toml @@ -0,0 +1,519 @@ +smither = "vec-off" +seed = -1 +timeoutmins = 30 +stmttimeoutsecs = 120 + +sql = [ +""" +SELECT + l_returnflag, + l_linestatus, + sum(l_quantity) AS sum_qty, + sum(l_extendedprice) AS sum_base_price, + sum(l_extendedprice * (1 - l_discount)) AS sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge, + avg(l_quantity) AS avg_qty, + avg(l_extendedprice) AS avg_price, + avg(l_discount) AS avg_disc, + count(*) AS count_order +FROM + lineitem +WHERE + l_shipdate <= $1::DATE - $2::INTERVAL +GROUP BY + l_returnflag, + l_linestatus +ORDER BY + l_returnflag, + l_linestatus; +""", +""" +SELECT + s_acctbal, + s_name, + n_name, + p_partkey, + p_mfgr, + s_address, + s_phone, + s_comment +FROM + part, + supplier, + partsupp, + nation, + region +WHERE + p_partkey = ps_partkey + AND s_suppkey = ps_suppkey + AND p_size = $1 + AND p_type LIKE '%BRASS' + AND s_nationkey = n_nationkey + AND n_regionkey = r_regionkey + AND r_name = 'EUROPE' + AND ps_supplycost = ( + SELECT + min(ps_supplycost) + FROM + partsupp, + supplier, + nation, + region + WHERE + p_partkey = ps_partkey + AND s_suppkey = ps_suppkey + AND s_nationkey = n_nationkey + AND n_regionkey = r_regionkey + AND r_name = 'EUROPE' + ) +ORDER BY + s_acctbal DESC, + n_name, + s_name, + p_partkey +LIMIT 100; +""", +""" +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) AS revenue, + o_orderdate, + o_shippriority +FROM + customer, + orders, + lineitem +WHERE + c_mktsegment = 'BUILDING' + AND c_custkey = o_custkey + AND l_orderkey = o_orderkey + AND o_orderDATE < $1::DATE + AND l_shipdate > $2::DATE +GROUP BY + l_orderkey, + o_orderdate, + o_shippriority +ORDER BY + revenue DESC, + o_orderdate +LIMIT 10; +""", +""" +SELECT + o_orderpriority, + count(*) AS order_count +FROM + orders +WHERE + o_orderdate >= $1::DATE + AND o_orderdate < $2::DATE + $3::INTERVAL + AND EXISTS ( + SELECT + * + FROM + lineitem + WHERE + l_orderkey = o_orderkey + AND l_commitDATE < l_receiptdate + ) +GROUP BY + o_orderpriority +ORDER BY + o_orderpriority; +""", +""" +SELECT + n_name, + sum(l_extendedprice * (1 - l_discount)) AS revenue +FROM + customer, + orders, + lineitem, + supplier, + nation, + region +WHERE + c_custkey = o_custkey + AND l_orderkey = o_orderkey + AND l_suppkey = s_suppkey + AND c_nationkey = s_nationkey + AND s_nationkey = n_nationkey + AND n_regionkey = r_regionkey + AND r_name = 'ASIA' + AND o_orderDATE >= $1::DATE + AND o_orderDATE < $2::DATE + $3::INTERVAL +GROUP BY + n_name +ORDER BY + revenue DESC; +""", +""" +SELECT + sum(l_extendedprice * l_discount) AS revenue +FROM + lineitem +WHERE + l_shipdate >= $1::DATE + AND l_shipdate < $2::DATE + $3::INTERVAL + AND l_discount BETWEEN $4::FLOAT8 - $5::FLOAT8 AND $6::FLOAT8 + $7::FLOAT8 + AND l_quantity < $8::FLOAT8; +""", +""" +SELECT + supp_nation, + cust_nation, + l_year, + sum(volume) AS revenue +FROM + ( + SELECT + 
n1.n_name AS supp_nation, + n2.n_name AS cust_nation, + EXTRACT(year FROM l_shipdate) AS l_year, + l_extendedprice * (1 - l_discount) AS volume + FROM + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2 + WHERE + s_suppkey = l_suppkey + AND o_orderkey = l_orderkey + AND c_custkey = o_custkey + AND s_nationkey = n1.n_nationkey + AND c_nationkey = n2.n_nationkey + AND ( + (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') + or (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + ) + AND l_shipdate BETWEEN $1::DATE AND $2::DATE + ) AS shipping +GROUP BY + supp_nation, + cust_nation, + l_year +ORDER BY + supp_nation, + cust_nation, + l_year; +""", +""" +SELECT + o_year, + sum(CASE + WHEN nation = 'BRAZIL' THEN volume + ELSE 0 + END) / sum(volume) AS mkt_share +FROM + ( + SELECT + EXTRACT(year FROM o_orderdate) AS o_year, + l_extendedprice * (1 - l_discount) AS volume, + n2.n_name AS nation + FROM + part, + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2, + region + WHERE + p_partkey = l_partkey + AND s_suppkey = l_suppkey + AND l_orderkey = o_orderkey + AND o_custkey = c_custkey + AND c_nationkey = n1.n_nationkey + AND n1.n_regionkey = r_regionkey + AND r_name = 'AMERICA' + AND s_nationkey = n2.n_nationkey + AND o_orderdate BETWEEN $1::DATE AND $2::DATE + AND p_type = 'ECONOMY ANODIZED STEEL' + ) AS all_nations +GROUP BY + o_year +ORDER BY + o_year; +""", +""" +SELECT + c_custkey, + c_name, + sum(l_extendedprice * (1 - l_discount)) AS revenue, + c_acctbal, + n_name, + c_address, + c_phone, + c_comment +FROM + customer, + orders, + lineitem, + nation +WHERE + c_custkey = o_custkey + AND l_orderkey = o_orderkey + AND o_orderDATE >= $1::DATE + AND o_orderDATE < $2::DATE + $3::INTERVAL + AND l_returnflag = 'R' + AND c_nationkey = n_nationkey +GROUP BY + c_custkey, + c_name, + c_acctbal, + c_phone, + n_name, + c_address, + c_comment +ORDER BY + revenue DESC +LIMIT 20; +""", +""" +SELECT + ps_partkey, + sum(ps_supplycost * ps_availqty::float) AS value +FROM + partsupp, + supplier, + nation +WHERE + ps_suppkey = s_suppkey + AND s_nationkey = n_nationkey + AND n_name = 'GERMANY' +GROUP BY + ps_partkey HAVING + sum(ps_supplycost * ps_availqty::float) > ( + SELECT + sum(ps_supplycost * ps_availqty::float) * $1::FLOAT8 + FROM + partsupp, + supplier, + nation + WHERE + ps_suppkey = s_suppkey + AND s_nationkey = n_nationkey + AND n_name = 'GERMANY' + ) +ORDER BY + value DESC, ps_partkey; +""", +""" +SELECT + l_shipmode, + sum(CASE + WHEN o_orderpriority = '1-URGENT' + or o_orderpriority = '2-HIGH' + THEN 1 + ELSE 0 + END) AS high_line_count, + sum(CASE + WHEN o_orderpriority <> '1-URGENT' + AND o_orderpriority <> '2-HIGH' + THEN 1 + ELSE 0 + END) AS low_line_count +FROM + orders, + lineitem +WHERE + o_orderkey = l_orderkey + AND l_shipmode IN ('MAIL', 'SHIP') + AND l_commitdate < l_receiptdate + AND l_shipdate < l_commitdate + AND l_receiptdate >= $1::DATE + AND l_receiptdate < $2::DATE + $3::INTERVAL +GROUP BY + l_shipmode +ORDER BY + l_shipmode; +""", +""" +SELECT + 100.00 * sum(CASE + WHEN p_type LIKE 'PROMO%' + THEN l_extendedprice * (1 - l_discount) + ELSE 0 + END) / sum(l_extendedprice * (1 - l_discount)) AS promo_revenue +FROM + lineitem, + part +WHERE + l_partkey = p_partkey + AND l_shipdate >= $1::DATE + AND l_shipdate < $2::DATE + $3::INTERVAL; +""", +""" +SELECT + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice, + sum(l_quantity) +FROM + customer, + orders, + lineitem +WHERE + o_orderkey IN ( + SELECT + l_orderkey + FROM + lineitem + GROUP BY 
+ l_orderkey HAVING + sum(l_quantity) > $1::INT8 + ) + AND c_custkey = o_custkey + AND o_orderkey = l_orderkey +GROUP BY + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice +ORDER BY + o_totalprice DESC, + o_orderdate +LIMIT 100; +""", +""" +SELECT + sum(l_extendedprice* (1 - l_discount)) AS revenue +FROM + lineitem, + part +WHERE + ( + p_partkey = l_partkey + AND p_brand = 'Brand#12' + AND p_container IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') + AND l_quantity >= $1::INT8 AND l_quantity <= $2::INT8 + $3::INT8 + AND p_size BETWEEN $4::INT8 AND $5::INT8 + AND l_shipmode IN ('AIR', 'AIR REG') + AND l_shipinstruct = 'DELIVER IN PERSON' + ) + OR + ( + p_partkey = l_partkey + AND p_brand = 'Brand#23' + AND p_container IN ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') + AND l_quantity >= $6::INT8 AND l_quantity <= $7::INT8 + $8::INT8 + AND p_size BETWEEN $9::INT8 AND $10::INT8 + AND l_shipmode IN ('AIR', 'AIR REG') + AND l_shipinstruct = 'DELIVER IN PERSON' + ) + OR + ( + p_partkey = l_partkey + AND p_brand = 'Brand#34' + AND p_container IN ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') + AND l_quantity >= $11::INT8 AND l_quantity <= $12::INT8 + $13::INT8 + AND p_size BETWEEN $14::INT8 AND $15::INT8 + AND l_shipmode IN ('AIR', 'AIR REG') + AND l_shipinstruct = 'DELIVER IN PERSON' + ); +""", +""" +SELECT + s_name, + s_address +FROM + supplier, + nation +WHERE + s_suppkey IN ( + SELECT + ps_suppkey + FROM + partsupp + WHERE + ps_partkey IN ( + SELECT + p_partkey + FROM + part + WHERE + p_name LIKE 'forest%' + ) + AND ps_availqty > ( + SELECT + $1::FLOAT8 * sum(l_quantity) + FROM + lineitem + WHERE + l_partkey = ps_partkey + AND l_suppkey = ps_suppkey + AND l_shipdate >= $2::DATE + AND l_shipdate < $3::DATE + $4::INTERVAL + ) + ) + AND s_nationkey = n_nationkey + AND n_name = 'CANADA' +ORDER BY + s_name; +""", +""" +SELECT + cntrycode, + count(*) AS numcust, + sum(c_acctbal) AS totacctbal +FROM + ( + SELECT + substring(c_phone FROM $1::INT4 FOR $2::INT4) AS cntrycode, + c_acctbal + FROM + customer + WHERE + substring(c_phone FROM $3::INT4 FOR $4::INT4) in + ('13', '31', '23', '29', '30', '18', '17') + AND c_acctbal > ( + SELECT + avg(c_acctbal) + FROM + customer + WHERE + c_acctbal > $5::FLOAT8 + AND substring(c_phone FROM $6::INT4 FOR $7::INT4) in + ('13', '31', '23', '29', '30', '18', '17') + ) + AND NOT EXISTS ( + SELECT + * + FROM + orders + WHERE + o_custkey = c_custkey + ) + ) AS custsale +GROUP BY + cntrycode +ORDER BY + cntrycode; +""", +] + +# Missing: 9, 13, 15, 16, 17, 21 +# These are missing either because 1) they use a CREATE VIEW, or 2) +# they don't have any parameters that make sense to randomize, and we'd +# thus be executing the same query each time. Queries that don't change +# should be tested in other places; smithcmp is for random testing. + +[databases.vec-off] +addr = "postgresql://root@localhost:26257/tpch?sslmode=disable" +allowmutations = true +initsql = """ +set vectorize=off; +""" + +[databases.vec-on] +addr = "postgresql://root@localhost:26257/tpch?sslmode=disable" +allowmutations = true +initsql = """ +set vectorize=on; +""" diff --git a/pkg/cmd/roachtest/typeorm.go b/pkg/cmd/roachtest/typeorm.go index 918282202abe..51eb610a9a94 100644 --- a/pkg/cmd/roachtest/typeorm.go +++ b/pkg/cmd/roachtest/typeorm.go @@ -18,6 +18,7 @@ import ( ) var typeORMReleaseTagRegex = regexp.MustCompile(`^(?P\d+)\.(?P\d+)\.(?P\d+)$`) +var supportedTypeORMRelease = "0.2.24" // This test runs TypeORM's full test suite against a single cockroach node. 
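+// The test clones the TypeORM repository but checks out the pinned
+// supportedTypeORMRelease above rather than the latest release tag (the
+// latest tag is still fetched and logged for reference), so that new
+// upstream releases cannot silently change what the nightly exercises.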
func registerTypeORM(r *testRegistry) { @@ -34,12 +35,22 @@ func registerTypeORM(r *testRegistry) { c.Put(ctx, cockroach, "./cockroach", c.All()) c.Start(ctx, t, c.All()) + version, err := fetchCockroachVersion(ctx, c, node[0]) + if err != nil { + t.Fatal(err) + } + + if err := alterZoneConfigAndClusterSettings(ctx, version, c, node[0]); err != nil { + t.Fatal(err) + } + t.Status("cloning TypeORM and installing prerequisites") latestTag, err := repeatGetLatestTag(ctx, c, "typeorm", "typeorm", typeORMReleaseTagRegex) if err != nil { t.Fatal(err) } c.l.Printf("Latest TypeORM release is %s.", latestTag) + c.l.Printf("Supported TypeORM release is %s.", supportedTypeORMRelease) if err := repeatRunE( ctx, c, node, "update apt-get", `sudo apt-get -qq update`, @@ -92,7 +103,7 @@ func registerTypeORM(r *testRegistry) { c, "https://github.com/typeorm/typeorm.git", "/mnt/data1/typeorm", - latestTag, + supportedTypeORMRelease, node, ); err != nil { t.Fatal(err) diff --git a/pkg/cmd/roachtest/upgrade.go b/pkg/cmd/roachtest/upgrade.go deleted file mode 100644 index ada34d79fb41..000000000000 --- a/pkg/cmd/roachtest/upgrade.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2018 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package main - -import ( - "context" - "fmt" - "math/rand" - "runtime" - "time" - - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/testutils" - "github.com/cockroachdb/cockroach/pkg/util/binfetcher" - "github.com/cockroachdb/cockroach/pkg/util/retry" - _ "github.com/lib/pq" - "github.com/pkg/errors" -) - -func registerUpgrade(r *testRegistry) { - runUpgrade := func(ctx context.Context, t *test, c *cluster, oldVersion string) { - nodes := c.spec.NodeCount - goos := ifLocal(runtime.GOOS, "linux") - - b, err := binfetcher.Download(ctx, binfetcher.Options{ - Binary: "cockroach", - Version: "v" + oldVersion, - GOOS: goos, - GOARCH: "amd64", - }) - if err != nil { - t.Fatal(err) - } - - c.Put(ctx, b, "./cockroach", c.Range(1, nodes)) - - // NB: remove startArgsDontEncrypt across this file once we're not running - // roachtest against v2.1 any more (which would start a v2.0 cluster here). - c.Start(ctx, t, c.Range(1, nodes), startArgsDontEncrypt) - - const stageDuration = 30 * time.Second - const timeUntilStoreDead = 90 * time.Second - const buff = 10 * time.Second - - sleep := func(ts time.Duration) error { - t.WorkerStatus("sleeping") - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(ts): - return nil - } - } - - db := c.Conn(ctx, 1) - defer db.Close() - // Without this line, the test reliably fails (at least on OSX), presumably - // because a connection to a node that gets restarted somehow sticks around - // in the pool and throws an error at the next client using it (instead of - // transparently reconnecting). 
- db.SetMaxIdleConns(0) - - if _, err := db.ExecContext(ctx, - "SET CLUSTER SETTING server.time_until_store_dead = $1", timeUntilStoreDead.String(), - ); err != nil { - t.Fatal(err) - } - - if err := sleep(stageDuration); err != nil { - t.Fatal(err) - } - - decommissionAndStop := func(node int) error { - t.WorkerStatus("decomission") - port := fmt.Sprintf("{pgport:%d}", node) - // Note that the following command line needs to run against both v2.1 - // and the current branch. Do not change it in a manner that is - // incompatible with 2.1. - if err := c.RunE(ctx, c.Node(node), "./cockroach quit --decommission --insecure --port="+port); err != nil { - return err - } - t.WorkerStatus("stop") - c.Stop(ctx, c.Node(node)) - return nil - } - - clusterVersion := func() (string, error) { - var version string - if err := db.QueryRowContext(ctx, `SHOW CLUSTER SETTING version`).Scan(&version); err != nil { - return "", errors.Wrap(err, "determining cluster version") - } - return version, nil - } - - oldVersion, err = clusterVersion() - if err != nil { - t.Fatal(err) - } - - checkUpgraded := func() (bool, error) { - upgradedVersion, err := clusterVersion() - if err != nil { - return false, err - } - return upgradedVersion != oldVersion, nil - } - - checkDowngradeOption := func(version string) error { - if _, err := db.ExecContext(ctx, - "SET CLUSTER SETTING cluster.preserve_downgrade_option = $1;", version, - ); err == nil { - return fmt.Errorf("cluster.preserve_downgrade_option shouldn't be set to any other values besides current cluster version; was able to set it to %s", version) - } else if !testutils.IsError(err, "cannot set cluster.preserve_downgrade_option") { - return err - } - return nil - } - - // Now perform a rolling restart into the new binary, except the last node. - for i := 1; i < nodes; i++ { - t.WorkerStatus("upgrading ", i) - if err := c.StopCockroachGracefullyOnNode(ctx, i); err != nil { - t.Fatal(err) - } - c.Put(ctx, cockroach, "./cockroach", c.Node(i)) - c.Start(ctx, t, c.Node(i), startArgsDontEncrypt) - if err := sleep(stageDuration); err != nil { - t.Fatal(err) - } - - // Check cluster version is not upgraded until all nodes are running the new version. - if upgraded, err := checkUpgraded(); err != nil { - t.Fatal(err) - } else if upgraded { - t.Fatal("cluster setting version shouldn't be upgraded before all nodes are running the new version") - } - } - - // Now stop a previously started node and upgrade the last node. - // Check cluster version is not upgraded. - if err := c.StopCockroachGracefullyOnNode(ctx, nodes-1); err != nil { - t.Fatal(err) - } - if err := c.StopCockroachGracefullyOnNode(ctx, nodes); err != nil { - t.Fatal(err) - } - c.Put(ctx, cockroach, "./cockroach", c.Node(nodes)) - c.Start(ctx, t, c.Node(nodes), startArgsDontEncrypt) - if err := sleep(stageDuration); err != nil { - t.Fatal(err) - } - - if upgraded, err := checkUpgraded(); err != nil { - t.Fatal(err) - } else if upgraded { - t.Fatal("cluster setting version shouldn't be upgraded before all non-decommissioned nodes are alive") - } - - // Now decommission and stop n3. - // The decommissioned nodes should not prevent auto upgrade. - if err := decommissionAndStop(nodes - 2); err != nil { - t.Fatal(err) - } - if err := sleep(timeUntilStoreDead + buff); err != nil { - t.Fatal(err) - } - - // Check cannot set cluster setting cluster.preserve_downgrade_option to any - // value besides the old cluster version. 
- if err := checkDowngradeOption("1.9"); err != nil { - t.Fatal(err) - } - if err := checkDowngradeOption("99.9"); err != nil { - t.Fatal(err) - } - - // Set cluster setting cluster.preserve_downgrade_option to be current - // cluster version to prevent upgrade. - if _, err := db.ExecContext(ctx, - "SET CLUSTER SETTING cluster.preserve_downgrade_option = $1;", oldVersion, - ); err != nil { - t.Fatal(err) - } - if err := sleep(stageDuration); err != nil { - t.Fatal(err) - } - - // Restart the previously stopped node. - c.Start(ctx, t, c.Node(nodes-1), startArgsDontEncrypt) - if err := sleep(stageDuration); err != nil { - t.Fatal(err) - } - - t.WorkerStatus("check cluster version has not been upgraded") - if upgraded, err := checkUpgraded(); err != nil { - t.Fatal(err) - } else if upgraded { - t.Fatal("cluster setting version shouldn't be upgraded because cluster.preserve_downgrade_option is set properly") - } - - // Check cannot set cluster setting version until cluster.preserve_downgrade_option - // is cleared. - if _, err := db.ExecContext(ctx, - "SET CLUSTER SETTING version = crdb_internal.node_executable_version();", - ); err == nil { - t.Fatal("should not be able to set cluster setting version before resetting cluster.preserve_downgrade_option") - } else if !testutils.IsError(err, "cluster.preserve_downgrade_option is set to") { - t.Fatal(err) - } - - // Reset cluster.preserve_downgrade_option to enable upgrade. - if _, err := db.ExecContext(ctx, - "RESET CLUSTER SETTING cluster.preserve_downgrade_option;", - ); err != nil { - t.Fatal(err) - } - if err := sleep(stageDuration); err != nil { - t.Fatal(err) - } - - // Check if the cluster version has been upgraded. - t.WorkerStatus("check cluster version has been upgraded") - if upgraded, err := checkUpgraded(); err != nil { - t.Fatal(err) - } else if !upgraded { - t.Fatalf("cluster setting version is not upgraded, still %s", oldVersion) - } - - // Finally, check if the cluster.preserve_downgrade_option has been reset. - t.WorkerStatus("check cluster setting cluster.preserve_downgrade_option has been set to an empty string") - var downgradeVersion string - if err := db.QueryRowContext(ctx, - "SHOW CLUSTER SETTING cluster.preserve_downgrade_option", - ).Scan(&downgradeVersion); err != nil { - t.Fatal(err) - } - if downgradeVersion != "" { - t.Fatalf("cluster setting cluster.preserve_downgrade_option is %s, should be an empty string", downgradeVersion) - } - - // Start n3 again to satisfy the dead node detector. - c.Start(ctx, t, c.Node(nodes-2)) - } - - r.Add(testSpec{ - Name: fmt.Sprintf("upgrade"), - Owner: OwnerKV, - MinVersion: "v2.1.0", - Cluster: makeClusterSpec(5), - Run: func(ctx context.Context, t *test, c *cluster) { - pred, err := PredecessorVersion(r.buildVersion) - if err != nil { - t.Fatal(err) - } - runUpgrade(ctx, t, c, pred) - }, - }) -} - -func runVersionUpgrade(ctx context.Context, t *test, c *cluster) { - // This is ugly, but we can't pass `--encrypt=false` to old versions of - // Cockroach. - c.encryptDefault = false - - nodes := c.Range(1, 3) - goos := ifLocal(runtime.GOOS, "linux") - const headVersion = "HEAD" - - // versionStep is an isolated version migration on a running cluster. 
- type versionStep struct { - clusterVersion string // if empty, use crdb_internal.node_executable_version - run func() - } - - uploadVersion := func(newVersion string) option { - var binary string - if newVersion == headVersion { - binary = cockroach - } else { - var err error - binary, err = binfetcher.Download(ctx, binfetcher.Options{ - Binary: "cockroach", - Version: newVersion, - GOOS: goos, - GOARCH: "amd64", - }) - if err != nil { - t.Fatal(err) - } - } - - target := "./cockroach-" + newVersion - c.Put(ctx, binary, target, nodes) - return startArgs("--binary=" + target) - } - - checkNode := func(nodeIdx int, newVersion string) { - err := retry.ForDuration(30*time.Second, func() error { - db := c.Conn(ctx, nodeIdx) - defer db.Close() - - // 'Version' for 1.1, 'Tag' in 1.0.x. - var version string - if err := db.QueryRow( - `SELECT value FROM crdb_internal.node_build_info where field IN ('Version' , 'Tag')`, - ).Scan(&version); err != nil { - return err - } - if version != newVersion && newVersion != headVersion { - t.Fatalf("created node at v%s, but it is %s", newVersion, version) - } - return nil - }) - if err != nil { - t.Fatal(err) - } - } - - // binaryVersionUpgrade performs a rolling upgrade of the specified nodes in - // the cluster. - binaryVersionUpgrade := func(newVersion string, nodes nodeListOption) versionStep { - return versionStep{ - run: func() { - t.l.Printf("%s: binary\n", newVersion) - args := uploadVersion(newVersion) - - // Restart nodes in a random order; otherwise node 1 would be running all - // the migrations and it probably also has all the leases. - rand.Shuffle(len(nodes), func(i, j int) { - nodes[i], nodes[j] = nodes[j], nodes[i] - }) - for _, node := range nodes { - t.l.Printf("%s: upgrading node %d\n", newVersion, node) - c.Stop(ctx, c.Node(node)) - c.Start(ctx, t, c.Node(node), args, startArgsDontEncrypt) - - checkNode(node, newVersion) - - // TODO(nvanbenschoten): add upgrade qualification step. What should we - // test? We could run logictests. We could add custom logic here. Maybe - // this should all be pushed to nightly migration tests instead. - time.Sleep(1 * time.Second) - } - }, - } - } - - // clusterVersionUpgrade performs a cluster version upgrade to its version. - // It waits until all nodes have seen the upgraded cluster version. - // If newVersion is set, we'll performe a SET CLUSTER SETTING version = - // . If it's not, we'll rely on the automatic cluster version - // upgrade mechanism (which is not inhibited by the - // cluster.preserve_downgrade_option cluster setting in this test. - var currentVersion string - clusterVersionUpgrade := func(newVersion string) versionStep { - return versionStep{ - clusterVersion: newVersion, - run: func() { - manual := newVersion != "" // old binary; needs hacks - - func() { - if manual { - return - } - db1 := c.Conn(ctx, 1) - defer db1.Close() - if err := db1.QueryRow(`SELECT crdb_internal.node_executable_version()`).Scan(&newVersion); err != nil { - t.Fatal(err) - } - t.l.Printf("%s: auto-resolved target version via node_executable_version()\n", newVersion) - }() - t.l.Printf("%s: cluster\n", newVersion) - - // hasShowSettingBug is true when we're working around - // https://github.com/cockroachdb/cockroach/issues/22796. - // - // The problem there is that `SHOW CLUSTER SETTING version` does not - // take into account the gossiped value of that setting but reads it - // straight from the KV store. 
This means that even though a node may - // report a certain version, it may not actually have processed it yet, - // which leads to illegal upgrades in this test. When this flag is set - // to true, we query `crdb_internal.cluster_settings` instead, which - // *does* take everything from Gossip. - v, err := roachpb.ParseVersion(newVersion) - if err != nil { - t.Fatal(err) - } - hasShowSettingBug := v.Less(roachpb.Version{Major: 1, Minor: 1, Unstable: 1}) - - if manual { - func() { - node := nodes.randNode()[0] - db := c.Conn(ctx, node) - defer db.Close() - - t.l.Printf("%s: upgrading cluster version (node %d)\n", newVersion, node) - if _, err := db.Exec(fmt.Sprintf(`SET CLUSTER SETTING version = '%s'`, newVersion)); err != nil { - t.Fatal(err) - } - }() - } - - if hasShowSettingBug { - t.l.Printf("%s: using workaround for upgrade\n", newVersion) - } - - for i := 1; i < c.spec.NodeCount; i++ { - err := retry.ForDuration(30*time.Second, func() error { - db := c.Conn(ctx, i) - defer db.Close() - - if !hasShowSettingBug { - if err := db.QueryRow("SHOW CLUSTER SETTING version").Scan(¤tVersion); err != nil { - t.Fatalf("%d: %s", i, err) - } - } else { - // This uses the receiving node's Gossip and as such allows us to verify that all of the - // nodes have gotten wind of the version bump. - if err := db.QueryRow( - `SELECT current_value FROM crdb_internal.cluster_settings WHERE name = 'version'`, - ).Scan(¤tVersion); err != nil { - t.Fatalf("%d: %s", i, err) - } - } - if currentVersion != newVersion { - return fmt.Errorf("%d: expected version %s, got %s", i, newVersion, currentVersion) - } - return nil - }) - if err != nil { - t.Fatal(err) - } - } - - t.l.Printf("%s: cluster is upgraded\n", newVersion) - - // TODO(nvanbenschoten): add upgrade qualification step. - time.Sleep(1 * time.Second) - }, - } - } - - const baseVersion = "v1.0.6" - steps := []versionStep{ - // v1.1.0 is the first binary version that knows about cluster versions. - binaryVersionUpgrade("v1.1.9", nodes), - clusterVersionUpgrade("1.1"), - - binaryVersionUpgrade("v2.0.7", nodes), - clusterVersionUpgrade("2.0"), - - binaryVersionUpgrade("v2.1.2", nodes), - clusterVersionUpgrade("2.1"), - - // From now on, all version upgrade steps pass an empty version which - // means the test will look it up from node_executable_version(). - - binaryVersionUpgrade("v19.1.5", nodes), - clusterVersionUpgrade(""), - - binaryVersionUpgrade("v19.2.1", nodes), - clusterVersionUpgrade(""), - - // Each new release has to be added here. When adding a new release, you'll - // probably need to use a release candidate binary. - - // HEAD gives us the main binary for this roachtest run. 
- binaryVersionUpgrade("HEAD", nodes), - clusterVersionUpgrade(""), - } - - type feature struct { - name string - minAllowedVersion string - query string - } - - features := []feature{ - { - name: "JSONB", - minAllowedVersion: "2.0-0", - query: ` - CREATE DATABASE IF NOT EXISTS test; - CREATE TABLE test.t (j JSONB); - DROP TABLE test.t; - `, - }, { - name: "Sequences", - minAllowedVersion: "2.0-0", - query: ` - CREATE DATABASE IF NOT EXISTS test; - CREATE SEQUENCE test.test_sequence; - DROP SEQUENCE test.test_sequence; - `, - }, { - name: "Computed Columns", - minAllowedVersion: "2.0-0", - query: ` - CREATE DATABASE IF NOT EXISTS test; - CREATE TABLE test.t (x INT AS (3) STORED); - DROP TABLE test.t; - `, - }, - } - - testFeature := func(f feature) { - db := c.Conn(ctx, 1) - defer db.Close() - - var cv string - if err := db.QueryRowContext(ctx, `SHOW CLUSTER SETTING version`).Scan(&cv); err != nil { - t.Fatal(err) - } - - minAllowedVersion, err := roachpb.ParseVersion(f.minAllowedVersion) - if err != nil { - t.Fatal(err) - } - actualVersion, err := roachpb.ParseVersion(cv) - if err != nil { - t.Fatal(err) - } - - _, err = db.Exec(f.query) - if actualVersion.Less(minAllowedVersion) { - if err == nil { - t.Fatalf("expected %s to fail on cluster version %s", f.name, cv) - } - t.l.Printf("%s: %s fails expected\n", cv, f.name) - } else { - if err != nil { - t.Fatalf("expected %s to succeed on cluster version %s, got %s", f.name, cv, err) - } - t.l.Printf("%s: %s works as expected\n", cv, f.name) - } - } - - args := uploadVersion(baseVersion) - // Hack to skip initializing settings which doesn't work on very old versions - // of cockroach. - c.Run(ctx, c.Node(1), "mkdir -p {store-dir} && touch {store-dir}/settings-initialized") - c.Start(ctx, t, nodes, args, startArgsDontEncrypt) - - func() { - // Create a bunch of tables, over the batch size on which some migrations - // operate. It generally seems like a good idea to have a bunch of tables in - // the cluster, and we had a bug about migrations on large numbers of tables: - // #22370. - db := c.Conn(ctx, 1) - defer db.Close() - if _, err := db.Exec(fmt.Sprintf("create database lotsatables")); err != nil { - t.Fatal(err) - } - for i := 0; i < 100; i++ { - _, err := db.Exec(fmt.Sprintf("create table lotsatables.t%d (x int primary key)", i)) - if err != nil { - t.Fatal(err) - } - } - }() - - setupEnsureObjectAccess := func() { - // setupEnsureObjectAccess creates a db/table the first time, before - // the upgrade sequence starts. This is done to create the db/table without - // relying on "if not exists" modifier. - db := c.Conn(ctx, 1) - defer db.Close() - - _, err := db.Exec(fmt.Sprintf("create database persistent_db")) - if err != nil { - t.Fatal(err) - } - if _, err := db.Exec(fmt.Sprintf("create table persistent_db.persistent_table(a int)")); err != nil { - t.Fatal(err) - } - } - ensureObjectAccess := func() { - // run the setup function to create a db with one table before the upgrade - // sequence begins. After each step, we should be able to successfully select - // from the objects using their FQNs. Prevents bugs such as #43141, where - // databases created before the migration were inaccessible after the - // migration. 
- db := c.Conn(ctx, 1) - defer db.Close() - var cv string - if err := db.QueryRowContext(ctx, `SHOW CLUSTER SETTING version`).Scan(&cv); err != nil { - t.Fatal(err) - } - - _, err := db.Query(fmt.Sprintf("select * from persistent_db.persistent_table")) - if err != nil { - t.Fatalf( - "expected querying a table created before upgrade to succeed in version %s, got %s", - cv, err) - } - _, err = db.Query(fmt.Sprintf("show tables from persistent_db")) - if err != nil { - t.Fatalf( - "expected querying show tables on a database created before upgrade to succeed in version %s, got %s", - cv, err) - } - t.l.Printf("%s: querying a table/db created before upgrade works as expected\n", cv) - } - - for _, node := range nodes { - checkNode(node, baseVersion) - } - setupEnsureObjectAccess() - for _, step := range steps { - step.run() - ensureObjectAccess() - for _, feature := range features { - testFeature(feature) - } - } - - func() { - db := c.Conn(ctx, 1) - defer db.Close() - - var nodeVersion string - if err := db.QueryRow( - `SELECT crdb_internal.node_executable_version()`, - ).Scan(&nodeVersion); err != nil { - t.Fatal(err) - } - if nodeVersion != currentVersion { - t.Fatalf("not fully upgraded at end of test; have cluster setting at %s but node could run at %s", - currentVersion, nodeVersion, - ) - } - }() -} diff --git a/pkg/cmd/roachtest/version.go b/pkg/cmd/roachtest/version.go index 7fe94e97c057..46e9e5d2cbd6 100644 --- a/pkg/cmd/roachtest/version.go +++ b/pkg/cmd/roachtest/version.go @@ -18,18 +18,21 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/binfetcher" + "github.com/cockroachdb/cockroach/pkg/util/version" + "github.com/cockroachdb/errors" _ "github.com/lib/pq" - "github.com/pkg/errors" ) +// TODO(tbg): remove this test. Use the harness in versionupgrade.go +// to make a much better one, much more easily. func registerVersion(r *testRegistry) { - runVersion := func(ctx context.Context, t *test, c *cluster, version string) { + runVersion := func(ctx context.Context, t *test, c *cluster, binaryVersion string) { nodes := c.spec.NodeCount - 1 goos := ifLocal(runtime.GOOS, "linux") b, err := binfetcher.Download(ctx, binfetcher.Options{ Binary: "cockroach", - Version: "v" + version, + Version: "v" + binaryVersion, GOOS: goos, GOARCH: "amd64", }) @@ -54,8 +57,13 @@ func registerVersion(r *testRegistry) { loadDuration := " --duration=" + (time.Duration(3*nodes+2)*stageDuration + buffer).String() + var deprecatedWorkloadsStr string + if !t.buildVersion.AtLeast(version.MustParse("v20.2.0")) { + deprecatedWorkloadsStr += " --deprecated-fk-indexes" + } + workloads := []string{ - "./workload run tpcc --tolerate-errors --wait=false --drop --init --warehouses=1 " + loadDuration + " {pgurl:1-%d}", + "./workload run tpcc --tolerate-errors --wait=false --drop --init --warehouses=1 " + deprecatedWorkloadsStr + loadDuration + " {pgurl:1-%d}", "./workload run kv --tolerate-errors --init" + loadDuration + " {pgurl:1-%d}", } @@ -99,7 +107,7 @@ func registerVersion(r *testRegistry) { // checks had been broken for a long time. 
See:
 	//
 	// https://github.com/cockroachdb/cockroach/issues/37737#issuecomment-496026918
-		if !strings.HasPrefix(version, "2.") {
+		if !strings.HasPrefix(binaryVersion, "2.") {
 			if err := c.CheckReplicaDivergenceOnDB(ctx, db); err != nil {
 				return errors.Wrapf(err, "node %d", i)
 			}
diff --git a/pkg/cmd/roachtest/versionupgrade.go b/pkg/cmd/roachtest/versionupgrade.go
new file mode 100644
index 000000000000..3dce8c542a3f
--- /dev/null
+++ b/pkg/cmd/roachtest/versionupgrade.go
@@ -0,0 +1,525 @@
+// Copyright 2018 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package main
+
+import (
+	"context"
+	gosql "database/sql"
+	"fmt"
+	"math/rand"
+	"runtime"
+	"strconv"
+	"time"
+
+	"github.com/cockroachdb/cockroach/pkg/roachpb"
+	"github.com/cockroachdb/cockroach/pkg/testutils"
+	"github.com/cockroachdb/cockroach/pkg/util/binfetcher"
+	"github.com/cockroachdb/cockroach/pkg/util/retry"
+	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
+	"github.com/cockroachdb/cockroach/pkg/util/version"
+	_ "github.com/lib/pq"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	v201 = roachpb.Version{Major: 20, Minor: 1}
+	v202 = roachpb.Version{Major: 20, Minor: 2}
+)
+
+// Feature tests that are invoked between each step of the version upgrade test.
+// Tests can use u.clusterVersion to determine which version is active at the
+// moment.
+//
+// A gotcha is that these feature tests are also invoked when the cluster is
+// in the middle of upgrading (i.e. a state where the cluster version has
+// already been bumped, but not all nodes are aware). This should be considered
+// a feature of this test, and feature tests that flake because of it need to
+// be fixed.
+var versionUpgradeTestFeatures = versionFeatureStep{
+	// NB: the next four tests are ancient and supported since v2.0. However,
+	// in 19.2 -> 20.1 we had a migration that disallowed most DDL in the
+	// mixed version state, and so for convenience we gate them on v20.1.
+	stmtFeatureTest("Object Access", v201, `
+-- We should be able to successfully select from objects created in ancient
+-- versions of CRDB using their FQNs. Prevents bugs such as #43141, where
+-- databases created before a migration were inaccessible after the
+-- migration.
+--
+-- NB: the data has been baked into the fixtures. Originally created via:
+-- create database persistent_db
+-- create table persistent_db.persistent_table(a int)
+-- on CRDB v1.0
+select * from persistent_db.persistent_table;
+show tables from persistent_db;
+`),
+	stmtFeatureTest("JSONB", v201, `
+CREATE DATABASE IF NOT EXISTS test;
+CREATE TABLE test.t (j JSONB);
+DROP TABLE test.t;
+	`),
+	stmtFeatureTest("Sequences", v201, `
+CREATE DATABASE IF NOT EXISTS test;
+CREATE SEQUENCE test.test_sequence;
+DROP SEQUENCE test.test_sequence;
+	`),
+	stmtFeatureTest("Computed Columns", v201, `
+CREATE DATABASE IF NOT EXISTS test;
+CREATE TABLE test.t (x INT AS (3) STORED);
+DROP TABLE test.t;
+	`),
+}
+
+func runVersionUpgrade(ctx context.Context, t *test, c *cluster, buildVersion version.Version) {
+	predecessorVersion, err := PredecessorVersion(buildVersion)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// This test uses fixtures and we do not have encrypted fixtures right now.
+	c.encryptDefault = false
+
+	// Set the bool within to true to create a new fixture for this test. This
+	// is necessary after every release. For example, the day `master` becomes
+	// the 20.2 release, this test will fail because it is missing a fixture for
+	// 20.1; run the test (on 20.1) with the bool flipped to create the fixture.
+	// Check it in (instructions will be logged below) and off we go.
+	if false {
+		// The version to create/update the fixture for. Must be released (i.e.
+		// you can download it from the homepage); if that is not the case, use
+		// the empty string, which uses the local cockroach binary.
+		newV := "20.1.7"
+		predV, err := PredecessorVersion(*version.MustParse("v" + newV))
+		if err != nil {
+			t.Fatal(err)
+		}
+		makeVersionFixtureAndFatal(ctx, t, c, predV, newV)
+	}
+
+	testFeaturesStep := versionUpgradeTestFeatures.step(c.All())
+	schemaChangeStep := runSchemaChangeWorkloadStep(c.All().randNode()[0], 10 /* maxOps */, 2 /* concurrency */)
+	backupStep := func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		// This check was introduced for the system.tenants table and the associated
+		// changes to full-cluster backup to include tenants. It mostly wants to
+		// check that 20.1 (which does not have system.tenants) and 20.2 (which
+		// does have the table) can both run full cluster backups.
+		//
+		// This step can be removed once 20.2 is released.
+		if u.binaryVersion(ctx, t, 1).Major != 20 {
+			return
+		}
+		dest := fmt.Sprintf("nodelocal://0/%d", timeutil.Now().UnixNano())
+		_, err := u.conn(ctx, t, 1).ExecContext(ctx, `BACKUP TO $1`, dest)
+		require.NoError(t, err)
+	}
+
+	// The steps below start a cluster at predecessorVersion (from a fixture),
+	// then start an upgrade that is rolled back, and finally start and finalize
+	// the upgrade. Between each step, we run the feature tests defined in
+	// versionUpgradeTestFeatures.
+	u := newVersionUpgradeTest(c,
+		// Start the cluster from a fixture. That fixture's cluster version may
+		// be at the predecessor version (though in practice it's fully up to
+		// date, if it was created via the checkpointer above), so add a
+		// waitForUpgradeStep to make sure we're upgraded all the way before
+		// moving on.
+		//
+		// See the comment on createCheckpoints for details on fixtures.
+		uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion),
+		uploadAndInitSchemaChangeWorkload(),
+		waitForUpgradeStep(c.All()),
+		testFeaturesStep,
+
+		// NB: at this point, cluster and binary version equal predecessorVersion,
+		// and auto-upgrades are on.
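+		//
+		// Every binary roll below is followed by the same feature-test, schema
+		// change, and backup steps, so each intermediate mixed-version state
+		// gets identical scrutiny. A custom step is just a
+		// func(ctx context.Context, t *test, u *versionUpgradeTest); backupStep
+		// above is one.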
+
+		// We use an empty string for the version below, which means to use the
+		// main ./cockroach binary (i.e. the one being tested in this run).
+		// We upgrade into this version more capriciously to ensure better
+		// coverage by first rolling the cluster into the new version with
+		// auto-upgrade disabled, then rolling back, and then rolling forward
+		// and finalizing on the auto-upgrade path.
+		preventAutoUpgradeStep(1),
+		// Roll nodes forward.
+		binaryUpgradeStep(c.All(), ""),
+		testFeaturesStep,
+		// Run a quick schemachange workload in between each upgrade.
+		// The maxOps is 10 to keep the test runtime under 1-2 minutes.
+		schemaChangeStep,
+		backupStep,
+		// Roll back again. Note that bad things would happen if the cluster had
+		// ignored our request to not auto-upgrade. The `autoupgrade` roachtest
+		// exercises this in more detail, so here we just rely on things working
+		// as they ought to.
+		binaryUpgradeStep(c.All(), predecessorVersion),
+		testFeaturesStep,
+		schemaChangeStep,
+		backupStep,
+		// Roll nodes forward, this time allowing them to upgrade, and waiting
+		// for it to happen.
+		binaryUpgradeStep(c.All(), ""),
+		allowAutoUpgradeStep(1),
+		testFeaturesStep,
+		schemaChangeStep,
+		backupStep,
+		waitForUpgradeStep(c.All()),
+		testFeaturesStep,
+		schemaChangeStep,
+		backupStep,
+	)
+
+	u.run(ctx, t)
+}
+
+func (u *versionUpgradeTest) run(ctx context.Context, t *test) {
+	defer func() {
+		for _, db := range u.conns {
+			_ = db.Close()
+		}
+	}()
+
+	for _, step := range u.steps {
+		if step != nil {
+			step(ctx, t, u)
+		}
+	}
+}
+
+type versionUpgradeTest struct {
+	goOS  string
+	c     *cluster
+	steps []versionStep
+
+	// Cache conns because opening one takes hundreds of ms, and we do it quite
+	// a lot.
+	conns []*gosql.DB
+}
+
+func newVersionUpgradeTest(c *cluster, steps ...versionStep) *versionUpgradeTest {
+	return &versionUpgradeTest{
+		goOS:  ifLocal(runtime.GOOS, "linux"),
+		c:     c,
+		steps: steps,
+	}
+}
+
+func checkpointName(binaryVersion string) string { return "checkpoint-v" + binaryVersion }
+
+// conn returns a cached connection to the given node. Don't call .Close();
+// the test harness will do it.
+func (u *versionUpgradeTest) conn(ctx context.Context, t *test, i int) *gosql.DB {
+	if u.conns == nil {
+		for _, i := range u.c.All() {
+			u.conns = append(u.conns, u.c.Conn(ctx, i))
+		}
+	}
+	return u.conns[i-1]
+}
+
+func (u *versionUpgradeTest) uploadVersion(
+	ctx context.Context, t *test, nodes nodeListOption, newVersion string,
+) option {
+	var binary string
+	if newVersion == "" {
+		binary = cockroach
+	} else {
+		var err error
+		binary, err = binfetcher.Download(ctx, binfetcher.Options{
+			Binary:  "cockroach",
+			Version: "v" + newVersion,
+			GOOS:    u.goOS,
+			GOARCH:  "amd64",
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	target := "./cockroach"
+	if newVersion != "" {
+		target += "-" + newVersion
+	}
+	u.c.Put(ctx, binary, target, nodes)
+	return startArgs("--binary=" + target)
+}
+
+// binaryVersion returns the version of the binary running on the (one-indexed) node.
+// NB: version means major.minor[-unstable]; the patch level isn't returned. For example, a binary
+// of version 19.2.4 will return 19.2.
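+// The value is read via crdb_internal.node_executable_version().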
+func (u *versionUpgradeTest) binaryVersion(ctx context.Context, t *test, i int) roachpb.Version {
+	db := u.conn(ctx, t, i)
+
+	var sv string
+	if err := db.QueryRow(`SELECT crdb_internal.node_executable_version();`).Scan(&sv); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(sv) == 0 {
+		t.Fatal("empty version")
+	}
+
+	cv, err := roachpb.ParseVersion(sv)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return cv
+}
+
+// clusterVersion returns the cluster version active on the (one-indexed) node. Note that the
+// returned value might become stale due to the cluster auto-upgrading in the background plus
+// gossip asynchronicity.
+// NB: cluster versions are always major.minor[-unstable]; there isn't a patch level.
+func (u *versionUpgradeTest) clusterVersion(ctx context.Context, t *test, i int) roachpb.Version {
+	db := u.conn(ctx, t, i)
+
+	var sv string
+	if err := db.QueryRowContext(ctx, `SHOW CLUSTER SETTING version`).Scan(&sv); err != nil {
+		t.Fatal(err)
+	}
+
+	cv, err := roachpb.ParseVersion(sv)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return cv
+}
+
+// versionStep is an isolated version migration on a running cluster.
+type versionStep func(ctx context.Context, t *test, u *versionUpgradeTest)
+
+func uploadAndStartFromCheckpointFixture(nodes nodeListOption, v string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		u.c.Run(ctx, nodes, "mkdir", "-p", "{store-dir}")
+		vv := version.MustParse("v" + v)
+		// The fixtures use cluster version (major.minor) but the input might be
+		// a patch release.
+		name := checkpointName(
+			roachpb.Version{Major: int32(vv.Major()), Minor: int32(vv.Minor())}.String(),
+		)
+		for _, i := range nodes {
+			u.c.Put(ctx,
+				"pkg/cmd/roachtest/fixtures/"+strconv.Itoa(i)+"/"+name+".tgz",
+				"{store-dir}/fixture.tgz", u.c.Node(i),
+			)
+		}
+		// Extract fixture. Fail if there's already an LSM in the store dir.
+		u.c.Run(ctx, nodes, "cd {store-dir} && [ ! -f {store-dir}/CURRENT ] && tar -xf fixture.tgz")
+
+		// Put and start the binary.
+		args := u.uploadVersion(ctx, t, nodes, v)
+		// NB: can't start sequentially since cluster already bootstrapped.
+		u.c.Start(ctx, t, nodes, args, startArgsDontEncrypt, roachprodArgOption{"--sequential=false"})
+	}
+}
+
+// binaryUpgradeStep rolling-restarts the given nodes into the new binary
+// version. Note that this does *not* wait for the cluster version to upgrade.
+// Use a waitForUpgradeStep() for that.
+func binaryUpgradeStep(nodes nodeListOption, newVersion string) versionStep {
+	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
+		c := u.c
+
+		// NB: We could technically stage the binary on all nodes before
+		// restarting each one, but on Unix it's invalid to write to an
+		// executable file while it is currently running. So we do the
+		// simple thing and upload it serially instead.
+
+		// Restart nodes in a random order; otherwise node 1 would be running all
+		// the migrations and it probably also has all the leases.
+		rand.Shuffle(len(nodes), func(i, j int) {
+			nodes[i], nodes[j] = nodes[j], nodes[i]
+		})
+		for _, node := range nodes {
+			t.l.Printf("restarting node %d", node)
+			c.Stop(ctx, c.Node(node))
+			args := u.uploadVersion(ctx, t, c.Node(node), newVersion)
+			c.Start(ctx, t, c.Node(node), args, startArgsDontEncrypt)
+			t.l.Printf("node %d now running binary version %s", node, u.binaryVersion(ctx, t, node))
+
+			// TODO(nvanbenschoten): add upgrade qualification step. What should we
+			// test? We could run logictests. We could add custom logic here.
Maybe + // this should all be pushed to nightly migration tests instead. + } + } +} + +func preventAutoUpgradeStep(node int) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, `SET CLUSTER SETTING cluster.preserve_downgrade_option = $1`, u.binaryVersion(ctx, t, node).String()) + if err != nil { + t.Fatal(err) + } + } +} + +func allowAutoUpgradeStep(node int) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + db := u.conn(ctx, t, node) + _, err := db.ExecContext(ctx, `RESET CLUSTER SETTING cluster.preserve_downgrade_option`) + if err != nil { + t.Fatal(err) + } + } +} + +// waitForUpgradeStep waits for the cluster version to reach the first node's +// binary version (which is assumed to be every node's binary version). We rely +// on the cluster's internal self-upgrading mechanism. +// +// NB: this is intentionally kept separate from binaryUpgradeStep because we run +// feature tests between the steps, and we want to expose them (at least +// heuristically) to the real-world situation in which some nodes have already +// learned of a cluster version bump (from Gossip) where others haven't. This +// situation tends to exhibit unexpected behavior. +func waitForUpgradeStep(nodes nodeListOption) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + newVersion := u.binaryVersion(ctx, t, nodes[0]).String() + t.l.Printf("%s: waiting for cluster to auto-upgrade\n", newVersion) + + for _, i := range nodes { + err := retry.ForDuration(30*time.Second, func() error { + currentVersion := u.clusterVersion(ctx, t, i).String() + if currentVersion != newVersion { + return fmt.Errorf("%d: expected version %s, got %s", i, newVersion, currentVersion) + } + t.l.Printf("%s: acked by n%d", currentVersion, i) + return nil + }) + if err != nil { + t.Fatal(err) + } + } + + t.l.Printf("%s: nodes %v are upgraded\n", newVersion, nodes) + + // TODO(nvanbenschoten): add upgrade qualification step. + } +} + +type versionFeatureTest struct { + name string + fn func(context.Context, *test, *versionUpgradeTest, nodeListOption) (skipped bool) +} + +type versionFeatureStep []versionFeatureTest + +func (vs versionFeatureStep) step(nodes nodeListOption) versionStep { + return func(ctx context.Context, t *test, u *versionUpgradeTest) { + for _, feature := range vs { + t.l.Printf("checking %s", feature.name) + tBegin := timeutil.Now() + skipped := feature.fn(ctx, t, u, nodes) + dur := fmt.Sprintf("%.2fs", timeutil.Since(tBegin).Seconds()) + if skipped { + t.l.Printf("^-- skip (%s)", dur) + } else { + t.l.Printf("^-- ok (%s)", dur) + } + } + } +} + +func stmtFeatureTest( + name string, minVersion roachpb.Version, stmt string, args ...interface{}, +) versionFeatureTest { + return versionFeatureTest{ + name: name, + fn: func(ctx context.Context, t *test, u *versionUpgradeTest, nodes nodeListOption) (skipped bool) { + i := nodes.randNode()[0] + if u.clusterVersion(ctx, t, i).Less(minVersion) { + return true // skipped + } + db := u.conn(ctx, t, i) + if _, err := db.ExecContext(ctx, stmt, args...); err != nil { + if testutils.IsError(err, "no inbound stream connection") && u.clusterVersion(ctx, t, i).Less(v202) { + // This error has been fixed in 20.2+ but may still occur on earlier + // versions. + return true // skipped + } + t.Fatal(err) + } + return false + }, + } +} + +// makeVersionFixtureAndFatal creates fixtures to "age out" old versions of CockroachDB. 
+
+// We want to test data that was created at v1.0, but we don't actually want to
+// run a long chain of binaries starting all the way at v1.0. Instead, we
+// periodically bake a set of store directories that originally started out on
+// v1.0 and maintain it as a fixture for this test.
+//
+// The checkpoints will be created in the log directories downloaded as part of
+// the artifacts. The test will fail on purpose when it's done, with instructions
+// on where to move the files.
+func makeVersionFixtureAndFatal(
+	ctx context.Context, t *test, c *cluster, predecessorVersion string, makeFixtureVersion string,
+) {
+	c.l.Printf("making fixture for %s (starting at %s)", makeFixtureVersion, predecessorVersion)
+	c.encryptDefault = false
+	newVersionUpgradeTest(c,
+		// Start the cluster from a fixture. That fixture's cluster version may
+		// be at the predecessor version (though in practice it's fully up to
+		// date, if it was created via the checkpointer above), so add a
+		// waitForUpgradeStep to make sure we're upgraded all the way before
+		// moving on.
+		//
+		// See the comment on createCheckpoints for details on fixtures.
+		uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion),
+		waitForUpgradeStep(c.All()),
+
+		// NB: at this point, cluster and binary version equal predecessorVersion,
+		// and auto-upgrades are on.
+
+		binaryUpgradeStep(c.All(), makeFixtureVersion),
+		waitForUpgradeStep(c.All()),
+
+		func(ctx context.Context, t *test, u *versionUpgradeTest) {
+			// If we're taking checkpoints, momentarily stop the cluster (we
+			// need to do that to get the checkpoints to reflect a
+			// consistent cluster state). The binary at this point will be
+			// the new one, but the cluster version was not explicitly
+			// bumped, though auto-upgrade may have taken place already.
+			// For example, if newVersion is 2.1, the cluster version in
+			// the store directories may be 2.0 on some stores and 2.1 on
+			// the others (though if any are on 2.1, then that's what's
+			// stored in system.settings).
+			// This means that when we restart from that version, we're
+			// going to want to use the binary mentioned in the checkpoint,
+			// or at least one compatible with the *predecessor* of the
+			// checkpoint version. For example, for checkpoint-2.1, the
+			// cluster version might be 2.0, so we can only use the 2.0 or
+			// 2.1 binary, but not the 19.1 binary (as 19.1 and 2.0 are not
+			// compatible).
+			name := checkpointName(u.binaryVersion(ctx, t, 1).String())
+			u.c.Stop(ctx, c.All())
+
+			c.Run(ctx, c.All(), cockroach, "debug", "pebble", "db", "checkpoint",
+				"{store-dir}", "{store-dir}/"+name)
+			// The `cluster-bootstrapped` marker can already be found within
+			// store-dir, but the pebble checkpoint step above does not pick it
+			// up as it isn't recognized by Pebble. We copy the marker
+			// manually; it's necessary for roachprod-created clusters. See
+			// #54761.
+			c.Run(ctx, c.Node(1), "cp", "{store-dir}/cluster-bootstrapped", "{store-dir}/"+name)
+			c.Run(ctx, c.All(), "tar", "-C", "{store-dir}/"+name, "-czf", "{log-dir}/"+name+".tgz", ".")
+			t.Fatalf(`successfully created checkpoints; failing test on purpose.
+
+Invoke the following to move the archives to the right place and commit the
+result:
+
+for i in 1 2 3 4; do
+  mkdir -p pkg/cmd/roachtest/fixtures/${i} && \
+  mv artifacts/acceptance/version-upgrade/run_1/logs/${i}.unredacted/checkpoint-*.tgz \
+    pkg/cmd/roachtest/fixtures/${i}/
+done
+`)
+		}).run(ctx, t)
+}
diff --git a/pkg/sql/sem/tree/table_name.go b/pkg/sql/sem/tree/table_name.go
index d4ddfb6ca718..b773897a7da6 100644
--- a/pkg/sql/sem/tree/table_name.go
+++ b/pkg/sql/sem/tree/table_name.go
@@ -165,6 +165,15 @@ func MakeTableNameWithSchema(db, schema, tbl Name) TableName {
 	}}
 }
 
+// MakeTableNameFromPrefix creates a table name from an unqualified name
+// and a resolved prefix.
+func MakeTableNameFromPrefix(prefix TableNamePrefix, object Name) TableName {
+	return TableName{tblName{
+		TableName:       object,
+		TableNamePrefix: prefix,
+	}}
+}
+
 // MakeUnqualifiedTableName creates a new base table name.
 func MakeUnqualifiedTableName(tbl Name) TableName {
 	return TableName{tblName{
diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go
index 207e83af7d05..cb68f4c06810 100644
--- a/pkg/testutils/lint/lint_test.go
+++ b/pkg/testutils/lint/lint_test.go
@@ -1390,9 +1390,6 @@ func TestLint(t *testing.T) {
 			stream.GrepNot(`pkg/sql/opt/optgen/exprgen/custom_funcs.go:.* func .* is unused`),
 			// Using deprecated method to COPY.
 			stream.GrepNot(`pkg/cli/nodelocal.go:.* stmt.Exec is deprecated: .*`),
-			// TODO(adityamaru): Delete when working on #51897.
-			stream.GrepNot(`pkg/testutils/skip/skip.go`),
-			stream.GrepNot(`pkg/testutils/skip/stress.go`),
 		), func(s string) {
 			t.Errorf("\n%s", s)
 		}); err != nil {
diff --git a/pkg/testutils/lint/passes/passesutil/passes_util_test.go b/pkg/testutils/lint/passes/passesutil/passes_util_test.go
index 26c3325183c9..2b156fa1211f 100644
--- a/pkg/testutils/lint/passes/passesutil/passes_util_test.go
+++ b/pkg/testutils/lint/passes/passesutil/passes_util_test.go
@@ -14,9 +14,9 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/cockroachdb/cockroach/pkg/testutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/descriptormarshal"
 	"github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/unconvert"
-	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/tools/go/analysis/analysistest"
 )
@@ -26,14 +26,18 @@ import (
 // coverage checking here.
 
 func TestDescriptorMarshal(t *testing.T) {
-	skip.UnderStress(t)
+	if testutils.NightlyStress() {
+		t.Skip("skip under nightly stress")
+	}
 	testdata, err := filepath.Abs(filepath.Join("..", "descriptormarshal", "testdata"))
 	require.NoError(t, err)
 	analysistest.Run(t, testdata, descriptormarshal.Analyzer, "a")
 }
 
 func TestUnconvert(t *testing.T) {
-	skip.UnderStress(t)
+	if testutils.NightlyStress() {
+		t.Skip("skip under nightly stress")
+	}
 	testdata, err := filepath.Abs(filepath.Join("..", "unconvert", "testdata"))
 	require.NoError(t, err)
 	analysistest.Run(t, testdata, unconvert.Analyzer, "a")
diff --git a/pkg/testutils/skip/skip.go b/pkg/testutils/skip/skip.go
index 9f1648f4a1ac..2e726a2d01c9 100644
--- a/pkg/testutils/skip/skip.go
+++ b/pkg/testutils/skip/skip.go
@@ -55,18 +55,3 @@ func UnderShort(t SkippableTest, args ...interface{}) {
 		t.Skip(append([]interface{}{"disabled under -short"}, args...))
 	}
 }
-
-// UnderStress skips this test when running under stress.
-func UnderStress(t SkippableTest, args ...interface{}) { - if NightlyStress() { - t.Skip(append([]interface{}{"disabled under stress"}, args...)) - } -} - -// UnderStressRace skips this test during stressrace runs, which are tests -// run under stress with the -race flag. -func UnderStressRace(t SkippableTest, args ...interface{}) { - if NightlyStress() && util.RaceEnabled { - t.Skip(append([]interface{}{"disabled under stressrace"}, args...)) - } -} diff --git a/pkg/testutils/skip/stress.go b/pkg/testutils/skip/stress.go deleted file mode 100644 index 668d3efde668..000000000000 --- a/pkg/testutils/skip/stress.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package skip - -import "github.com/cockroachdb/cockroach/pkg/util/envutil" - -var stress = envutil.EnvOrDefaultBool("COCKROACH_NIGHTLY_STRESS", false) - -// NightlyStress returns true iff the process is running as part of CockroachDB's -// nightly stress tests. -func NightlyStress() bool { - return stress -} diff --git a/pkg/workload/bank/bank.go b/pkg/workload/bank/bank.go index 2148149e6198..ca787233b288 100644 --- a/pkg/workload/bank/bank.go +++ b/pkg/workload/bank/bank.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" "golang.org/x/exp/rand" ) @@ -177,7 +177,9 @@ func (b *bank) Tables() []workload.Table { } // Ops implements the Opser interface. -func (b *bank) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (b *bank) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(b, b.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err diff --git a/pkg/workload/bench_test.go b/pkg/workload/bench_test.go index 32986f43ac85..66e03b295075 100644 --- a/pkg/workload/bench_test.go +++ b/pkg/workload/bench_test.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coltypes" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/bank" @@ -47,7 +48,7 @@ func benchmarkInitialData(b *testing.B, gen workload.Generator) { for i := 0; i < b.N; i++ { // Share the Batch and ByteAllocator across tables but not across benchmark // iterations. 
- cb := coldata.NewMemBatch(nil) + cb := coldata.NewMemBatch(nil /* types */) var a bufalloc.ByteAllocator for _, table := range tables { for rowIdx := 0; rowIdx < table.InitialRows.NumBatches; rowIdx++ { @@ -71,9 +72,7 @@ func BenchmarkInitialData(b *testing.B) { benchmarkInitialData(b, bank.FromRows(1000)) }) b.Run(`tpch/scaleFactor=1`, func(b *testing.B) { - if testing.Short() { - b.Skip(`tpch loads a lot of data`) - } + skip.UnderShort(b, "tpch loads a lot of data") benchmarkInitialData(b, tpch.FromScaleFactor(1)) }) } diff --git a/pkg/workload/bulkingest/bulkingest.go b/pkg/workload/bulkingest/bulkingest.go index 21b19c8ba65b..f802a9dc6d81 100644 --- a/pkg/workload/bulkingest/bulkingest.go +++ b/pkg/workload/bulkingest/bulkingest.go @@ -59,7 +59,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -175,7 +175,9 @@ func (w *bulkingest) Tables() []workload.Table { } // Ops implements the Opser interface. -func (w *bulkingest) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *bulkingest) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err diff --git a/pkg/workload/cli/check.go b/pkg/workload/cli/check.go index 1d9efb3500c8..f15e714f696b 100644 --- a/pkg/workload/cli/check.go +++ b/pkg/workload/cli/check.go @@ -16,7 +16,7 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/workload" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -67,7 +67,7 @@ func check(gen workload.Generator, urls []string, dbName string) error { fn = hooks.Hooks().CheckConsistency } if fn == nil { - return errors.Errorf(`no consistency checks are defined for %s` + gen.Meta().Name) + return errors.Errorf(`no consistency checks are defined for %s`, gen.Meta().Name) } sqlDB, err := gosql.Open(`cockroach`, strings.Join(urls, ` `)) diff --git a/pkg/workload/cli/cli.go b/pkg/workload/cli/cli.go index b8a786e70a8d..572ec69221e3 100644 --- a/pkg/workload/cli/cli.go +++ b/pkg/workload/cli/cli.go @@ -14,6 +14,7 @@ import ( "os" "github.com/cockroachdb/cockroach/pkg/workload" + "github.com/cockroachdb/errors" "github.com/spf13/cobra" ) @@ -28,25 +29,24 @@ func WorkloadCmd(userFacing bool) *cobra.Command { rootCmd.AddCommand(subCmdFn(userFacing)) } if userFacing { - whitelist := map[string]struct{}{ + allowlist := map[string]struct{}{ `workload`: {}, `init`: {}, `run`: {}, } for _, m := range workload.Registered() { - whitelist[m.Name] = struct{}{} + allowlist[m.Name] = struct{}{} } - var addExperimental func(c *cobra.Command) - addExperimental = func(c *cobra.Command) { - c.Short = `[experimental] ` + c.Short - if _, ok := whitelist[c.Name()]; !ok { + var hideNonPublic func(c *cobra.Command) + hideNonPublic = func(c *cobra.Command) { + if _, ok := allowlist[c.Name()]; !ok { c.Hidden = true } for _, sub := range c.Commands() { - addExperimental(sub) + hideNonPublic(sub) } } - addExperimental(rootCmd) + hideNonPublic(rootCmd) } return rootCmd } @@ -73,7 +73,11 @@ func HandleErrs( return func(cmd *cobra.Command, args []string) { err := 
f(cmd, args) if err != nil { + hint := errors.FlattenHints(err) cmd.Println("Error:", err.Error()) + if hint != "" { + cmd.Println("Hint:", hint) + } os.Exit(1) } } diff --git a/pkg/workload/cli/run.go b/pkg/workload/cli/run.go index f0e523ea5b4c..e495ab60da05 100644 --- a/pkg/workload/cli/run.go +++ b/pkg/workload/cli/run.go @@ -29,11 +29,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/logflags" + "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" "github.com/cockroachdb/cockroach/pkg/workload/workloadsql" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" "golang.org/x/time/rate" @@ -44,7 +45,8 @@ var tolerateErrors = runFlags.Bool("tolerate-errors", false, "Keep running on er var maxRate = runFlags.Float64( "max-rate", 0, "Maximum frequency of operations (reads/writes). If 0, no limit.") var maxOps = runFlags.Uint64("max-ops", 0, "Maximum number of operations to run") -var duration = runFlags.Duration("duration", 0, "The duration to run. If 0, run forever.") +var duration = runFlags.Duration("duration", 0, + "The duration to run (in addition to --ramp). If 0, run forever.") var doInit = runFlags.Bool("init", false, "Automatically run init. DEPRECATED: Use workload init instead.") var ramp = runFlags.Duration("ramp", 0*time.Second, "The duration over which to ramp up load.") @@ -230,7 +232,7 @@ func workerRun( } if err := workFn(ctx); err != nil { - if errors.Cause(err) == ctx.Err() { + if ctx.Err() != nil && errors.Is(err, ctx.Err()) { return } errCh <- err @@ -299,7 +301,7 @@ func startPProfEndPoint(ctx context.Context) { go func() { err := http.ListenAndServe(":"+strconv.Itoa(*pprofport), nil) if err != nil { - log.Error(ctx, err) + log.Errorf(ctx, "%v", err) } }() } @@ -350,16 +352,39 @@ func runRun(gen workload.Generator, urls []string, dbName string) error { } reg := histogram.NewRegistry(*histogramsMaxLatency) var ops workload.QueryLoad - for { - ops, err = o.Ops(urls, reg) - if err == nil { - break + prepareStart := timeutil.Now() + log.Infof(ctx, "creating load generator...") + const prepareTimeout = 60 * time.Minute + prepareCtx, cancel := context.WithTimeout(ctx, prepareTimeout) + defer cancel() + if prepareErr := func(ctx context.Context) error { + retry := retry.StartWithCtx(ctx, retry.Options{}) + var err error + for retry.Next() { + if err != nil { + log.Warningf(ctx, "retrying after error while creating load: %v", err) + } + ops, err = o.Ops(ctx, urls, reg) + if err == nil { + return nil + } + err = errors.Wrapf(err, "failed to initialize the load generator") + if !*tolerateErrors { + return err + } } - if !*tolerateErrors { - return err + if ctx.Err() != nil { + // Don't retry endlessly. Note that this retry loop is not under the + // control of --duration, so we're avoiding retrying endlessly. + log.Errorf(ctx, "Attempt to create load generator failed. "+ + "It's been more than %s since we started trying to create the load generator "+ + "so we're giving up. 
Last failure: %s", prepareTimeout, err) } - log.Infof(ctx, "retrying after error while creating load: %v", err) + return err + }(prepareCtx); prepareErr != nil { + return prepareErr } + log.Infof(ctx, "creating load generator... done (took %s)", timeutil.Now().Sub(prepareStart)) start := timeutil.Now() errCh := make(chan error) @@ -441,7 +466,7 @@ func runRun(gen workload.Generator, urls []string, dbName string) error { formatter.outputError(err) if *tolerateErrors { if everySecond.ShouldLog() { - log.Error(ctx, err) + log.Errorf(ctx, "%v", err) } continue } diff --git a/pkg/workload/csv.go b/pkg/workload/csv.go index 37755958ce97..9123c811e3b1 100644 --- a/pkg/workload/csv.go +++ b/pkg/workload/csv.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coltypes" "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/util/encoding/csv" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -39,7 +39,7 @@ const ( func WriteCSVRows( ctx context.Context, w io.Writer, table Table, rowStart, rowEnd int, sizeBytesLimit int64, ) (rowBatchIdx int, err error) { - cb := coldata.NewMemBatchWithSize(nil, 0) + cb := coldata.NewMemBatchWithSize(nil /* types */, 0 /* size */) var a bufalloc.ByteAllocator bytesWrittenW := &bytesWrittenWriter{w: w} @@ -91,7 +91,7 @@ type csvRowsReader struct { func (r *csvRowsReader) Read(p []byte) (n int, err error) { if r.cb == nil { - r.cb = coldata.NewMemBatchWithSize(nil, 0) + r.cb = coldata.NewMemBatchWithSize(nil /* types */, 0 /* size */) } for { diff --git a/pkg/workload/csv_test.go b/pkg/workload/csv_test.go index 3f85e444ecf0..d0db7830b450 100644 --- a/pkg/workload/csv_test.go +++ b/pkg/workload/csv_test.go @@ -57,7 +57,7 @@ func TestHandleCSV(t *testing.T) { })) defer ts.Close() - res, err := httputil.Get(context.TODO(), ts.URL+`/bank/bank`+test.params) + res, err := httputil.Get(context.Background(), ts.URL+`/bank/bank`+test.params) if err != nil { t.Fatal(err) } @@ -79,7 +79,7 @@ func BenchmarkWriteCSVRows(b *testing.B) { var batches []coldata.Batch for _, table := range tpcc.FromWarehouses(1).Tables() { - cb := coldata.NewMemBatch(nil) + cb := coldata.NewMemBatch(nil /* types */) var a bufalloc.ByteAllocator table.InitialRows.FillBatch(0, cb, &a) batches = append(batches, cb) @@ -129,7 +129,7 @@ func TestCSVRowsReader(t *testing.T) { func BenchmarkCSVRowsReader(b *testing.B) { var batches []coldata.Batch for _, table := range tpcc.FromWarehouses(1).Tables() { - cb := coldata.NewMemBatch(nil) + cb := coldata.NewMemBatch(nil /* types */) var a bufalloc.ByteAllocator table.InitialRows.FillBatch(0, cb, &a) batches = append(batches, cb) diff --git a/pkg/workload/debug/tpcc_results.go b/pkg/workload/debug/tpcc_results.go index 8833dbd12a31..1ce96b04bd29 100644 --- a/pkg/workload/debug/tpcc_results.go +++ b/pkg/workload/debug/tpcc_results.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/workload/histogram" "github.com/cockroachdb/cockroach/pkg/workload/tpcc" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/cobra" ) diff --git a/pkg/workload/dep_test.go b/pkg/workload/dep_test.go index f3daae0f2915..06f249e79ef6 100644 --- a/pkg/workload/dep_test.go +++ b/pkg/workload/dep_test.go @@ -17,23 +17,37 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) -func TestDepWhitelist(t *testing.T) { +func TestDepAllowlist(t *testing.T) { 
defer leaktest.AfterTest(t)() - // We want workload to be lightweight. If you need to add a package to this - // set of deps, run it by danhhz first. + // We want workload to be lightweight. buildutil.VerifyTransitiveWhitelist(t, "github.com/cockroachdb/cockroach/pkg/workload", []string{ `github.com/cockroachdb/cockroach/pkg/build`, `github.com/cockroachdb/cockroach/pkg/col/coldata`, `github.com/cockroachdb/cockroach/pkg/col/coltypes`, + `github.com/cockroachdb/cockroach/pkg/col/typeconv`, + `github.com/cockroachdb/cockroach/pkg/docs`, + `github.com/cockroachdb/cockroach/pkg/geo/geopb`, + `github.com/cockroachdb/cockroach/pkg/sql/lex`, + `github.com/cockroachdb/cockroach/pkg/sql/oidext`, + `github.com/cockroachdb/cockroach/pkg/sql/types`, + `github.com/cockroachdb/cockroach/pkg/util`, `github.com/cockroachdb/cockroach/pkg/util/arith`, `github.com/cockroachdb/cockroach/pkg/util/bufalloc`, `github.com/cockroachdb/cockroach/pkg/util/duration`, `github.com/cockroachdb/cockroach/pkg/util/encoding/csv`, + `github.com/cockroachdb/cockroach/pkg/util/envutil`, + `github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented`, + `github.com/cockroachdb/cockroach/pkg/util/humanizeutil`, + `github.com/cockroachdb/cockroach/pkg/util/protoutil`, + `github.com/cockroachdb/cockroach/pkg/util/randutil`, `github.com/cockroachdb/cockroach/pkg/util/stacktrace`, + `github.com/cockroachdb/cockroach/pkg/util/stringencoding`, `github.com/cockroachdb/cockroach/pkg/util/syncutil`, `github.com/cockroachdb/cockroach/pkg/util/timeutil`, + `github.com/cockroachdb/cockroach/pkg/util/uint128`, + `github.com/cockroachdb/cockroach/pkg/util/uuid`, `github.com/cockroachdb/cockroach/pkg/util/version`, `github.com/cockroachdb/cockroach/pkg/workload/histogram`, // TODO(dan): These really shouldn't be used in util packages, but the diff --git a/pkg/workload/examples/startrek.go b/pkg/workload/examples/startrek.go index 311c67aa36f0..df5886f44476 100644 --- a/pkg/workload/examples/startrek.go +++ b/pkg/workload/examples/startrek.go @@ -72,6 +72,13 @@ var episodesColTypes = []coltypes.T{ coltypes.Float64, } +var quotesColTypes = []coltypes.T{ + coltypes.Bytes, + coltypes.Bytes, + coltypes.Float64, + coltypes.Int64, +} + // The data that follows was derived from the 'startrek' fortune cookie file. var startrekEpisodes = [...][]interface{}{ {1, 1, 1, `The Man Trap`, 1531.1}, @@ -155,13 +162,6 @@ var startrekEpisodes = [...][]interface{}{ {79, 3, 24, `Turnabout Intruder`, 5928.5}, } -var quotesColTypes = []coltypes.T{ - coltypes.Bytes, - coltypes.Bytes, - coltypes.Float64, - coltypes.Int64, -} - var startrekQuotes = [...][]interface{}{ {`"... freedom ... is a worship word..." "It is our worship word too."`, `Cloud William and Kirk`, nil, 52}, {`"Beauty is transitory." 
"Beauty survives."`, `Spock and Kirk`, nil, 72}, diff --git a/pkg/workload/indexes/indexes.go b/pkg/workload/indexes/indexes.go index a8c87300ba01..d6c0556b9996 100644 --- a/pkg/workload/indexes/indexes.go +++ b/pkg/workload/indexes/indexes.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -47,10 +47,11 @@ type indexes struct { flags workload.Flags connFlags *workload.ConnFlags - seed int64 - idxs int - unique bool - payload int + seed int64 + idxs int + unique bool + payload int + cycleLength uint64 } func init() { @@ -68,6 +69,8 @@ var indexesMeta = workload.Meta{ g.flags.IntVar(&g.idxs, `secondary-indexes`, 1, `Number of indexes to add to the table.`) g.flags.BoolVar(&g.unique, `unique-indexes`, false, `Use UNIQUE secondary indexes.`) g.flags.IntVar(&g.payload, `payload`, 64, `Size of the unindexed payload column.`) + g.flags.Uint64Var(&g.cycleLength, `cycle-length`, math.MaxUint64, + `Number of keys repeatedly accessed by each writer through upserts.`) g.connFlags = workload.NewConnFlags(&g.flags) return g }, @@ -141,8 +144,9 @@ func (w *indexes) Tables() []workload.Table { } // Ops implements the Opser interface. -func (w *indexes) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { - ctx := context.Background() +func (w *indexes) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err @@ -183,10 +187,10 @@ type indexesOp struct { } func (o *indexesOp) run(ctx context.Context) error { - keyHi, keyLo := o.rand.Uint64(), o.rand.Uint64() + keyLo := o.rand.Uint64() % o.config.cycleLength _, _ = o.rand.Read(o.buf[:]) args := []interface{}{ - uuid.FromUint128(uint128.FromInts(keyHi, keyLo)).String(), // key + uuid.FromUint128(uint128.FromInts(0, keyLo)).String(), // key int64(keyLo + 0), // col0 int64(keyLo + 1), // col1 int64(keyLo + 2), // col2 diff --git a/pkg/workload/interleavebench/interleavebench.go b/pkg/workload/interleavebench/interleavebench.go new file mode 100644 index 000000000000..0d5846a75194 --- /dev/null +++ b/pkg/workload/interleavebench/interleavebench.go @@ -0,0 +1,437 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package interleavebench + +import ( + "context" + gosql "database/sql" + "fmt" + "math" + "math/rand" + "strconv" + "strings" + + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/workload" + "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "github.com/cockroachdb/errors" + "github.com/spf13/pflag" +) + +// interleaveBench is a workload that can be used to benchmark the performance +// of DELETE queries on interleaved and non-interleaved tables. 
With the `init`
+// command, it creates a hierarchy of related tables that can be interleaved
+// or not, can have foreign key relationships or not, and so on, and then
+// inserts many rows that satisfy the desired ratio and count arguments. With
+// the `run` command, it executes DELETE queries one at a time and measures the
+// time it takes to remove all "related" rows from the whole hierarchy
+// ("related" is in quotes because in the non-interleaved case without FKs the
+// relationship between parent and child tables is implicit and is not
+// enforced in the schema itself).
+type interleaveBench struct {
+	flags     workload.Flags
+	connFlags *workload.ConnFlags
+
+	verbose         bool
+	interleave      bool
+	addFKs          bool
+	levels          int
+	ratio           float64
+	intraLevelRatio float64
+	parentCount     int
+	rangesToSplit   int
+	hierarchy       string
+	tablesPerLevel  []int
+
+	deleteFromLevel       int
+	numRowsInSingleDelete int
+	inClause              bool
+}
+
+func init() {
+	workload.Register(interleaveBenchMeta)
+}
+
+var interleaveBenchMeta = workload.Meta{
+	Name:        `interleavebench`,
+	Description: `interleavebench is a tool for benchmarking DELETE queries on interleaved and non-interleaved tables. Note that it ignores the --concurrency flag and always runs with a single worker`,
+	Version:     `1.0.0`,
+	New: func() workload.Generator {
+		b := &interleaveBench{}
+		b.flags.FlagSet = pflag.NewFlagSet(`interleavebench`, pflag.ContinueOnError)
+		b.flags.Meta = map[string]workload.FlagMeta{
+			`delete-from`:           {RuntimeOnly: true},
+			`rows-in-single-delete`: {RuntimeOnly: true},
+			`in-clause`:             {RuntimeOnly: true},
+		}
+		b.flags.BoolVar(&b.verbose, `verbose`, false, `Specifies whether the workload should print some of the queries`)
+		b.flags.BoolVar(&b.interleave, `interleave`, false, `Specifies whether the hierarchy of tables is interleaved`)
+		b.flags.BoolVar(&b.addFKs, `fks`, true, `Specifies whether foreign keys are added`)
+		b.flags.IntVar(&b.levels, `levels`, 3, `Specifies the number of levels (grandparent, parent, child - these count as 3)`)
+		b.flags.StringVar(&b.hierarchy, `hierarchy`, ``, `Comma-separated numbers of tables at each level (e.g. "1,1,1")`)
+		b.flags.Float64Var(&b.ratio, `ratio`, 10, `Specifies the ratio between the first tables in consecutive levels (e.g. # rows in child = 10 x # rows in parent)`)
+		b.flags.Float64Var(&b.intraLevelRatio, `intra-level-ratio`, 0.1, `Specifies the intra-level ratio (i.e. between consecutive child tables)`)
+		b.flags.IntVar(&b.parentCount, `parent-count`, 100, `Specifies the number of rows on the parent level`)
+		b.flags.IntVar(&b.deleteFromLevel, `delete-from`, 0, `Specifies the level to issue deletes from (0 is the parent, 1 is the child, etc)`)
+		b.flags.IntVar(&b.numRowsInSingleDelete, `rows-in-single-delete`, 1, `Specifies the maximum number of rows to be deleted in a single DELETE query (rows from child levels don't count towards this)`)
+		b.flags.IntVar(&b.rangesToSplit, `to-split`, 0, `Specifies the number of ranges to have (we will disable range merges, split the parent table into this number manually, and scatter those ranges)`)
+		b.flags.BoolVar(&b.inClause, `in-clause`, true, `Specifies whether rows to delete are specified with an IN clause or a range filter`)
+		b.connFlags = workload.NewConnFlags(&b.flags)
+		return b
+	},
+}
+
+const (
+	// We only support concurrency of 1 (this makes it easy to track which rows
+	// have already been deleted).
+	maxWorkers              = 1
+	maxSingleTableSize      = 1 << 33
+	maxSingleInsertRowCount = 10000
+)
+
+// Meta implements the Generator interface.
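+//
+// A sample pair of invocations (flag values here are illustrative, not
+// defaults; `init` creates and populates the hierarchy, `run` issues the
+// DELETE queries):
+//
+//	bin/workload init interleavebench --levels=3 --hierarchy=1,1,1 --interleave
+//	bin/workload run interleavebench --delete-from=1 --rows-in-single-delete=10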
+func (*interleaveBench) Meta() workload.Meta { return interleaveBenchMeta } + +// Flags implements the Flagser interface. +func (b *interleaveBench) Flags() workload.Flags { return b.flags } + +// Hooks implements the Hookser interface. +func (b *interleaveBench) Hooks() workload.Hooks { + return workload.Hooks{ + Validate: func() error { + // Override the concurrency flag regardless of the user-specified + // option. + b.connFlags.Concurrency = maxWorkers + if b.levels < 2 { + return errors.Errorf("invalid number of levels %d (needs to be at least 2)", b.levels) + } + b.tablesPerLevel = make([]int, b.levels) + for i := range b.tablesPerLevel { + b.tablesPerLevel[i] = 1 + } + if b.hierarchy != "" { + tokens := strings.Split(b.hierarchy, ",") + if len(tokens) != b.levels { + return errors.Errorf("mismatched --levels and --hierarchy arguments") + } + for l, token := range tokens { + numTables, err := strconv.Atoi(token) + if err != nil { + return err + } + if numTables < 1 { + return errors.Errorf("invalid number of tables %d at level %d", numTables, l) + } + if l == 0 && numTables != 1 { + return errors.Errorf("zeroth level must contain exactly one table") + } + b.tablesPerLevel[l] = numTables + } + } + if b.ratio <= 0 { + return errors.Errorf("invalid ratio %.2f (needs to be greater than 0)", b.ratio) + } + if b.intraLevelRatio <= 0 { + return errors.Errorf("invalid intra-level ratio %.2f (needs to be greater than 0)", b.intraLevelRatio) + } + if b.parentCount < 1 { + return errors.Errorf("invalid parent-count %d (needs to be at least 1)", b.parentCount) + } + if float64(b.parentCount)*math.Pow(b.ratio, float64(b.levels-1)) > maxSingleTableSize { + return errors.Errorf("a table on %d level will have more than %d rows", b.levels-1, maxSingleTableSize) + } + if b.deleteFromLevel < 0 || b.deleteFromLevel >= b.levels { + return errors.Errorf("invalid delete-from level %d (needs to be in [0, %d) range)", b.deleteFromLevel, b.levels) + } + if b.numRowsInSingleDelete < 1 { + return errors.Errorf("invalid rows-in-single-delete %d number (needs to be at least 1)", b.numRowsInSingleDelete) + } + if b.rangesToSplit < 0 || b.rangesToSplit > b.parentCount { + return errors.Errorf("invalid to-split %d (should be in [0, %d] range)", b.rangesToSplit, b.parentCount) + } + return nil + }, + PostLoad: func(db *gosql.DB) error { + if b.addFKs { + for level := 1; level < b.levels; level++ { + for ordinal := 0; ordinal < b.tablesPerLevel[level]; ordinal++ { + addFK := "ALTER TABLE " + b.getTableName(level, ordinal) + " ADD CONSTRAINT fk FOREIGN KEY " + + b.getColumnsRange(level-1) + " REFERENCES " + b.getTableName(level-1, 0) + + b.getColumnsRange(level-1) + " ON DELETE CASCADE;" + if b.verbose { + fmt.Printf("%s\n", addFK) + } + if _, err := db.Exec(addFK); err != nil { + return err + } + } + } + } + getInsert := func(level, ordinal int, start, end int) string { + s := "INSERT INTO " + b.getTableName(level, ordinal) + " (SELECT " + for l := 0; l < level; l++ { + ancestorRowCount := b.getTableRowCount(l, 0) + rowCount := b.getTableRowCount(level, ordinal) + s += fmt.Sprintf("floor((i-1)*%.10f)::INT+1, ", ancestorRowCount/rowCount) + } + s += fmt.Sprintf("i FROM generate_series(%d, %d) AS i);", start, end) + return s + } + for level := 0; level < b.levels; level++ { + for ordinal := 0; ordinal < b.tablesPerLevel[level]; ordinal++ { + start, end := 1, int(b.getTableRowCount(level, ordinal)) + numInserts := 0 + for start <= end { + currentEnd := end + if currentEnd-start > maxSingleInsertRowCount { + currentEnd = 
start + maxSingleInsertRowCount - 1
+				}
+				insertStmt := getInsert(level, ordinal, start, currentEnd)
+				if b.verbose {
+					fmt.Printf("%s\n", insertStmt)
+				}
+				if _, err := db.Exec(insertStmt); err != nil {
+					return err
+				}
+				start = currentEnd + 1
+				numInserts++
+				}
+			}
+			}
+			if b.rangesToSplit > 0 {
+				// We always split and scatter the parent table.
+				if err := b.splitAndScatter(db, 0 /* level */); err != nil {
+					return err
+				}
+				if !b.interleave {
+					// In the non-interleaved case, we split and scatter all tables.
+					for level := 1; level < b.levels; level++ {
+						if err := b.splitAndScatter(db, level); err != nil {
+							return err
+						}
+					}
+				}
+			}
+			return nil
+		},
+	}
+}
+
+func (b *interleaveBench) getTableRowCount(level, ordinal int) float64 {
+	if level == 0 {
+		return float64(b.parentCount)
+	}
+	rowCount := b.getTableRowCount(level-1, 0) * b.ratio
+	for i := 0; i < ordinal; i++ {
+		rowCount *= b.intraLevelRatio
+	}
+	return rowCount
+}
+
+func (b *interleaveBench) getTableName(level, ordinal int) string {
+	return fmt.Sprintf("table%d_%d", level, ordinal)
+}
+
+func (b *interleaveBench) getColumnName(level int) string {
+	return fmt.Sprintf("c%d", level)
+}
+
+// Returns '(c0, c1, ..., c<lastLevel>)'.
+func (b *interleaveBench) getColumnsRange(lastLevel int) string {
+	s := "("
+	for i := 0; i <= lastLevel; i++ {
+		s += b.getColumnName(i)
+		if i < lastLevel {
+			s += ", "
+		}
+	}
+	s += ")"
+	return s
+}
+
+// splitAndScatter splits all tables at the specified level into b.rangesToSplit
+// ranges and scatters them.
+func (b *interleaveBench) splitAndScatter(db *gosql.DB, level int) error {
+	settingQuery := "SET CLUSTER SETTING kv.range_merge.queue_enabled=false;"
+	if b.verbose {
+		fmt.Printf("%s\n", settingQuery)
+	}
+	if _, err := db.Exec(settingQuery); err != nil {
+		return err
+	}
+	for ordinal := 0; ordinal < b.tablesPerLevel[level]; ordinal++ {
+		splitQuery := fmt.Sprintf(
+			"ALTER TABLE %s SPLIT AT SELECT i FROM generate_series(1, %d, %d) AS i;",
+			b.getTableName(level, ordinal), int(b.getTableRowCount(0, 0)), int(b.getTableRowCount(0, 0)/float64(b.rangesToSplit)),
+		)
+		scatterQuery := fmt.Sprintf("ALTER TABLE %s SCATTER;", b.getTableName(level, ordinal))
+		if b.verbose {
+			fmt.Printf("%s\n%s\n%s\n", settingQuery, splitQuery, scatterQuery)
+		}
+		if _, err := db.Exec(splitQuery); err != nil {
+			return err
+		}
+		if _, err := db.Exec(scatterQuery); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Tables implements the Generator interface.
+func (b *interleaveBench) Tables() []workload.Table {
+	if b.tablesPerLevel == nil {
+		b.tablesPerLevel = make([]int, b.levels)
+		for i := range b.tablesPerLevel {
+			b.tablesPerLevel[i] = 1
+		}
+	}
+	getTableSchema := func(level, ordinal int) string {
+		schema := "("
+		for i := 0; i <= level; i++ {
+			schema += fmt.Sprintf("%s INT, ", b.getColumnName(i))
+		}
+		schema += "PRIMARY KEY " + b.getColumnsRange(level) + ")"
+		if b.interleave && level > 0 {
+			schema += " INTERLEAVE IN PARENT "
+			schema += b.getTableName(level-1, 0)
+			schema += b.getColumnsRange(level - 1)
+		}
+		return schema
+	}
+	var tables []workload.Table
+	for level := 0; level < b.levels; level++ {
+		for ordinal := 0; ordinal < b.tablesPerLevel[level]; ordinal++ {
+			tables = append(tables, workload.Table{
+				Name:   b.getTableName(level, ordinal),
+				Schema: getTableSchema(level, ordinal),
+			})
+		}
+	}
+	return tables
+}
+
+// Ops implements the Opser interface.
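+//
+// As a worked example of the getTableRowCount arithmetic above (flag values
+// hypothetical: --levels=2 --hierarchy=1,2 --parent-count=100 --ratio=10
+// --intra-level-ratio=0.1):
+//
+//	getTableRowCount(0, 0) = 100
+//	getTableRowCount(1, 0) = 100 * 10       = 1000
+//	getTableRowCount(1, 1) = 100 * 10 * 0.1 = 100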
+func (b *interleaveBench) Ops(
+	_ context.Context, urls []string, reg *histogram.Registry,
+) (workload.QueryLoad, error) {
+	sqlDatabase, err := workload.SanitizeUrls(b, b.connFlags.DBOverride, urls)
+	if err != nil {
+		return workload.QueryLoad{}, err
+	}
+	db, err := gosql.Open(`cockroach`, strings.Join(urls, ` `))
+	if err != nil {
+		return workload.QueryLoad{}, err
+	}
+	// Allow a maximum of 1 connection to the database.
+	db.SetMaxOpenConns(maxWorkers)
+	db.SetMaxIdleConns(maxWorkers)
+
+	worker := &worker{
+		config:         b,
+		hists:          reg.GetHandle(),
+		db:             db,
+		rng:            rand.New(rand.NewSource(int64(timeutil.Now().Nanosecond()))),
+		alreadyDeleted: make([]bool, int(b.getTableRowCount(b.deleteFromLevel, 0))+1),
+	}
+	// Note that we always have a single worker because we override the
+	// concurrency flag to 1.
+	ql := workload.QueryLoad{
+		SQLDatabase: sqlDatabase,
+		WorkerFns:   []func(context.Context) error{worker.run},
+	}
+	return ql, nil
+}
+
+type worker struct {
+	config         *interleaveBench
+	hists          *histogram.Histograms
+	db             *gosql.DB
+	rng            *rand.Rand
+	alreadyDeleted []bool
+	deletedCount   int
+}
+
+func (w *worker) run(context.Context) error {
+	level := w.config.deleteFromLevel
+	levelRowCount := int(w.config.getTableRowCount(level, 0))
+	if w.deletedCount >= levelRowCount {
+		return nil
+	}
+	var filter string
+	if w.config.inClause {
+		ids := make([]int, 0, w.config.numRowsInSingleDelete)
+		for len(ids) < w.config.numRowsInSingleDelete && w.deletedCount < levelRowCount {
+			id := 1 + w.rng.Intn(levelRowCount)
+			for w.alreadyDeleted[id] {
+				id++
+				if id > levelRowCount {
+					id = 1
+				}
+			}
+			ids = append(ids, id)
+			w.alreadyDeleted[id] = true
+			w.deletedCount++
+		}
+		filter = fmt.Sprintf("WHERE %s IN (", w.config.getColumnName(level))
+		for i, id := range ids {
+			if i > 0 {
+				filter += ", "
+			}
+			filter += fmt.Sprintf("%d", id)
+		}
+		filter += ")"
+	} else {
+		var startRow int
+		for {
+			startRow = 1 + w.rng.Intn(levelRowCount)
+			if !w.alreadyDeleted[startRow] {
+				w.alreadyDeleted[startRow] = true
+				// Keep deletedCount in sync with alreadyDeleted so that the
+				// termination check above can fire.
+				w.deletedCount++
+				break
+			}
+		}
+		count := 1
+		for startRow+count < levelRowCount && count < w.config.numRowsInSingleDelete {
+			if w.alreadyDeleted[startRow+count] {
+				break
+			}
+			w.alreadyDeleted[startRow+count] = true
+			w.deletedCount++
+			count++
+		}
+		filter = fmt.Sprintf(
+			"WHERE %s >= %d AND %s < %d",
+			w.config.getColumnName(level), startRow, w.config.getColumnName(level), startRow+count,
+		)
+	}
+	var query string
+	if !w.config.addFKs {
+		// Foreign key relationships were not established which means that we
+		// are simulating a "fake cascade" operator, so we manually issue the
+		// DELETE queries for all descendant tables.
+		for l := w.config.levels - 1; l > level; l-- {
+			for ord := 0; ord < w.config.tablesPerLevel[l]; ord++ {
+				query += fmt.Sprintf("DELETE FROM %s %s; ", w.config.getTableName(l, ord), filter)
+			}
+		}
+	}
+	query += fmt.Sprintf("DELETE FROM %s %s;", w.config.getTableName(level, 0), filter)
+	if w.config.verbose && w.rng.Float64() < 0.1 {
+		// We arbitrarily choose to print about every tenth query when
+		// verbosity is requested.
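+		// For instance, a printed statement might look like (values
+		// illustrative):
+		//   DELETE FROM table1_0 WHERE c1 IN (3, 12, 57);
+		// or, with --in-clause=false:
+		//   DELETE FROM table1_0 WHERE c1 >= 12 AND c1 < 15;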
+ fmt.Printf("%s\n", query) + } + start := timeutil.Now() + if _, err := w.db.Exec(query); err != nil { + return err + } + w.hists.Get("").Record(timeutil.Since(start)) + return nil +} diff --git a/pkg/workload/interleavedpartitioned/interleavedpartitioned.go b/pkg/workload/interleavedpartitioned/interleavedpartitioned.go index 71bc5e8f7ea9..72ff62437092 100644 --- a/pkg/workload/interleavedpartitioned/interleavedpartitioned.go +++ b/pkg/workload/interleavedpartitioned/interleavedpartitioned.go @@ -27,7 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -430,7 +430,7 @@ func (w *interleavedPartitioned) Tables() []workload.Table { // Ops implements the Opser interface. func (w *interleavedPartitioned) Ops( - urls []string, reg *histogram.Registry, + ctx context.Context, urls []string, reg *histogram.Registry, ) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, ``, urls) if err != nil { @@ -664,12 +664,12 @@ func (w *interleavedPartitioned) fetchSessionID( start := timeutil.Now() baseSessionID := randomSessionID(rng, locality, localPercent) var sessionID string - if err := w.findSessionIDStatement1.QueryRowContext(ctx, baseSessionID).Scan(&sessionID); err != nil && err != gosql.ErrNoRows { + if err := w.findSessionIDStatement1.QueryRowContext(ctx, baseSessionID).Scan(&sessionID); err != nil && !errors.Is(err, gosql.ErrNoRows) { return "", err } // Didn't find a next session ID, let's try the other way. if len(sessionID) == 0 { - if err := w.findSessionIDStatement2.QueryRowContext(ctx, baseSessionID).Scan(&sessionID); err != nil && err != gosql.ErrNoRows { + if err := w.findSessionIDStatement2.QueryRowContext(ctx, baseSessionID).Scan(&sessionID); err != nil && !errors.Is(err, gosql.ErrNoRows) { return "", err } } diff --git a/pkg/workload/jsonload/json.go b/pkg/workload/jsonload/json.go index c00b703ade07..8966261fe4d4 100644 --- a/pkg/workload/jsonload/json.go +++ b/pkg/workload/jsonload/json.go @@ -27,7 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -126,7 +126,9 @@ func (w *jsonLoad) Tables() []workload.Table { } // Ops implements the Opser interface. 
-func (w *jsonLoad) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *jsonLoad) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err diff --git a/pkg/workload/kv/kv.go b/pkg/workload/kv/kv.go index 306d6542a6a0..40eac4a4378e 100644 --- a/pkg/workload/kv/kv.go +++ b/pkg/workload/kv/kv.go @@ -13,6 +13,7 @@ package kv import ( "context" "crypto/sha1" + gosql "database/sql" "encoding/binary" "fmt" "hash" @@ -25,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -73,6 +74,7 @@ type kv struct { secondaryIndex bool shards int targetCompressionRatio float64 + enum bool } func init() { @@ -129,6 +131,8 @@ var kvMeta = workload.Meta{ `Number of shards to create on the primary key.`) g.flags.Float64Var(&g.targetCompressionRatio, `target-compression-ratio`, 1.0, `Target compression ratio for data blocks. Must be >= 1.0`) + g.flags.BoolVar(&g.enum, `enum`, false, + `Inject an enum column and use it`) g.connFlags = workload.NewConnFlags(&g.flags) return g }, @@ -143,6 +147,15 @@ func (w *kv) Flags() workload.Flags { return w.flags } // Hooks implements the Hookser interface. func (w *kv) Hooks() workload.Hooks { return workload.Hooks{ + PostLoad: func(db *gosql.DB) error { + if !w.enum { + return nil + } + _, err := db.Exec(` +CREATE TYPE enum_type AS ENUM ('v'); +ALTER TABLE kv ADD COLUMN e enum_type NOT NULL AS ('v') STORED;`) + return err + }, Validate: func() error { if w.maxBlockSizeBytes < w.minBlockSizeBytes { return errors.Errorf("Value of 'max-block-bytes' (%d) must be greater than or equal to value of 'min-block-bytes' (%d)", @@ -205,7 +218,9 @@ func (w *kv) Tables() []workload.Table { } // Ops implements the Opser interface. 
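+// When --enum is set, reads also select the computed enum column: the
+// generated statement has the shape `SELECT k, v, e FROM kv WHERE k IN
+// ($1, ...)` (see the corresponding branch below).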
-func (w *kv) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *kv) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { writeSeq := 0 if w.writeSeq != "" { first := w.writeSeq[0] @@ -227,7 +242,6 @@ func (w *kv) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, er } } - ctx := context.Background() sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err @@ -250,6 +264,14 @@ func (w *kv) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, er } fmt.Fprintf(&buf, `$%d`, i+1) } + } else if w.enum { + buf.WriteString(`SELECT k, v, e FROM kv WHERE k IN (`) + for i := 0; i < w.batchSize; i++ { + if i > 0 { + buf.WriteString(", ") + } + fmt.Fprintf(&buf, `$%d`, i+1) + } } else { // TODO(ajwerner): We're currently manually plumbing down the computed shard column // since the optimizer doesn't yet support deriving values of computed columns diff --git a/pkg/workload/ledger/ledger.go b/pkg/workload/ledger/ledger.go index e47c74eb3205..3ed949ee4621 100644 --- a/pkg/workload/ledger/ledger.go +++ b/pkg/workload/ledger/ledger.go @@ -11,6 +11,7 @@ package ledger import ( + "context" gosql "database/sql" "hash/fnv" "math/rand" @@ -20,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -179,7 +180,9 @@ func (w *ledger) Tables() []workload.Table { } // Ops implements the Opser interface. -func (w *ledger) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *ledger) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err diff --git a/pkg/workload/ledger/worker.go b/pkg/workload/ledger/worker.go index 9a32be56e612..e7c9cdce2285 100644 --- a/pkg/workload/ledger/worker.go +++ b/pkg/workload/ledger/worker.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) type worker struct { diff --git a/pkg/workload/movr/movr.go b/pkg/workload/movr/movr.go index c3e61b6d9913..1cb97f2c93be 100644 --- a/pkg/workload/movr/movr.go +++ b/pkg/workload/movr/movr.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/faker" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" "golang.org/x/exp/rand" ) diff --git a/pkg/workload/movr/workload.go b/pkg/workload/movr/workload.go index 15f550a16cc4..4cdcd673abf0 100644 --- a/pkg/workload/movr/workload.go +++ b/pkg/workload/movr/workload.go @@ -296,7 +296,9 @@ func (m *movrWorker) generateWorkSimulation() func(context.Context) error { } // Ops implements the Opser interface -func (m *movr) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (m *movr) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { // 
Initialize the faker in case it hasn't been setup already. m.fakerOnce.Do(func() { m.faker = faker.NewFaker() diff --git a/pkg/workload/pgx_helpers.go b/pkg/workload/pgx_helpers.go index 0c76c2adf6d6..625b35fb5c78 100644 --- a/pkg/workload/pgx_helpers.go +++ b/pkg/workload/pgx_helpers.go @@ -158,7 +158,9 @@ func (m *MultiConnPool) Close() { } // PgxTx is a thin wrapper that implements the crdb.Tx interface, allowing pgx -// transactions to be used with ExecuteInTx. +// transactions to be used with ExecuteInTx. The cockroach-go library has native +// support for pgx in crdb/pgx, but only for pgx v4. CRDB is stuck for now using +// pgx v3, as v4 needs Go modules. type PgxTx pgx.Tx var _ crdb.Tx = &PgxTx{} diff --git a/pkg/workload/querybench/query_bench.go b/pkg/workload/querybench/query_bench.go index ccc0bc74a94c..f0b56f19def7 100644 --- a/pkg/workload/querybench/query_bench.go +++ b/pkg/workload/querybench/query_bench.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -106,7 +106,9 @@ func (*queryBench) Tables() []workload.Table { } // Ops implements the Opser interface. -func (g *queryBench) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (g *queryBench) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(g, g.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err diff --git a/pkg/workload/querylog/querylog.go b/pkg/workload/querylog/querylog.go index 6680f3d458fc..9225b98fa0ee 100644 --- a/pkg/workload/querylog/querylog.go +++ b/pkg/workload/querylog/querylog.go @@ -34,9 +34,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" workloadrand "github.com/cockroachdb/cockroach/pkg/workload/rand" + "github.com/cockroachdb/errors" "github.com/jackc/pgx" "github.com/lib/pq/oid" - "github.com/pkg/errors" "github.com/spf13/pflag" ) @@ -171,9 +171,9 @@ func (w *querylog) Hooks() workload.Hooks { } // Ops implements the Opser interface. -func (w *querylog) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { - ctx := context.Background() - +func (w *querylog) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err @@ -494,7 +494,7 @@ func (w *worker) generatePlaceholders( // getTableNames fetches the names of all the tables in db and stores them in // w.state. 
func (w *querylog) getTableNames(db *gosql.DB) error { - rows, err := db.Query(`SHOW TABLES`) + rows, err := db.Query(`SELECT table_name FROM [SHOW TABLES] ORDER BY table_name`) if err != nil { return err } @@ -991,7 +991,7 @@ func printPlaceholder(i interface{}) string { case nil: return fmt.Sprintf("NULL") default: - panic(fmt.Sprintf("unsupported type: %T", i)) + panic(errors.AssertionFailedf("unsupported type: %T", i)) } } diff --git a/pkg/workload/queue/queue.go b/pkg/workload/queue/queue.go index 3d7ed9facc3b..8f3b34a1b48f 100644 --- a/pkg/workload/queue/queue.go +++ b/pkg/workload/queue/queue.go @@ -69,7 +69,9 @@ func (w *queue) Tables() []workload.Table { } // Ops implements the Opser interface. -func (w *queue) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *queue) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err diff --git a/pkg/workload/rand/rand.go b/pkg/workload/rand/rand.go index 02d32814ef21..6ed4cdca618b 100644 --- a/pkg/workload/rand/rand.go +++ b/pkg/workload/rand/rand.go @@ -19,7 +19,6 @@ import ( "math/rand" "reflect" "strings" - "testing" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -27,9 +26,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "github.com/cockroachdb/errors" "github.com/lib/pq" "github.com/lib/pq/oid" - "github.com/pkg/errors" "github.com/spf13/pflag" ) @@ -112,7 +111,9 @@ type col struct { } // Ops implements the Opser interface. -func (w *random) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *random) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err @@ -262,10 +263,6 @@ AND i.indisprimary`, relid) buf.WriteString(dmlSuffix.String()) - if testing.Verbose() { - fmt.Println(buf.String()) - } - writeStmt, err := db.Prepare(buf.String()) if err != nil { return workload.QueryLoad{}, err @@ -344,6 +341,8 @@ func DatumToGoSQL(d tree.Datum) (interface{}, error) { return d.UUID, nil case *tree.DIPAddr: return d.IPAddr.String(), nil + case *tree.DJSON: + return d.JSON.String(), nil } return nil, errors.Errorf("unhandled datum type: %s", reflect.TypeOf(d)) } diff --git a/pkg/workload/schemachange/deck.go b/pkg/workload/schemachange/deck.go new file mode 100644 index 000000000000..f4953d7279ab --- /dev/null +++ b/pkg/workload/schemachange/deck.go @@ -0,0 +1,68 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package schemachange + +import ( + "math/rand" + + "github.com/cockroachdb/cockroach/pkg/util/syncutil" +) + +// Deck is a random number generator that generates numbers in the range +// [0,len(weights)-1] where the probability of i is +// weights(i)/sum(weights). Unlike Weighted, the weights are specified as +// integers and used in a deck-of-cards style random number selection which +// ensures that each element is returned with a desired frequency within the +// size of the deck. +type deck struct { + rng *rand.Rand + mu struct { + syncutil.Mutex + index int + vals []int + } +} + +// newDeck returns a new deck random number generator. +func newDeck(rng *rand.Rand, weights ...int) *deck { + var sum int + for i := range weights { + sum += weights[i] + } + vals := make([]int, 0, sum) + for i := range weights { + for j := 0; j < weights[i]; j++ { + vals = append(vals, i) + } + } + d := &deck{ + rng: rng, + } + d.mu.index = len(vals) + d.mu.vals = vals + return d +} + +// Int returns a random number in the range [0,len(weights)-1] where the +// probability of i is weights(i)/sum(weights). +func (d *deck) Int() int { + d.mu.Lock() + if d.mu.index == len(d.mu.vals) { + d.rng.Shuffle(len(d.mu.vals), func(i, j int) { + d.mu.vals[i], d.mu.vals[j] = d.mu.vals[j], d.mu.vals[i] + }) + d.mu.index = 0 + } + result := d.mu.vals[d.mu.index] + d.mu.index++ + d.mu.Unlock() + return result +} diff --git a/pkg/workload/schemachange/optype_string.go b/pkg/workload/schemachange/optype_string.go new file mode 100644 index 000000000000..95858aa212d6 --- /dev/null +++ b/pkg/workload/schemachange/optype_string.go @@ -0,0 +1,49 @@ +// Code generated by "stringer -type=opType"; DO NOT EDIT. + +package schemachange + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
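+	// A quick sketch of the guarantee provided by deck.go above (weights
+	// illustrative): with d := newDeck(rng, 1, 3), every full pass of four
+	// draws returns exactly one 0 and three 1s, in shuffled order:
+	//
+	//	counts := make(map[int]int)
+	//	for i := 0; i < 4; i++ {
+	//		counts[d.Int()]++
+	//	}
+	//	// counts[0] == 1, counts[1] == 3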
+ var x [1]struct{} + _ = x[addColumn-0] + _ = x[addConstraint-1] + _ = x[createIndex-2] + _ = x[createSequence-3] + _ = x[createTable-4] + _ = x[createTableAs-5] + _ = x[createView-6] + _ = x[dropColumn-7] + _ = x[dropColumnDefault-8] + _ = x[dropColumnNotNull-9] + _ = x[dropColumnStored-10] + _ = x[dropConstraint-11] + _ = x[dropIndex-12] + _ = x[dropSequence-13] + _ = x[dropTable-14] + _ = x[dropView-15] + _ = x[dropSchema-16] + _ = x[renameColumn-17] + _ = x[renameIndex-18] + _ = x[renameSequence-19] + _ = x[renameTable-20] + _ = x[renameView-21] + _ = x[setColumnDefault-22] + _ = x[setColumnNotNull-23] + _ = x[setColumnType-24] + _ = x[insertRow-25] + _ = x[validate-26] +} + +const _opType_name = "addColumnaddConstraintcreateIndexcreateSequencecreateTablecreateTableAscreateViewdropColumndropColumnDefaultdropColumnNotNulldropColumnStoreddropConstraintdropIndexdropSequencedropTabledropViewdropSchemarenameColumnrenameIndexrenameSequencerenameTablerenameViewsetColumnDefaultsetColumnNotNullsetColumnTypeinsertRowvalidate" + +var _opType_index = [...]uint16{0, 9, 22, 33, 47, 58, 71, 81, 91, 108, 125, 141, 155, 164, 176, 185, 193, 203, 215, 226, 240, 251, 261, 277, 293, 306, 315, 323} + +func (i opType) String() string { + if i < 0 || i >= opType(len(_opType_index)-1) { + return "opType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _opType_name[_opType_index[i]:_opType_index[i+1]] +} diff --git a/pkg/workload/schemachange/schemachange.go b/pkg/workload/schemachange/schemachange.go new file mode 100644 index 000000000000..ec2c6f21e9e7 --- /dev/null +++ b/pkg/workload/schemachange/schemachange.go @@ -0,0 +1,1139 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package schemachange + +import ( + "context" + gosql "database/sql" + "fmt" + "math/rand" + "runtime" + "strings" + "sync/atomic" + + "github.com/cockroachdb/cockroach/pkg/sql/parser" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/workload" + "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "github.com/cockroachdb/errors" + "github.com/jackc/pgx" + "github.com/spf13/pflag" +) + +// This workload executes batches of schema changes asynchronously. Each +// batch is executed in a separate transaction and transactions run in +// parallel. Batches are drawn from a pre-defined distribution. +// Currently all schema change ops are equally likely to be chosen. This +// includes table creation but note that the tables contain no data. +// +// Example usage: +// `bin/workload run schemachange --init --concurrency=2 --verbose=0 --max-ops-per-worker=1000` +// will execute up to 1000 schema change operations per txn in two concurrent txns. 
+//
+// TODO(peter): This is still work in progress, we need to
+// - support more than 1 database
+// - reference sequences in column defaults
+// - create foreign keys
+// - support `ADD CONSTRAINT`
+// - support `SET COLUMN DEFAULT`
+//
+// TODO(spaskob): introspect errors returned from the workload and determine
+// whether they're expected or unexpected. Flag `tolerate-errors` should be
+// added to tolerate unexpected errors and then unexpected errors should fail
+// the workload.
+//
+// For example, an attempt to do something we don't support should be
+// swallowed (though if we can detect that up front, maybe we should just not
+// attempt it). It will be hard to use this test for anything more than
+// liveness detection until we go through the tedious process of classifying
+// errors.
+
+const (
+	defaultMaxOpsPerWorker = 5
+	defaultExistingPct     = 10
+)
+
+type schemaChange struct {
+	flags           workload.Flags
+	dbOverride      string
+	concurrency     int
+	maxOpsPerWorker int
+	existingPct     int
+	verbose         int
+	dryRun          bool
+}
+
+var schemaChangeMeta = workload.Meta{
+	Name:        `schemachange`,
+	Description: `schemachange randomly generates concurrent schema changes`,
+	Version:     `1.0.0`,
+	New: func() workload.Generator {
+		s := &schemaChange{}
+		s.flags.FlagSet = pflag.NewFlagSet(`schemachange`, pflag.ContinueOnError)
+		s.flags.StringVar(&s.dbOverride, `db`, ``,
+			`Override for the SQL database to use. If empty, defaults to the generator name`)
+		s.flags.IntVar(&s.concurrency, `concurrency`, 2*runtime.NumCPU(), /* TODO(spaskob): sensible default? */
+			`Number of concurrent workers`)
+		s.flags.IntVar(&s.maxOpsPerWorker, `max-ops-per-worker`, defaultMaxOpsPerWorker,
+			`Maximum number of operations to execute in a single transaction`)
+		s.flags.IntVar(&s.existingPct, `existing-pct`, defaultExistingPct,
+			`Percentage of times to use an existing name`)
+		s.flags.IntVarP(&s.verbose, `verbose`, `v`, 0, ``)
+		s.flags.BoolVarP(&s.dryRun, `dry-run`, `n`, false, ``)
+		return s
+	},
+}
+
+func init() {
+	workload.Register(schemaChangeMeta)
+}
+
+//go:generate stringer -type=opType
+type opType int
+
+const (
+	addColumn opType = iota // ALTER TABLE <table> ADD [COLUMN] <column> <type>
+	addConstraint           // ALTER TABLE <table> ADD CONSTRAINT <constraint>
+
+	createIndex    // CREATE INDEX <index> ON <table>
+	createSequence // CREATE SEQUENCE <sequence>
+	createTable    // CREATE TABLE <table>
+	createTableAs  // CREATE TABLE <table> AS
+	createView     // CREATE VIEW <view> AS
+
+	dropColumn        // ALTER TABLE <table> DROP COLUMN <column>
+	dropColumnDefault // ALTER TABLE <table> ALTER [COLUMN] <column> DROP DEFAULT
+	dropColumnNotNull // ALTER TABLE <table> ALTER [COLUMN] <column> DROP NOT NULL
+	dropColumnStored  // ALTER TABLE <table> ALTER [COLUMN] <column> DROP STORED
+	dropConstraint    // ALTER TABLE <table> DROP CONSTRAINT <constraint>
+	dropIndex         // DROP INDEX <index>@<table>
+	dropSequence      // DROP SEQUENCE <sequence>
+	dropTable         // DROP TABLE <table>
+	dropView          // DROP VIEW <view>
+	dropSchema        // DROP SCHEMA <schema>
+
+	renameColumn   // ALTER TABLE <table> RENAME [COLUMN] <column> TO <column>
+	renameIndex    // ALTER TABLE <table> RENAME CONSTRAINT <constraint> TO <constraint>
+	renameSequence // ALTER SEQUENCE <sequence> RENAME TO <sequence>
+	renameTable    // ALTER TABLE <table> RENAME TO <table>
+	renameView     // ALTER VIEW <view> RENAME TO <view>
+
+	setColumnDefault // ALTER TABLE <table> ALTER [COLUMN] <column> SET DEFAULT
+	setColumnNotNull // ALTER TABLE <table> ALTER [COLUMN] <column> SET NOT NULL
+	setColumnType    // ALTER TABLE <table> ALTER [COLUMN] <column> [SET DATA] TYPE <type>
+
+	insertRow // INSERT INTO <table> (<cols>) VALUES (<values>)
+
+	validate // validate all table descriptors
+)
+
+var opWeights = []int{
+	addColumn:         1,
+	addConstraint:     0, // TODO(spaskob): unimplemented
+	createIndex:       1,
+	createSequence:    1,
+	createTable:       1,
+	createTableAs:     1,
+	createView:        1,
+	dropColumn:        1,
+	dropColumnDefault: 1,
+	dropColumnNotNull: 1,
+	dropColumnStored:  1,
+	dropConstraint:    1,
+	dropIndex:         1,
+	dropSequence:      1,
+	dropTable:         1,
+	dropView:          1,
+	dropSchema:        1,
+	renameColumn:      1,
+	renameIndex:       1,
+	renameSequence:    1,
+	renameTable:       1,
+	renameView:        1,
+	setColumnDefault:  0, // TODO(spaskob): unimplemented
+	setColumnNotNull:  1,
+	setColumnType:     1,
+	insertRow:         1,
+	validate:          2, // validate twice as often
+}
+
+// Meta implements the workload.Generator interface.
+func (s *schemaChange) Meta() workload.Meta {
+	return schemaChangeMeta
+}
+
+// Flags implements the workload.Flagser interface.
+func (s *schemaChange) Flags() workload.Flags {
+	return s.flags
+}
+
+// Tables implements the workload.Generator interface.
+func (s *schemaChange) Tables() []workload.Table {
+	return nil
+}
+
+// Ops implements the workload.Opser interface.
+func (s *schemaChange) Ops(
+	ctx context.Context, urls []string, reg *histogram.Registry,
+) (workload.QueryLoad, error) {
+	sqlDatabase, err := workload.SanitizeUrls(s, s.dbOverride, urls)
+	if err != nil {
+		return workload.QueryLoad{}, err
+	}
+	cfg := workload.MultiConnPoolCfg{
+		MaxTotalConnections: s.concurrency * 2, // TODO(spaskob): pick a sensible default.
+	}
+	pool, err := workload.NewMultiConnPool(cfg, urls...)
+	if err != nil {
+		return workload.QueryLoad{}, err
+	}
+
+	seqNum, err := s.initSeqNum(pool)
+	if err != nil {
+		return workload.QueryLoad{}, err
+	}
+
+	ops := newDeck(rand.New(rand.NewSource(timeutil.Now().UnixNano())), opWeights...)
+	ql := workload.QueryLoad{SQLDatabase: sqlDatabase}
+	for i := 0; i < s.concurrency; i++ {
+		w := &schemaChangeWorker{
+			verbose:         s.verbose,
+			dryRun:          s.dryRun,
+			maxOpsPerWorker: s.maxOpsPerWorker,
+			existingPct:     s.existingPct,
+			rng:             rand.New(rand.NewSource(timeutil.Now().UnixNano())),
+			ops:             ops,
+			pool:            pool,
+			hists:           reg.GetHandle(),
+			seqNum:          seqNum,
+		}
+		ql.WorkerFns = append(ql.WorkerFns, w.run)
+	}
+	return ql, nil
+}
+
+// initSeqNum returns the smallest available sequence number to be
+// used to generate new unique names. Note that this assumes that no
+// other workload is being run at the same time.
+// TODO(spaskob): Do we need to protect from workloads running concurrently?
+// It's not obvious how the workloads will behave when accessing the same
+// cluster.
+func (s *schemaChange) initSeqNum(pool *workload.MultiConnPool) (*int64, error) {
+	seqNum := new(int64)
+
+	const q = `
+SELECT max(regexp_extract(name, '[0-9]+$')::int)
+  FROM ((SELECT table_name FROM [SHOW TABLES]) UNION (SELECT sequence_name FROM [SHOW SEQUENCES])) AS obj(name)
+ WHERE name ~ '^(table|view|seq)[0-9]+$';
+`
+	var max gosql.NullInt64
+	if err := pool.Get().QueryRow(q).Scan(&max); err != nil {
+		return nil, err
+	}
+	if max.Valid {
+		*seqNum = max.Int64 + 1
+	}
+
+	return seqNum, nil
+}
+
+type schemaChangeWorker struct {
+	verbose         int
+	dryRun          bool
+	maxOpsPerWorker int
+	existingPct     int
+	rng             *rand.Rand
+	ops             *deck
+	pool            *workload.MultiConnPool
+	hists           *histogram.Histograms
+	seqNum          *int64
+}
+
+// handleOpError returns an error if the op error is considered serious and
+// we should terminate the workload.
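+// For instance, a serialization failure (SQLSTATE 40001, Class 40) falls
+// through the switch below and is treated as benign, while an internal error
+// (Class XX) is wrapped and returned, terminating the workload.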
+func handleOpError(err error) error {
+	if err == nil {
+		return nil
+	}
+	if pgErr := (pgx.PgError{}); errors.As(err, &pgErr) {
+		sqlstate := pgErr.SQLState()
+		class := sqlstate[0:2]
+		switch class {
+		case "09":
+			return errors.Wrap(err, "Class 09 - Triggered Action Exception")
+		case "XX":
+			return errors.Wrap(err, "Class XX - Internal Error")
+		}
+	} else {
+		return errors.Wrap(err, "unexpected error")
+	}
+	return nil
+}
+
+var (
+	errRunInTxnFatalSentinel = errors.New("fatal error when running txn")
+	errRunInTxnRbkSentinel   = errors.New("txn needs to rollback")
+)
+
+func (w *schemaChangeWorker) runInTxn(tx *pgx.Tx, opsNum int) (string, error) {
+	var log strings.Builder
+	for i := 0; i < opsNum; i++ {
+		op, noops, err := w.randOp(tx)
+		if err != nil {
+			return noops, errors.Mark(
+				errors.Wrap(err, "could not generate a random operation"),
+				errRunInTxnFatalSentinel,
+			)
+		}
+		if w.verbose >= 2 {
+			// Print the failed attempts to produce a random operation.
+			log.WriteString(noops)
+		}
+		log.WriteString(fmt.Sprintf(" %s;\n", op))
+		if !w.dryRun {
+			histBin := "opOk"
+			start := timeutil.Now()
+			if _, err = tx.Exec(op); err != nil {
+				histBin = "txnRbk"
+				log.WriteString(fmt.Sprintf("***FAIL: %v\n", err))
+				log.WriteString("ROLLBACK;\n")
+				return log.String(), errors.Mark(err, errRunInTxnRbkSentinel)
+			}
+			elapsed := timeutil.Since(start)
+			w.hists.Get(histBin).Record(elapsed)
+		}
+	}
+	return log.String(), nil
+}
+
+func (w *schemaChangeWorker) run(_ context.Context) error {
+	tx, err := w.pool.Get().Begin()
+	if err != nil {
+		return errors.Wrap(err, "cannot get a connection and begin a txn")
+	}
+	opsNum := 1 + w.rng.Intn(w.maxOpsPerWorker)
+
+	// Run between 1 and maxOpsPerWorker schema change operations.
+	start := timeutil.Now()
+	logs, err := w.runInTxn(tx, opsNum)
+	logs = "BEGIN\n" + logs
+	defer func() {
+		if w.verbose >= 1 {
+			fmt.Print(logs)
+		}
+	}()
+
+	if err != nil {
+		// Rollback in all cases to release the txn object and its conn pool.
+		if rbkErr := tx.Rollback(); rbkErr != nil {
+			return errors.Wrapf(err, "could not rollback: %v", rbkErr)
+		}
+		switch {
+		case errors.Is(err, errRunInTxnFatalSentinel):
+			return err
+		case errors.Is(err, errRunInTxnRbkSentinel):
+			if seriousErr := handleOpError(err); seriousErr != nil {
+				return seriousErr
+			}
+			return nil
+		default:
+			return errors.Wrap(err, "unexpected error")
+		}
+	}
+
+	// If there were no errors, commit the txn.
+	histBin := "txnOk"
+	cmtErrMsg := ""
+	if err = tx.Commit(); err != nil {
+		histBin = "txnCmtErr"
+		cmtErrMsg = fmt.Sprintf("***FAIL: %v", err)
+	}
+	w.hists.Get(histBin).Record(timeutil.Since(start))
+	logs = logs + fmt.Sprintf("COMMIT; %s\n", cmtErrMsg)
+	return nil
+}
+
+// randOp attempts to produce a random schema change operation. It returns a
+// triple `(randOp, log, error)`. On success `randOp` is the random schema
+// change constructed. Constructing a random schema change may require a few
+// stochastic attempts and if verbosity is >= 2 the unsuccessful attempts are
+// recorded in `log` to help with debugging of the workload.
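+//
+// A noop entry in `log` looks like (error text illustrative; this is the
+// pgx.ErrNoRows message):
+//
+//	NOOP: dropView -> no rows in result set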
+func (w *schemaChangeWorker) randOp(tx *pgx.Tx) (string, string, error) { + var log strings.Builder + for { + var stmt string + var err error + op := opType(w.ops.Int()) + switch op { + case addColumn: + stmt, err = w.addColumn(tx) + + case addConstraint: + stmt, err = w.addConstraint(tx) + + case createIndex: + stmt, err = w.createIndex(tx) + + case createSequence: + stmt, err = w.createSequence(tx) + + case createTable: + stmt, err = w.createTable(tx) + + case createTableAs: + stmt, err = w.createTableAs(tx) + + case createView: + stmt, err = w.createView(tx) + + case dropColumn: + stmt, err = w.dropColumn(tx) + + case dropColumnDefault: + stmt, err = w.dropColumnDefault(tx) + + case dropColumnNotNull: + stmt, err = w.dropColumnNotNull(tx) + + case dropColumnStored: + stmt, err = w.dropColumnStored(tx) + + case dropConstraint: + stmt, err = w.dropConstraint(tx) + + case dropIndex: + stmt, err = w.dropIndex(tx) + + case dropSequence: + stmt, err = w.dropSequence(tx) + + case dropTable: + stmt, err = w.dropTable(tx) + + case dropView: + stmt, err = w.dropView(tx) + + case dropSchema: + stmt, err = w.dropSchema(tx) + + case renameColumn: + stmt, err = w.renameColumn(tx) + + case renameIndex: + stmt, err = w.renameIndex(tx) + + case renameSequence: + stmt, err = w.renameSequence(tx) + + case renameTable: + stmt, err = w.renameTable(tx) + + case renameView: + stmt, err = w.renameView(tx) + + case setColumnDefault: + stmt, err = w.setColumnDefault(tx) + + case setColumnNotNull: + stmt, err = w.setColumnNotNull(tx) + + case setColumnType: + stmt, err = w.setColumnType(tx) + + case insertRow: + stmt, err = w.insertRow(tx) + + case validate: + stmt, err = w.validate(tx) + } + + // TODO(spaskob): use more fine-grained error reporting. + if stmt == "" || errors.Is(err, pgx.ErrNoRows) { + log.WriteString(fmt.Sprintf("NOOP: %s -> %v\n", op, err)) + continue + } + return stmt, log.String(), err + } +} + +func (w *schemaChangeWorker) addColumn(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + columnName, err := w.randColumn(tx, tableName.String(), w.existingPct) + if err != nil { + return "", err + } + typ, err := w.randType(tx) + if err != nil { + return "", err + } + + def := &tree.ColumnTableDef{ + Name: tree.Name(columnName), + Type: typ, + } + // Use the worker's seeded rng rather than the global rand source so that + // runs stay reproducible. + def.Nullable.Nullability = tree.Nullability(w.rng.Intn(1 + int(tree.SilentNull))) + return fmt.Sprintf(`ALTER TABLE %s ADD COLUMN %s`, tableName, tree.Serialize(def)), nil +} + +func (w *schemaChangeWorker) addConstraint(tx *pgx.Tx) (string, error) { + // TODO(peter): unimplemented + // - Export sqlbase.randColumnTableDef.
+ return "", nil +} + +func (w *schemaChangeWorker) createIndex(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + columnNames, err := w.tableColumnsShuffled(tx, tableName.String()) + if err != nil { + return "", err + } + + indexName, err := w.randIndex(tx, tableName.String(), w.existingPct) + if err != nil { + return "", err + } + + def := &tree.CreateIndex{ + Name: tree.Name(indexName), + Table: *tableName, + Unique: w.rng.Intn(4) == 0, // 25% UNIQUE + Inverted: w.rng.Intn(10) == 0, // 10% INVERTED + IfNotExists: w.rng.Intn(2) == 0, // 50% IF NOT EXISTS + Columns: make(tree.IndexElemList, 1+w.rng.Intn(len(columnNames))), + } + + for i := range def.Columns { + def.Columns[i].Column = tree.Name(columnNames[i]) + def.Columns[i].Direction = tree.Direction(w.rng.Intn(1 + int(tree.Descending))) + } + columnNames = columnNames[len(def.Columns):] + + if n := len(columnNames); n > 0 { + def.Storing = make(tree.NameList, w.rng.Intn(1+n)) + for i := range def.Storing { + def.Storing[i] = tree.Name(columnNames[i]) + } + } + + return tree.Serialize(def), nil +} + +func (w *schemaChangeWorker) createSequence(tx *pgx.Tx) (string, error) { + return fmt.Sprintf(`CREATE SEQUENCE "seq%d"`, atomic.AddInt64(w.seqNum, 1)), nil +} + +func (w *schemaChangeWorker) createTable(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 10) + if err != nil { + return "", err + } + + stmt := sqlbase.RandCreateTable(w.rng, "table", int(atomic.AddInt64(w.seqNum, 1))) + stmt.Table = *tableName + stmt.IfNotExists = w.rng.Intn(2) == 0 + return tree.Serialize(stmt), nil +} + +func (w *schemaChangeWorker) createTableAs(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + columnNames, err := w.tableColumnsShuffled(tx, tableName.String()) + if err != nil { + return "", err + } + columnNames = columnNames[:1+w.rng.Intn(len(columnNames))] + + names := make(tree.NameList, len(columnNames)) + for i := range names { + names[i] = tree.Name(columnNames[i]) + } + + destTableName, err := w.randTable(tx, 10) + if err != nil { + return "", err + } + + return fmt.Sprintf(`CREATE TABLE %s AS SELECT %s FROM %s`, + destTableName, tree.Serialize(&names), tableName), nil +} + +func (w *schemaChangeWorker) createView(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + columnNames, err := w.tableColumnsShuffled(tx, tableName.String()) + if err != nil { + return "", err + } + columnNames = columnNames[:1+w.rng.Intn(len(columnNames))] + + names := make(tree.NameList, len(columnNames)) + for i := range names { + names[i] = tree.Name(columnNames[i]) + } + + destViewName, err := w.randView(tx, w.existingPct) + if err != nil { + return "", err + } + + // TODO(peter): Create views that are dependent on multiple tables. 
+ return fmt.Sprintf(`CREATE VIEW %s AS SELECT %s FROM %s`, + destViewName, tree.Serialize(&names), tableName), nil +} + +func (w *schemaChangeWorker) dropColumn(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + columnName, err := w.randColumn(tx, tableName.String(), 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`ALTER TABLE %s DROP COLUMN "%s"`, tableName, columnName), nil +} + +func (w *schemaChangeWorker) dropColumnDefault(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + columnName, err := w.randColumn(tx, tableName.String(), 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN "%s" DROP DEFAULT`, tableName, columnName), nil +} + +func (w *schemaChangeWorker) dropColumnNotNull(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + columnName, err := w.randColumn(tx, tableName.String(), 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN "%s" DROP NOT NULL`, tableName, columnName), nil +} + +func (w *schemaChangeWorker) dropColumnStored(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + columnName, err := w.randColumn(tx, tableName.String(), 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN "%s" DROP STORED`, tableName, columnName), nil +} + +func (w *schemaChangeWorker) dropConstraint(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + constraintName, err := w.randConstraint(tx, tableName.String()) + if err != nil { + return "", err + } + return fmt.Sprintf(`ALTER TABLE %s DROP CONSTRAINT "%s"`, tableName, constraintName), nil +} + +func (w *schemaChangeWorker) dropIndex(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + indexName, err := w.randIndex(tx, tableName.String(), 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`DROP INDEX %s@"%s"`, tableName, indexName), nil +} + +func (w *schemaChangeWorker) dropSequence(tx *pgx.Tx) (string, error) { + sequenceName, err := w.randSequence(tx, 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`DROP SEQUENCE "%s"`, sequenceName), nil +} + +func (w *schemaChangeWorker) dropTable(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`DROP TABLE %s`, tableName), nil +} + +func (w *schemaChangeWorker) dropView(tx *pgx.Tx) (string, error) { + viewName, err := w.randView(tx, 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`DROP VIEW %s`, viewName), nil +} + +func (w *schemaChangeWorker) renameColumn(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + srcColumnName, err := w.randColumn(tx, tableName.String(), 100) + if err != nil { + return "", err + } + + destColumnName, err := w.randColumn(tx, tableName.String(), 50) + if err != nil { + return "", err + } + + return fmt.Sprintf(`ALTER TABLE %s RENAME COLUMN "%s" TO "%s"`, + tableName, srcColumnName, destColumnName), nil +} + +func (w *schemaChangeWorker) renameIndex(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + srcIndexName, err := 
w.randIndex(tx, tableName.String(), w.existingPct) + if err != nil { + return "", err + } + + destIndexName, err := w.randIndex(tx, tableName.String(), 50) + if err != nil { + return "", err + } + + return fmt.Sprintf(`ALTER INDEX %s@"%s" RENAME TO "%s"`, + tableName, srcIndexName, destIndexName), nil +} + +func (w *schemaChangeWorker) renameSequence(tx *pgx.Tx) (string, error) { + srcSequenceName, err := w.randSequence(tx, 100) + if err != nil { + return "", err + } + + destSequenceName, err := w.randSequence(tx, 50) + if err != nil { + return "", err + } + + return fmt.Sprintf(`ALTER SEQUENCE "%s" RENAME TO "%s"`, srcSequenceName, destSequenceName), nil +} + +func (w *schemaChangeWorker) renameTable(tx *pgx.Tx) (string, error) { + srcTableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + destTableName, err := w.randTable(tx, 50) + if err != nil { + return "", err + } + + return fmt.Sprintf(`ALTER TABLE %s RENAME TO %s`, srcTableName, destTableName), nil +} + +func (w *schemaChangeWorker) renameView(tx *pgx.Tx) (string, error) { + srcViewName, err := w.randView(tx, 100) + if err != nil { + return "", err + } + + destViewName, err := w.randView(tx, 50) + if err != nil { + return "", err + } + + return fmt.Sprintf(`ALTER VIEW %s RENAME TO %s`, srcViewName, destViewName), nil +} + +func (w *schemaChangeWorker) setColumnDefault(tx *pgx.Tx) (string, error) { + // TODO(peter): unimplemented + return "", nil +} + +func (w *schemaChangeWorker) setColumnNotNull(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + + columnName, err := w.randColumn(tx, tableName.String(), 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN "%s" SET NOT NULL`, tableName, columnName), nil +} + +func (w *schemaChangeWorker) setColumnType(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", err + } + columnName, err := w.randColumn(tx, tableName.String(), 100) + if err != nil { + return "", err + } + typ, err := w.randType(tx) + if err != nil { + return "", err + } + return fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN "%s" SET DATA TYPE %s`, + tableName, columnName, typ), nil +} + +func (w *schemaChangeWorker) insertRow(tx *pgx.Tx) (string, error) { + tableName, err := w.randTable(tx, 100) + if err != nil { + return "", errors.Wrap(err, "error getting random table name") + } + cols, err := w.getTableColumns(tx, tableName.String()) + if err != nil { + return "", errors.Wrap(err, "error getting table columns for insert row") + } + colNames := []string{} + rows := []string{} + for _, col := range cols { + colNames = append(colNames, fmt.Sprintf(`"%s"`, col.name)) + } + numRows := w.rng.Intn(10) + 1 + for i := 0; i < numRows; i++ { + var row []string + for _, col := range cols { + d := sqlbase.RandDatum(w.rng, col.typ, col.nullable) + row = append(row, tree.AsStringWithFlags(d, tree.FmtParsable)) + } + rows = append(rows, fmt.Sprintf("(%s)", strings.Join(row, ","))) + } + return fmt.Sprintf( + `INSERT INTO %s (%s) VALUES %s`, + tableName, + strings.Join(colNames, ","), + strings.Join(rows, ","), + ), nil +} + +func (w *schemaChangeWorker) validate(tx *pgx.Tx) (string, error) { + validateStmt := "SELECT 'validating all objects'" + rows, err := tx.Query(`SELECT * FROM "".crdb_internal.invalid_objects ORDER BY id`) + if err != nil { + return validateStmt, err + } + defer rows.Close() + + var errs []string + for rows.Next() { + var id int64 +
var dbName, schemaName, objName, errStr string + if err := rows.Scan(&id, &dbName, &schemaName, &objName, &errStr); err != nil { + return validateStmt, err + } + errs = append( + errs, + fmt.Sprintf("id %d, db %s, schema %s, name %s: %s", id, dbName, schemaName, objName, errStr), + ) + } + + if rows.Err() != nil { + return "", errors.Wrap(rows.Err(), "querying for validation errors failed") + } + + if len(errs) == 0 { + return validateStmt, nil + } + return validateStmt, errors.Errorf("Validation FAIL:\n%s", strings.Join(errs, "\n")) +} + +type column struct { + name string + typ *types.T + nullable bool +} + +func (w *schemaChangeWorker) getTableColumns(tx *pgx.Tx, tableName string) ([]column, error) { + q := fmt.Sprintf(` + SELECT column_name, data_type, is_nullable + FROM [SHOW COLUMNS FROM %s] +`, tableName) + rows, err := tx.Query(q) + if err != nil { + return nil, err + } + defer rows.Close() + var typNames []string + var ret []column + for rows.Next() { + var c column + var typName string + err := rows.Scan(&c.name, &typName, &c.nullable) + if err != nil { + return nil, err + } + typNames = append(typNames, typName) + ret = append(ret, c) + } + if err := rows.Err(); err != nil { + return nil, err + } + for i := range ret { + c := &ret[i] + stmt, err := parser.ParseOne(fmt.Sprintf("SELECT 'otan wuz here'::%s", typNames[i])) + if err != nil { + return nil, err + } + c.typ = stmt.AST.(*tree.Select).Select.(*tree.SelectClause).Exprs[0].Expr.(*tree.CastExpr).Type + } + + return ret, nil +} + +func (w *schemaChangeWorker) randColumn( + tx *pgx.Tx, tableName string, pctExisting int, +) (string, error) { + if w.rng.Intn(100) >= pctExisting { + // We make a unique name for all columns by prefixing them with the table + // index to make it easier to reference columns from different tables. + return fmt.Sprintf("col%s_%d", + strings.TrimPrefix(tableName, "table"), atomic.AddInt64(w.seqNum, 1)), nil + } + q := fmt.Sprintf(` + SELECT column_name + FROM [SHOW COLUMNS FROM %s] +ORDER BY random() + LIMIT 1; +`, tableName) + var name string + if err := tx.QueryRow(q).Scan(&name); err != nil { + return "", err + } + return name, nil +} + +func (w *schemaChangeWorker) randConstraint(tx *pgx.Tx, tableName string) (string, error) { + q := fmt.Sprintf(` + SELECT constraint_name + FROM [SHOW CONSTRAINTS FROM %s] +ORDER BY random() + LIMIT 1; +`, tableName) + var name string + err := tx.QueryRow(q).Scan(&name) + if err != nil { + return "", err + } + return name, nil +} + +func (w *schemaChangeWorker) randIndex( + tx *pgx.Tx, tableName string, pctExisting int, +) (string, error) { + if w.rng.Intn(100) >= pctExisting { + // We make a unique name for all indices by prefixing them with the table + // index to make it easier to reference columns from different tables.
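+	// For example, a new index on "table3" might be named "index3_17", with
+	// the numeric suffix taken from the shared sequence number (the names
+	// here are illustrative).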
+ return fmt.Sprintf("index%s_%d", + strings.TrimPrefix(tableName, "table"), atomic.AddInt64(w.seqNum, 1)), nil + } + q := fmt.Sprintf(` + SELECT index_name + FROM [SHOW INDEXES FROM %s] +ORDER BY random() + LIMIT 1; +`, tableName) + var name string + if err := tx.QueryRow(q).Scan(&name); err != nil { + return "", err + } + return name, nil +} + +func (w *schemaChangeWorker) randSequence(tx *pgx.Tx, pctExisting int) (string, error) { + if w.rng.Intn(100) >= pctExisting { + return fmt.Sprintf(`seq%d`, atomic.AddInt64(w.seqNum, 1)), nil + } + const q = ` + SELECT sequence_name + FROM [SHOW SEQUENCES] + WHERE sequence_name LIKE 'seq%' +ORDER BY random() + LIMIT 1; +` + var name string + if err := tx.QueryRow(q).Scan(&name); err != nil { + return "", err + } + return name, nil +} + +// randTable returns a schema name along with a table name +func (w *schemaChangeWorker) randTable(tx *pgx.Tx, pctExisting int) (*tree.TableName, error) { + if w.rng.Intn(100) >= pctExisting { + randSchema, err := w.randSchema(tx, 100-pctExisting) + + if err != nil { + treeTableName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{}, "") + return &treeTableName, err + } + + treeTableName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{ + SchemaName: tree.Name(randSchema), + ExplicitSchema: true, + }, tree.Name(fmt.Sprintf("table%d", atomic.AddInt64(w.seqNum, 1)))) + return &treeTableName, nil + } + + const q = ` + SELECT schema_name, table_name + FROM [SHOW TABLES] + WHERE table_name LIKE 'table%' +ORDER BY random() + LIMIT 1; +` + var schemaName string + var tableName string + if err := tx.QueryRow(q).Scan(&schemaName, &tableName); err != nil { + treeTableName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{}, "") + return &treeTableName, err + } + + treeTableName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{ + SchemaName: tree.Name(schemaName), + ExplicitSchema: true, + }, tree.Name(tableName)) + return &treeTableName, nil +} + +func (w *schemaChangeWorker) randView(tx *pgx.Tx, pctExisting int) (*tree.TableName, error) { + if w.rng.Intn(100) >= pctExisting { + randSchema, err := w.randSchema(tx, 100-pctExisting) + if err != nil { + treeViewName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{}, "") + return &treeViewName, err + } + treeViewName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{ + SchemaName: tree.Name(randSchema), + ExplicitSchema: true, + }, tree.Name(fmt.Sprintf("view%d", atomic.AddInt64(w.seqNum, 1)))) + return &treeViewName, nil + } + const q = ` + SELECT schema_name, table_name + FROM [SHOW TABLES] + WHERE table_name LIKE 'view%' +ORDER BY random() + LIMIT 1; +` + var schemaName string + var viewName string + if err := tx.QueryRow(q).Scan(&schemaName, &viewName); err != nil { + treeViewName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{}, "") + return &treeViewName, err + } + treeViewName := tree.MakeTableNameFromPrefix(tree.TableNamePrefix{ + SchemaName: tree.Name(schemaName), + ExplicitSchema: true, + }, tree.Name(viewName)) + return &treeViewName, nil +} + +func (w *schemaChangeWorker) tableColumnsShuffled(tx *pgx.Tx, tableName string) ([]string, error) { + q := fmt.Sprintf(` +SELECT column_name +FROM [SHOW COLUMNS FROM %s]; +`, tableName) + + rows, err := tx.Query(q) + if err != nil { + return nil, err + } + defer rows.Close() + + var columnNames []string + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + columnNames = append(columnNames, name) + } + if rows.Err() != nil { + return nil, rows.Err() 
+ } + + w.rng.Shuffle(len(columnNames), func(i, j int) { + columnNames[i], columnNames[j] = columnNames[j], columnNames[i] + }) + + if len(columnNames) <= 0 { + return nil, errors.Errorf("table %s has no columns", tableName) + } + return columnNames, nil +} + +func (w *schemaChangeWorker) randType(tx *pgx.Tx) (*types.T, error) { + return sqlbase.RandSortingType(w.rng), nil +} + +func (w *schemaChangeWorker) randSchema(tx *pgx.Tx, pctExisting int) (string, error) { + if w.rng.Intn(100) >= pctExisting { + return fmt.Sprintf("schema%d", atomic.AddInt64(w.seqNum, 1)), nil + } + const q = ` + SELECT schema_name + FROM information_schema.schemata + WHERE schema_name + LIKE 'schema%' + OR schema_name = 'public' +ORDER BY random() + LIMIT 1; +` + var name string + if err := tx.QueryRow(q).Scan(&name); err != nil { + return "", err + } + return name, nil +} + +func (w *schemaChangeWorker) dropSchema(tx *pgx.Tx) (string, error) { + schemaName, err := w.randSchema(tx, 100) + if err != nil { + return "", err + } + return fmt.Sprintf(`DROP SCHEMA "%s" CASCADE`, schemaName), nil +} diff --git a/pkg/workload/sql_runner.go b/pkg/workload/sql_runner.go index c1c2b9fb6837..f2bae0c13f74 100644 --- a/pkg/workload/sql_runner.go +++ b/pkg/workload/sql_runner.go @@ -15,8 +15,8 @@ import ( "fmt" "strings" + "github.com/cockroachdb/errors" "github.com/jackc/pgx" - "github.com/pkg/errors" ) // SQLRunner is a helper for issuing SQL statements; it supports multiple diff --git a/pkg/workload/sqlsmith/sqlsmith.go b/pkg/workload/sqlsmith/sqlsmith.go index 7a267738cdc4..62934aff2200 100644 --- a/pkg/workload/sqlsmith/sqlsmith.go +++ b/pkg/workload/sqlsmith/sqlsmith.go @@ -115,7 +115,9 @@ func (g *sqlSmith) validateErrorSetting() error { } // Ops implements the Opser interface. -func (g *sqlSmith) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (g *sqlSmith) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { if err := g.validateErrorSetting(); err != nil { return workload.QueryLoad{}, err } diff --git a/pkg/workload/tpcc/checks.go b/pkg/workload/tpcc/checks.go index 248f72b04ba5..696b22727575 100644 --- a/pkg/workload/tpcc/checks.go +++ b/pkg/workload/tpcc/checks.go @@ -13,7 +13,7 @@ package tpcc import ( gosql "database/sql" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) // Check is a tpcc consistency check. 
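A note on the errors-package migration visible throughout these files: swapping github.com/pkg/errors for github.com/cockroachdb/errors goes hand in hand with replacing direct comparisons such as err == gosql.ErrNoRows with errors.Is, which still matches a sentinel after the error has been wrapped (see the delivery.go hunk below). A minimal standalone sketch of the difference, using only the standard library for brevity (hypothetical example, not part of the change):

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

func main() {
	// Wrapping an error (here via fmt.Errorf's %w verb) produces a new value,
	// so direct equality against the sentinel stops matching, while
	// errors.Is keeps working by unwrapping the chain.
	err := fmt.Errorf("scanning order id: %w", sql.ErrNoRows)
	fmt.Println(err == sql.ErrNoRows)          // false
	fmt.Println(errors.Is(err, sql.ErrNoRows)) // true
}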
diff --git a/pkg/workload/tpcc/ddls.go b/pkg/workload/tpcc/ddls.go index bb538e0a1e30..01874030fbdd 100644 --- a/pkg/workload/tpcc/ddls.go +++ b/pkg/workload/tpcc/ddls.go @@ -14,7 +14,7 @@ import ( gosql "database/sql" "fmt" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/sync/errgroup" ) @@ -91,7 +91,7 @@ const ( h_amount decimal(6,2), h_data varchar(24), primary key (h_w_id, rowid)` - tpccHistorySchemaFkSuffix = ` + deprecatedTpccHistorySchemaFkSuffix = ` index history_customer_fk_idx (h_c_w_id, h_c_d_id, h_c_id), index history_district_fk_idx (h_w_id, h_d_id)` @@ -154,7 +154,7 @@ const ( s_remote_cnt integer, s_data varchar(50), primary key (s_w_id, s_i_id)` - tpccStockSchemaFkSuffix = ` + deprecatedTpccStockSchemaFkSuffix = ` index stock_item_fk_idx (s_i_id)` tpccStockSchemaInterleaveSuffix = ` interleave in parent warehouse (s_w_id)` @@ -172,7 +172,7 @@ const ( ol_amount decimal(6,2), ol_dist_info char(24), primary key (ol_w_id, ol_d_id, ol_o_id DESC, ol_number)` - tpccOrderLineSchemaFkSuffix = ` + deprecatedTpccOrderLineSchemaFkSuffix = ` index order_line_stock_fk_idx (ol_supply_w_id, ol_i_id)` tpccOrderLineSchemaInterleaveSuffix = ` interleave in parent "order" (ol_w_id, ol_d_id, ol_o_id)` diff --git a/pkg/workload/tpcc/delivery.go b/pkg/workload/tpcc/delivery.go index 3de83db9be2a..127b2dcb7bec 100644 --- a/pkg/workload/tpcc/delivery.go +++ b/pkg/workload/tpcc/delivery.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach-go/crdb" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/exp/rand" ) @@ -100,7 +100,7 @@ func (del *delivery) run(ctx context.Context, wID int) (interface{}, error) { var oID int if err := del.selectNewOrder.QueryRowTx(ctx, tx, wID, dID).Scan(&oID); err != nil { // If no matching order is found, the delivery of this order is skipped. 
- if err != gosql.ErrNoRows { + if !errors.Is(err, gosql.ErrNoRows) { atomic.AddUint64(&del.config.auditor.skippedDelivieries, 1) return err } diff --git a/pkg/workload/tpcc/new_order.go b/pkg/workload/tpcc/new_order.go index c120f9ecee9c..55df81018f50 100644 --- a/pkg/workload/tpcc/new_order.go +++ b/pkg/workload/tpcc/new_order.go @@ -21,8 +21,8 @@ import ( "github.com/cockroachdb/cockroach-go/crdb" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" + "github.com/cockroachdb/errors" "github.com/lib/pq" - "github.com/pkg/errors" "golang.org/x/exp/rand" ) @@ -433,7 +433,7 @@ func (n *newOrder) run(ctx context.Context, wID int) (interface{}, error) { return nil }) - if err == errSimulated { + if errors.Is(err, errSimulated) { return d, nil } return d, err diff --git a/pkg/workload/tpcc/order_status.go b/pkg/workload/tpcc/order_status.go index f53284800b79..e07574cad65b 100644 --- a/pkg/workload/tpcc/order_status.go +++ b/pkg/workload/tpcc/order_status.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" + "github.com/cockroachdb/errors" "github.com/jackc/pgx/pgtype" - "github.com/pkg/errors" "golang.org/x/exp/rand" ) diff --git a/pkg/workload/tpcc/partition.go b/pkg/workload/tpcc/partition.go index 71812e2c08ee..dcdd2bcc1e5e 100644 --- a/pkg/workload/tpcc/partition.go +++ b/pkg/workload/tpcc/partition.go @@ -14,8 +14,9 @@ import ( "bytes" gosql "database/sql" "fmt" + "strings" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/exp/rand" ) @@ -313,15 +314,12 @@ func partitionTable( func partitionIndex( db *gosql.DB, cfg zoneConfig, p *partitioner, table, index, col string, idx int, ) error { + indexStr := fmt.Sprintf("%s@%s", table, index) if exists, err := indexExists(db, table, index); err != nil { return err } else if !exists { - // If the index doesn't exist then there's nothing to do. This is the - // case for a few of the indexes that are only needed for foreign keys - // when foreign keys are disabled. - return nil + return errors.Errorf("could not find index %q", indexStr) } - indexStr := fmt.Sprintf("%s@%s", table, index) return partitionObject(db, cfg, p, "INDEX", indexStr, col, table, idx) } @@ -345,16 +343,10 @@ func partitionOrder(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { } func partitionOrderLine(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { - if err := partitionTable(db, cfg, wPart, "order_line", "ol_w_id", 0); err != nil { - return err - } - return partitionIndex(db, cfg, wPart, "order_line", "order_line_stock_fk_idx", "ol_supply_w_id", 1) + return partitionTable(db, cfg, wPart, "order_line", "ol_w_id", 0) } func partitionStock(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { - // The stock_item_fk_idx can't be partitioned because it doesn't have a - // warehouse prefix. It's an all-around unfortunate index that we only - // need because of a restriction in SQL. See #36859 and #37255. 
return partitionTable(db, cfg, wPart, "stock", "s_w_id", 0) } @@ -366,13 +358,7 @@ func partitionCustomer(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { } func partitionHistory(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { - if err := partitionTable(db, cfg, wPart, "history", "h_w_id", 0); err != nil { - return err - } - if err := partitionIndex(db, cfg, wPart, "history", "history_customer_fk_idx", "h_c_w_id", 1); err != nil { - return err - } - return partitionIndex(db, cfg, wPart, "history", "history_district_fk_idx", "h_w_id", 2) + return partitionTable(db, cfg, wPart, "history", "h_w_id", 0) } // replicateItem creates a covering "replicated index" for the item table for @@ -458,6 +444,9 @@ func partitionCount(db *gosql.DB) (int, error) { } func indexExists(db *gosql.DB, table, index string) (bool, error) { + // Strip any quotes around the table name. + table = strings.ReplaceAll(table, `"`, ``) + var exists bool if err := db.QueryRow(` SELECT count(*) > 0 diff --git a/pkg/workload/tpcc/payment.go b/pkg/workload/tpcc/payment.go index b1b4d01a67a9..e49c7dd8931a 100644 --- a/pkg/workload/tpcc/payment.go +++ b/pkg/workload/tpcc/payment.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/exp/rand" ) diff --git a/pkg/workload/tpcc/result.go b/pkg/workload/tpcc/result.go index 590cb67b5892..65a2205d7d55 100644 --- a/pkg/workload/tpcc/result.go +++ b/pkg/workload/tpcc/result.go @@ -11,12 +11,11 @@ package tpcc import ( - "strings" "time" "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "github.com/cockroachdb/errors" "github.com/codahale/hdrhistogram" - "github.com/pkg/errors" ) // SpecWarehouseFactor is the default maximum per-warehouse newOrder @@ -179,10 +178,11 @@ func (r *Result) FailureError() error { // Collect all failing criteria errors into errs so that the returned error // contains information about all of the failures. 
- var errs []error + var err error if eff := r.Efficiency(); eff < PassingEfficiency { - errs = append(errs, errors.Errorf("efficiency value of %v is below "+ - "passing threshold of %v", eff, PassingEfficiency)) + err = errors.CombineErrors(err, + errors.Errorf("efficiency value of %v is below passing threshold of %v", + eff, PassingEfficiency)) } for query, max90th := range passing90ThPercentile { h, exists := r.Cumulative[query] @@ -190,22 +190,10 @@ func (r *Result) FailureError() error { return errors.Errorf("no %v data exists", query) } if v := time.Duration(h.ValueAtQuantile(.9)); v > max90th { - errs = append(errs, errors.Errorf("90th percentile latency for %v at %v "+ - "exceeds passing threshold of %v", query, v, max90th)) + err = errors.CombineErrors(err, + errors.Errorf("90th percentile latency for %v at %v exceeds passing threshold of %v", + query, v, max90th)) } } - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errors.New("failed with multiple errors: " + - strings.Join(func() (s []string) { - for _, e := range errs { - s = append(s, e.Error()) - } - return s - }(), ", ")) - } + return err } diff --git a/pkg/workload/tpcc/tpcc.go b/pkg/workload/tpcc/tpcc.go index 77cd2906500a..a93248d92b05 100644 --- a/pkg/workload/tpcc/tpcc.go +++ b/pkg/workload/tpcc/tpcc.go @@ -26,8 +26,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" "github.com/cockroachdb/cockroach/pkg/workload/workloadimpl" + "github.com/cockroachdb/errors" "github.com/jackc/pgx" - "github.com/pkg/errors" "github.com/spf13/pflag" "golang.org/x/exp/rand" "golang.org/x/sync/errgroup" @@ -53,7 +53,10 @@ type tpcc struct { waitFraction float64 workers int fks bool - dbOverride string + // deprecatedFkIndexes adds foreign key indexes that are no longer needed + // due to origin index restrictions being lifted. + deprecatedFkIndexes bool + dbOverride string txInfos []txInfo // deck contains indexes into the txInfos slice. @@ -138,7 +141,7 @@ var tpccMeta = workload.Meta{ Name: `tpcc`, Description: `TPC-C simulates a transaction processing workload` + ` using a rich schema of multiple tables`, - Version: `2.1.0`, + Version: `2.2.0`, PublicFacing: true, New: func() workload.Generator { g := &tpcc{} @@ -164,6 +167,7 @@ var tpccMeta = workload.Meta{ g.flags.Uint64Var(&g.seed, `seed`, 1, `Random number generator seed`) g.flags.IntVar(&g.warehouses, `warehouses`, 1, `Number of warehouses for loading`) g.flags.BoolVar(&g.fks, `fks`, true, `Add the foreign keys`) + g.flags.BoolVar(&g.deprecatedFkIndexes, `deprecated-fk-indexes`, true, `Add deprecated foreign key indexes (needed when running against v20.1 or earlier clusters)`) g.flags.BoolVar(&g.interleaved, `interleaved`, false, `Use interleaved tables`) g.flags.StringVar(&g.mix, `mix`, @@ -326,10 +330,18 @@ func (w *tpcc) Hooks() workload.Hooks { for _, fkStmt := range fkStmts { if _, err := db.Exec(fkStmt); err != nil { - // If the statement failed because the fk already exists, - // ignore it. Return the error for any other reason.
const duplFKErr = "columns cannot be used by multiple foreign key constraints" - if !strings.Contains(err.Error(), duplFKErr) { + const idxErr = "foreign key requires an existing index on columns" + switch { + case strings.Contains(err.Error(), idxErr): + // If the statement failed because of a missing FK index, suggest + // using the --deprecated-fk-indexes flag. + return errors.WithHint(err, "try using the --deprecated-fk-indexes flag") + case strings.Contains(err.Error(), duplFKErr): + // If the statement failed because the fk already exists, + // ignore it. + default: + // Return the error for any other reason. + return err } } @@ -470,9 +482,9 @@ func (w *tpcc) Tables() []workload.Table { history := workload.Table{ Name: `history`, Schema: maybeAddFkSuffix( - w.fks, + w.deprecatedFkIndexes, tpccHistorySchemaBase, - tpccHistorySchemaFkSuffix, + deprecatedTpccHistorySchemaFkSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: numHistoryPerWarehouse * w.warehouses, @@ -528,9 +540,9 @@ Schema: maybeAddInterleaveSuffix( w.interleaved, maybeAddFkSuffix( - w.fks, + w.deprecatedFkIndexes, tpccStockSchemaBase, - tpccStockSchemaFkSuffix, + deprecatedTpccStockSchemaFkSuffix, ), tpccStockSchemaInterleaveSuffix, ), @@ -545,9 +557,9 @@ Schema: maybeAddInterleaveSuffix( w.interleaved, maybeAddFkSuffix( - w.fks, + w.deprecatedFkIndexes, tpccOrderLineSchemaBase, - tpccOrderLineSchemaFkSuffix, + deprecatedTpccOrderLineSchemaFkSuffix, ), tpccOrderLineSchemaInterleaveSuffix, ), @@ -563,7 +575,9 @@ } // Ops implements the Opser interface. -func (w *tpcc) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *tpcc) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { // It would be nice to remove the need for this and to require that // partitioning and scattering occurs only when the PostLoad hook is // run, but to maintain backward compatibility, it's easiest to allow @@ -679,7 +693,7 @@ func (w *tpcc) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, idx := len(ql.WorkerFns) - 1 sem <- struct{}{} group.Go(func() error { - worker, err := newWorker(context.TODO(), w, db, reg.GetHandle(), warehouse) + worker, err := newWorker(ctx, w, db, reg.GetHandle(), warehouse) if err == nil { ql.WorkerFns[idx] = worker.run } diff --git a/pkg/workload/tpcc/worker.go b/pkg/workload/tpcc/worker.go index ea1cbdef8417..97ee67f2edce 100644 --- a/pkg/workload/tpcc/worker.go +++ b/pkg/workload/tpcc/worker.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/exp/rand" ) diff --git a/pkg/workload/tpccchecks/checks_generator.go b/pkg/workload/tpccchecks/checks_generator.go index 22da48e2df9a..0a9c4d1e4ede 100644 --- a/pkg/workload/tpccchecks/checks_generator.go +++ b/pkg/workload/tpccchecks/checks_generator.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" "github.com/cockroachdb/cockroach/pkg/workload/tpcc" - "github.com/pkg/errors" + "github.com/cockroachdb/errors"
"github.com/spf13/pflag" ) @@ -93,7 +93,9 @@ func (*tpccChecks) Meta() workload.Meta { } // Ops implements the Opser interface. -func (w *tpccChecks) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *tpccChecks) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.flags.Lookup("db").Value.String(), urls) if err != nil { return workload.QueryLoad{}, fmt.Errorf("%v", err) diff --git a/pkg/workload/tpcds/queries.go b/pkg/workload/tpcds/queries.go index 16bce83341cd..817cf18f3eae 100644 --- a/pkg/workload/tpcds/queries.go +++ b/pkg/workload/tpcds/queries.go @@ -10,9 +10,12 @@ package tpcds -const numQueries = 99 +// NumQueries specifies the number of queries in TPC-DS benchmark. +const NumQueries = 99 -var queriesByNumber = map[int]string{ +// QueriesByNumber is a mapping from the number of a TPC-DS query to the actual +// query. Only queries that can be parsed by CockroachDB are present. +var QueriesByNumber = map[int]string{ 1: query1, 2: query2, 3: query3, @@ -6845,6 +6848,8 @@ select limit 100; ` + // NOTE: this query has been modified by appending two extra columns to + // ORDER BY clause so that it had deterministic output. query71 = ` SELECT i_brand_id AS brand_id, @@ -6904,7 +6909,7 @@ WHERE GROUP BY i_brand, i_brand_id, t_hour, t_minute ORDER BY - ext_price DESC, i_brand_id; + ext_price DESC, i_brand_id, t_hour, t_minute; ` query72 = ` @@ -7535,6 +7540,8 @@ LIMIT 100; ` + // NOTE: this query has been modified by appending one extra column to + // ORDER BY clause so that it had deterministic output. query79 = ` SELECT c_last_name, @@ -7581,7 +7588,7 @@ FROM WHERE ss_customer_sk = c_customer_sk ORDER BY - c_last_name, c_first_name, substr(s_city, 1, 30), profit + c_last_name, c_first_name, substr(s_city, 1, 30), profit, ss_ticket_number LIMIT 100; ` diff --git a/pkg/workload/tpcds/tpcds.go b/pkg/workload/tpcds/tpcds.go index cd4c44bcf86a..bfcd4a4adeb2 100644 --- a/pkg/workload/tpcds/tpcds.go +++ b/pkg/workload/tpcds/tpcds.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -66,7 +66,7 @@ var tpcdsMeta = workload.Meta{ `Note that --queries-to-omit flag has a higher precedence`) g.flags.DurationVar(&g.queryTimeLimit, `query-time-limit`, 5*time.Minute, `Time limit for a single run of a query`) - g.flags.StringVar(&g.vectorize, `vectorize`, `auto`, + g.flags.StringVar(&g.vectorize, `vectorize`, `on`, `Set vectorize session variable`) g.connFlags = workload.NewConnFlags(&g.flags) return g @@ -86,15 +86,15 @@ func (w *tpcds) Hooks() workload.Hooks { if w.queryTimeLimit <= 0 { return errors.Errorf("non-positive query time limit was set: %s", w.queryTimeLimit) } - skipQuery := make([]bool, numQueries+1) + skipQuery := make([]bool, NumQueries+1) for _, queryName := range strings.Split(w.queriesToOmitRaw, `,`) { queryNum, err := strconv.Atoi(queryName) if err != nil { return err } - if queryNum < 1 || queryNum > numQueries { + if queryNum < 1 || queryNum > NumQueries { return errors.Errorf("unknown query %d (only queries in range [1, %d] are supported)", - queryNum, numQueries) + queryNum, NumQueries) } skipQuery[queryNum] = true } @@ -104,7 +104,7 @@ func (w *tpcds) Hooks() workload.Hooks { 
if err != nil { return err } - if _, ok := queriesByNumber[queryNum]; !ok { + if _, ok := QueriesByNumber[queryNum]; !ok { return errors.Errorf(`unknown query: %s (probably, the query needs modifications, `+ `so it is disabled for now)`, queryName) } @@ -114,7 +114,7 @@ func (w *tpcds) Hooks() workload.Hooks { } return nil } - for queryNum := 1; queryNum <= numQueries; queryNum++ { + for queryNum := 1; queryNum <= NumQueries; queryNum++ { if !skipQuery[queryNum] { w.selectedQueries = append(w.selectedQueries, queryNum) } @@ -258,7 +258,9 @@ func (w *tpcds) Tables() []workload.Table { } // Ops implements the Opser interface. -func (w *tpcds) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *tpcds) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err @@ -298,7 +300,7 @@ func (w *worker) run(ctx context.Context) error { if err != nil { return err } - query := queriesByNumber[queryNum] + query := QueriesByNumber[queryNum] var rows *gosql.Rows start := timeutil.Now() diff --git a/pkg/workload/tpch/expected_rows.go b/pkg/workload/tpch/expected_rows.go index 859e2a97c3a6..7d880b0e3260 100644 --- a/pkg/workload/tpch/expected_rows.go +++ b/pkg/workload/tpch/expected_rows.go @@ -11,39 +11,39 @@ package tpch var ( - maxCols int - numColsByQueryName = map[string]int{} - numExpectedRowsByQueryName = map[string]int{ - `11`: 1048, - `16`: 18314, - } - queriesToCheckOnlyNumRows = map[string]bool{ - `11`: true, - `16`: true, + maxCols int + numColsByQueryNumber = map[int]int{} + // numExpectedRowsByQueryNumber is a mapping from query number to the + // number of expected rows the query should return. For all of these + // queries, only row count is checked (i.e. we won't perform row-by-row + // check). + numExpectedRowsByQueryNumber = map[int]int{ + 11: 1048, + 16: 18314, } ) func init() { - for queryName, expectedRows := range expectedRowsByQueryName { - numColsByQueryName[queryName] = len(expectedRows[0]) - numExpectedRowsByQueryName[queryName] = len(expectedRows) + for queryNumber, expectedRows := range expectedRowsByQueryNumber { + numColsByQueryNumber[queryNumber] = len(expectedRows[0]) + numExpectedRowsByQueryNumber[queryNumber] = len(expectedRows) if len(expectedRows[0]) > maxCols { maxCols = len(expectedRows[0]) } } } -// expectedRowsByQueryName maps a query name to the expected rows for that +// expectedRowsByQueryNumber maps a query number to the expected rows for that // query. Queries 11 and 16 return 1048 and 18314 rows, respectively, so we // only verify the number of rows and these both are omitted from the map. 
-var expectedRowsByQueryName = map[string][][]string{ - `1`: { +var expectedRowsByQueryNumber = map[int][][]string{ + 1: { {`A`, `F`, `3.7734107e+07`, `5.65865544007299e+10`, `5.375825713486514e+10`, `5.590906522282561e+10`, `25.522005853257337`, `38273.1297346216`, `0.04998529583825443`, `1478493`}, {`N`, `F`, `991417`, `1.4875047103799965e+09`, `1.413082168054104e+09`, `1.4696492231943603e+09`, `25.516471920522985`, `38284.467760848216`, `0.05009342667419324`, `38854`}, {`N`, `O`, `7.447604e+07`, `1.1170172969773557e+11`, `1.0611823030761223e+11`, `1.1036704387249208e+11`, `25.50222676958499`, `38249.11798890675`, `0.04999658605362673`, `2920374`}, {`R`, `F`, `3.7719753e+07`, `5.656804138090447e+10`, `5.374129268460378e+10`, `5.588961911982966e+10`, `25.50579361269077`, `38250.85462610268`, `0.050009405829983596`, `1478870`}, }, - `2`: { + 2: { {`9938.53`, `Supplier#000005359`, `UNITED KINGDOM`, `185358`, `Manufacturer#4`, `QKuHYh,vZGiwu2FWEJoLDx04`, `33-429-790-6131`, `uriously regular requests hag`}, {`9937.84`, `Supplier#000005969`, `ROMANIA`, `108438`, `Manufacturer#1`, `ANDENSOSmk,miq23Xfb5RWt6dvUcvt6Qa`, `29-520-692-3537`, `efully express instructions. regular requests against the slyly fin`}, {`9936.22`, `Supplier#000005250`, `UNITED KINGDOM`, `249`, `Manufacturer#4`, `B3rqp0xbSEim4Mpy2RH J`, `33-320-228-2957`, `etect about the furiously final accounts. slyly ironic pinto beans sleep inside the furiously`}, @@ -145,7 +145,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`7850.66`, `Supplier#000001518`, `UNITED KINGDOM`, `86501`, `Manufacturer#1`, `ONda3YJiHKJOC`, `33-730-383-3892`, `ifts haggle fluffily pending pai`}, {`7843.52`, `Supplier#000006683`, `FRANCE`, `11680`, `Manufacturer#4`, `2Z0JGkiv01Y00oCFwUGfviIbhzCdy`, `16-464-517-8943`, ` express, final pinto beans x-ray slyly asymptotes. unusual, unusual`}, }, - `3`: { + 3: { {`2456423`, `406181.0111`, `1995-03-05 00:00:00 +0000 +0000`, `0`}, {`3459808`, `405838.69889999996`, `1995-03-04 00:00:00 +0000 +0000`, `0`}, {`492164`, `390324.061`, `1995-02-19 00:00:00 +0000 +0000`, `0`}, @@ -157,34 +157,34 @@ var expectedRowsByQueryName = map[string][][]string{ {`993600`, `371407.4595`, `1995-03-05 00:00:00 +0000 +0000`, `0`}, {`2300070`, `367371.1452000001`, `1995-03-13 00:00:00 +0000 +0000`, `0`}, }, - `4`: { + 4: { {`1-URGENT`, `10594`}, {`2-HIGH`, `10476`}, {`3-MEDIUM`, `10410`}, {`4-NOT SPECIFIED`, `10556`}, {`5-LOW`, `10487`}, }, - `5`: { + 5: { {`INDONESIA`, `5.5502041169699945e+07`}, {`VIETNAM`, `5.529508699669996e+07`}, {`CHINA`, `5.372449425659997e+07`}, {`INDIA`, `5.203551200020005e+07`}, {`JAPAN`, `4.5410175695400015e+07`}, }, - `6`: { + 6: { {`1.2314107822829871e+08`}, }, - `7`: { + 7: { {`FRANCE`, `GERMANY`, `1995`, `5.463973273359995e+07`}, {`FRANCE`, `GERMANY`, `1996`, `5.463308330759997e+07`}, {`GERMANY`, `FRANCE`, `1995`, `5.253174666969997e+07`}, {`GERMANY`, `FRANCE`, `1996`, `5.252054902239985e+07`}, }, - `8`: { + 8: { {`1995`, `0.03443589040665483`}, {`1996`, `0.04148552129353034`}, }, - `9`: { + 9: { {`ALGERIA`, `1998`, `2.713690018030001e+07`}, {`ALGERIA`, `1997`, `4.861183349620003e+07`}, {`ALGERIA`, `1996`, `4.828548267819995e+07`}, @@ -361,7 +361,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`VIETNAM`, `1993`, `4.5352676867199965e+07`}, {`VIETNAM`, `1992`, `4.7846355648499995e+07`}, }, - `10`: { + 10: { {`57040`, `Customer#000057040`, `734235.2455000001`, `632.87`, `JAPAN`, `Eioyzjf4pp`, `22-895-641-3466`, `sits. 
slyly regular requests sleep alongside of the regular inst`}, {`143347`, `Customer#000143347`, `721002.6947999999`, `2557.47`, `EGYPT`, `1aReFYv,Kw4`, `14-742-935-3718`, `ggle carefully enticing requests. final deposits use bold, bold pinto beans. ironic, idle re`}, {`60838`, `Customer#000060838`, `679127.3077000001`, `2454.77`, `BRAZIL`, `64EaJ5vMAHWJlBOxJklpNc2RJiWE`, `12-913-494-9813`, ` need to boost against the slyly regular account`}, @@ -384,11 +384,11 @@ var expectedRowsByQueryName = map[string][][]string{ {`23431`, `Customer#000023431`, `554269.536`, `3381.86`, `ROMANIA`, `HgiV0phqhaIa9aydNoIlb`, `29-915-458-2654`, `nusual, even instructions: furiously stealthy n`}, }, // Query 11 returns 1048 rows, so we verify only the number of rows returned. - `12`: { + 12: { {`MAIL`, `6202`, `9324`}, {`SHIP`, `6200`, `9262`}, }, - `13`: { + 13: { {`0`, `50005`}, {`9`, `6641`}, {`10`, `6532`}, @@ -432,17 +432,17 @@ var expectedRowsByQueryName = map[string][][]string{ {`41`, `2`}, {`39`, `1`}, }, - `14`: { + 14: { {`16.380778626395557`}, }, - `15`: { + 15: { {`8449`, `Supplier#000008449`, `Wp34zim9qYFbVctdW`, `20-469-856-8873`, `1.7726272086999996e+06`}, }, // Query 16 returns 18314 rows, so we verify only the number of rows returned. - `17`: { + 17: { {`348406.05428571376`}, }, - `18`: { + 18: { {`Customer#000128120`, `128120`, `4722021`, `1994-04-07 00:00:00 +0000 +0000`, `544089.09`, `323`}, {`Customer#000144617`, `144617`, `3043270`, `1997-02-12 00:00:00 +0000 +0000`, `530604.44`, `317`}, {`Customer#000013940`, `13940`, `2232932`, `1997-04-13 00:00:00 +0000 +0000`, `522720.61`, `304`}, @@ -501,10 +501,10 @@ var expectedRowsByQueryName = map[string][][]string{ {`Customer#000082441`, `82441`, `857959`, `1994-02-07 00:00:00 +0000 +0000`, `382579.74`, `305`}, {`Customer#000088703`, `88703`, `2995076`, `1994-01-30 00:00:00 +0000 +0000`, `363812.12`, `302`}, }, - `19`: { + 19: { {`3.0838430578e+06`}, }, - `20`: { + 20: { {`Supplier#000000020`, `iybAE,RmTymrZVYaFZva2SH,j`}, {`Supplier#000000091`, `YV45D7TkfdQanOOZ7q9QxkyGUapU1oOWU6q3`}, {`Supplier#000000205`, `rF uV8d0JNEk`}, @@ -692,7 +692,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`Supplier#000009899`, `7XdpAHrzr1t,UQFZE`}, {`Supplier#000009974`, `7wJ,J5DKcxSU4Kp1cQLpbcAvB5AsvKT`}, }, - `21`: { + 21: { {`Supplier#000002829`, `20`}, {`Supplier#000005808`, `18`}, {`Supplier#000000262`, `17`}, @@ -794,7 +794,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`Supplier#000002357`, `12`}, {`Supplier#000002483`, `12`}, }, - `22`: { + 22: { {`13`, `888`, `6.737713990000005e+06`}, {`17`, `861`, `6.460573720000007e+06`}, {`18`, `964`, `7.236687400000006e+06`}, diff --git a/pkg/workload/tpch/generate.go b/pkg/workload/tpch/generate.go index 631fe872694b..2ba75864906a 100644 --- a/pkg/workload/tpch/generate.go +++ b/pkg/workload/tpch/generate.go @@ -403,7 +403,9 @@ func (w *tpch) tpchOrdersInitialRowBatch( totalPrice := float32(0) for j := 0; j < l.orderData.nOrders; j++ { ep := l.orderData.quantities[j] * makeRetailPriceFromPartKey(l.orderData.partKeys[j]) - totalPrice += ep * (1 + l.orderData.tax[j]) * (1 - l.orderData.discount[j]) + // Use an extra float32 conversion to disable "fused multiply and add" (FMA) to force + // identical behavior on all platforms. 
See https://golang.org/ref/spec#Floating_point_operators + totalPrice += float32(ep * (1 + l.orderData.tax[j]) * (1 - l.orderData.discount[j])) // nolint:unconvert } // O_TOTALPRICE computed as: // sum (L_EXTENDEDPRICE * (1+L_TAX) * (1-L_DISCOUNT)) for all LINEITEM of diff --git a/pkg/workload/tpch/queries.go b/pkg/workload/tpch/queries.go index 4523566a5f9d..df6df2cbc5e7 100644 --- a/pkg/workload/tpch/queries.go +++ b/pkg/workload/tpch/queries.go @@ -10,30 +10,37 @@ package tpch -var queriesByName = map[string]string{ - `1`: query1, - `2`: query2, - `3`: query3, - `4`: query4, - `5`: query5, - `6`: query6, - `7`: query7, - `8`: query8, - `9`: query9, - `10`: query10, - `11`: query11, - `12`: query12, - `13`: query13, - `14`: query14, - `15`: query15, - `16`: query16, - `17`: query17, - `18`: query18, - `19`: query19, - `20`: query20, - `21`: query21, - `22`: query22, -} +var ( + // QueriesByNumber is a mapping from the number of a TPC-H query to the actual + // query. + QueriesByNumber = map[int]string{ + 1: query1, + 2: query2, + 3: query3, + 4: query4, + 5: query5, + 6: query6, + 7: query7, + 8: query8, + 9: query9, + 10: query10, + 11: query11, + 12: query12, + 13: query13, + 14: query14, + 15: query15, + 16: query16, + 17: query17, + 18: query18, + 19: query19, + 20: query20, + 21: query21, + 22: query22, + } + + // NumQueries specifies the number of queries in the TPC-H benchmark. + NumQueries = len(QueriesByNumber) +) const ( query1 = ` @@ -448,6 +455,9 @@ WHERE AND l_shipdate < DATE '1995-09-01' + INTERVAL '1' MONTH; ` + // Note that the main query has been adjusted to work around issues with + // floating point computations when the order of summation is different + // (see #53946 for more details). query15 = ` CREATE VIEW revenue0 (supplier_no, total_revenue) AS SELECT @@ -472,12 +482,12 @@ FROM revenue0 WHERE s_suppkey = supplier_no - AND total_revenue = ( + AND abs(total_revenue - ( SELECT max(total_revenue) FROM revenue0 - ) + )) < 0.001 ORDER BY s_suppkey; diff --git a/pkg/workload/tpch/tpch.go b/pkg/workload/tpch/tpch.go index d22c53692382..3bc5b8cf6fcd 100644 --- a/pkg/workload/tpch/tpch.go +++ b/pkg/workload/tpch/tpch.go @@ -61,12 +61,13 @@ type tpch struct { scaleFactor int fks bool - disableChecks bool - vectorize string - verbose bool + disableChecks bool + vectorize string + useClusterVectorizeSetting bool + verbose bool queriesRaw string - selectedQueries []string + selectedQueries []int textPool textPool localsPool *sync.Pool @@ -105,8 +106,10 @@ var tpchMeta = workload.Meta{ g.flags.BoolVar(&g.disableChecks, `disable-checks`, false, "Disable checking the output against the expected rows (default false). 
"+ "Note that the checks are only supported for scale factor 1") - g.flags.StringVar(&g.vectorize, `vectorize`, `auto`, + g.flags.StringVar(&g.vectorize, `vectorize`, `on`, `Set vectorize session variable`) + g.flags.BoolVar(&g.useClusterVectorizeSetting, `default-vectorize`, false, + `Ignore vectorize option and use the current cluster setting sql.defaults.vectorize`) g.flags.BoolVar(&g.verbose, `verbose`, false, `Prints out the queries being run as well as histograms`) g.connFlags = workload.NewConnFlags(&g.flags) @@ -130,10 +133,14 @@ func (w *tpch) Hooks() workload.Hooks { w.disableChecks = true } for _, queryName := range strings.Split(w.queriesRaw, `,`) { - if _, ok := queriesByName[queryName]; !ok { + queryNum, err := strconv.Atoi(queryName) + if err != nil { + return err + } + if _, ok := QueriesByNumber[queryNum]; !ok { return errors.Errorf(`unknown query: %s`, queryName) } - w.selectedQueries = append(w.selectedQueries, queryName) + w.selectedQueries = append(w.selectedQueries, queryNum) } return nil }, @@ -169,7 +176,7 @@ func (w *tpch) Hooks() workload.Hooks { // Return the error for any other reason. const duplFKErr = "columns cannot be used by multiple foreign key constraints" if !strings.Contains(err.Error(), duplFKErr) { - return errors.Wrap(err, fkStmt) + return errors.Wrapf(err, "while executing %s", fkStmt) } } } @@ -292,7 +299,9 @@ func (w *tpch) Tables() []workload.Table { } // Ops implements the Opser interface. -func (w *tpch) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (w *tpch) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(w, w.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err @@ -325,10 +334,14 @@ type worker struct { } func (w *worker) run(ctx context.Context) error { - queryName := w.config.selectedQueries[w.ops%len(w.config.selectedQueries)] + queryNum := w.config.selectedQueries[w.ops%len(w.config.selectedQueries)] w.ops++ - query := fmt.Sprintf("SET vectorize = '%s'; %s", w.config.vectorize, queriesByName[queryName]) + var prefix string + if !w.config.useClusterVectorizeSetting { + prefix = fmt.Sprintf("SET vectorize = '%s';", w.config.vectorize) + } + query := fmt.Sprintf("%s %s", prefix, QueriesByNumber[queryNum]) vals := make([]interface{}, maxCols) for i := range vals { @@ -341,7 +354,7 @@ func (w *worker) run(ctx context.Context) error { defer rows.Close() } if err != nil { - return errors.Errorf("[q%s]: %s", queryName, err) + return errors.Errorf("[q%d]: %s", queryNum, err) } var numRows int // NOTE: we should *NOT* return an error from this function right away @@ -350,12 +363,12 @@ func (w *worker) run(ctx context.Context) error { checkExpectedOutput := func() error { for rows.Next() { if !w.config.disableChecks { - if !queriesToCheckOnlyNumRows[queryName] { - if err = rows.Scan(vals[:numColsByQueryName[queryName]]...); err != nil { - return errors.Errorf("[q%s]: %s", queryName, err) + if _, checkOnlyRowCount := numExpectedRowsByQueryNumber[queryNum]; !checkOnlyRowCount { + if err = rows.Scan(vals[:numColsByQueryNumber[queryNum]]...); err != nil { + return errors.Errorf("[q%d]: %s", queryNum, err) } - expectedRow := expectedRowsByQueryName[queryName][numRows] + expectedRow := expectedRowsByQueryNumber[queryNum][numRows] for i, expectedValue := range expectedRow { if val := *vals[i].(*interface{}); val != nil { var actualValue string @@ -372,15 +385,15 @@ func (w *worker) run(ctx context.Context) 
error { var expectedFloatRounded, actualFloatRounded float64 expectedFloat, err = strconv.ParseFloat(expectedValue, 64) if err != nil { - return errors.Errorf("[q%s] failed parsing expected value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing expected value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } actualFloat, err = strconv.ParseFloat(actualValue, 64) if err != nil { - return errors.Errorf("[q%s] failed parsing actual value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing actual value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } // TPC-H spec requires 0.01 precision for DECIMALs, so we will // first round the values to use in the comparison. Note that we @@ -392,15 +405,15 @@ func (w *worker) run(ctx context.Context) error { // 0.01). expectedFloatRounded, err = strconv.ParseFloat(fmt.Sprintf("%.3f", expectedFloat), 64) if err != nil { - return errors.Errorf("[q%s] failed parsing rounded expected value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing rounded expected value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } actualFloatRounded, err = strconv.ParseFloat(fmt.Sprintf("%.3f", actualFloat), 64) if err != nil { - return errors.Errorf("[q%s] failed parsing rounded actual value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing rounded actual value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } if math.Abs(expectedFloatRounded-actualFloatRounded) > 0.02 { // We only fail the check if the difference is more than 0.02 @@ -412,9 +425,9 @@ func (w *worker) run(ctx context.Context) error { // "ideal" - expected < 0.01 && actual - "ideal" < 0.01 // so in the worst case, actual and expected might differ by // 0.02 and still be considered correct. - return errors.Errorf("[q%s] %f and %f differ by more than 0.02\n"+ + return errors.Errorf("[q%d] %f and %f differ by more than 0.02\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, actualFloatRounded, expectedFloatRounded, + queryNum, actualFloatRounded, expectedFloatRounded, numRows, i, actualValue, expectedValue) } } @@ -437,33 +450,34 @@ func (w *worker) run(ctx context.Context) error { // We first check whether there is any error that came from the server (for // example, an out of memory error). If there is, we return it. if err := rows.Err(); err != nil { - return errors.Errorf("[q%s]: %s", queryName, err) + return errors.Errorf("[q%d]: %s", queryNum, err) } // Now we check whether there was an error while consuming the rows. 
if expectedOutputError != nil { return wrongOutputError{error: expectedOutputError} } if !w.config.disableChecks { - if numRows != numExpectedRowsByQueryName[queryName] { + numRowsExpected, checkOnlyRowCount := numExpectedRowsByQueryNumber[queryNum] + if checkOnlyRowCount && numRows != numRowsExpected { return wrongOutputError{ error: errors.Errorf( - "[q%s] returned wrong number of rows: got %d, expected %d", - queryName, numRows, numExpectedRowsByQueryName[queryName], + "[q%d] returned wrong number of rows: got %d, expected %d", + queryNum, numRows, numRowsExpected, )} } } elapsed := timeutil.Since(start) if w.config.verbose { - w.hists.Get(queryName).Record(elapsed) + w.hists.Get(fmt.Sprintf("%d", queryNum)).Record(elapsed) // Note: if you are changing the output format here, please change the // regex in roachtest/tpchvec.go accordingly. - log.Infof(ctx, "[q%s] returned %d rows after %4.2f seconds:\n%s", - queryName, numRows, elapsed.Seconds(), query) + log.Infof(ctx, "[q%d] returned %d rows after %4.2f seconds:\n%s", + queryNum, numRows, elapsed.Seconds(), query) } else { // Note: if you are changing the output format here, please change the // regex in roachtest/tpchvec.go accordingly. - log.Infof(ctx, "[q%s] returned %d rows after %4.2f seconds", - queryName, numRows, elapsed.Seconds()) + log.Infof(ctx, "[q%d] returned %d rows after %4.2f seconds", + queryNum, numRows, elapsed.Seconds()) } return nil } diff --git a/pkg/workload/workload.go b/pkg/workload/workload.go index e06a3b0d9ede..9dfe0c41dd0a 100644 --- a/pkg/workload/workload.go +++ b/pkg/workload/workload.go @@ -28,7 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coltypes" "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/workload/histogram" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" ) @@ -73,7 +73,7 @@ type Flagser interface { // to have been created and initialized before running these. type Opser interface { Generator - Ops(urls []string, reg *histogram.Registry) (QueryLoad, error) + Ops(ctx context.Context, urls []string, reg *histogram.Registry) (QueryLoad, error) } // Hookser returns any hooks associated with the generator. @@ -175,9 +175,9 @@ type BatchedTuples struct { // Tuples is like TypedTuples except that it tries to guess the type of each // datum. However, if the function ever returns nil for one of the datums, you -// need to use TypedTuples instead and specify the coltypes. +// need to use TypedTuples instead and specify the types. func Tuples(count int, fn func(int) []interface{}) BatchedTuples { - return TypedTuples(count, nil /* colTypes */, fn) + return TypedTuples(count, nil /* typs */, fn) } const ( @@ -187,7 +187,7 @@ const ( // TypedTuples returns a BatchedTuples where each batch has size 1. It's // intended to be easier to use than directly specifying a BatchedTuples, but -// the tradeoff is some bit of performance. If colTypes is nil, an attempt is +// the tradeoff is some bit of performance. If typs is nil, an attempt is // made to infer them. 
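To make the Tuples/TypedTuples contract concrete, here is a hedged usage sketch (the row shape is invented for illustration): Tuples infers each column's type from the datums it sees, so TypedTuples with explicit types is required whenever a datum can be nil.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/workload"
)

func main() {
	// Each batch produced by Tuples has size 1, so NumBatches should equal
	// the row count; the column types are inferred from the returned datums.
	rows := workload.Tuples(3, func(rowIdx int) []interface{} {
		return []interface{}{rowIdx, fmt.Sprintf("name-%d", rowIdx)}
	})
	fmt.Println(rows.NumBatches) // 3
}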
func TypedTuples(count int, colTypes []coltypes.T, fn func(int) []interface{}) BatchedTuples { // The FillBatch we create has to be concurrency safe, so we can't let it do @@ -238,7 +238,7 @@ func TypedTuples(count int, colTypes []coltypes.T, fn func(int) []interface{}) B case time.Time: col.Bytes().Set(0, []byte(d.Round(time.Microsecond).UTC().Format(timestampOutputFormat))) default: - panic(fmt.Sprintf(`unhandled datum type %T`, d)) + panic(errors.AssertionFailedf(`unhandled datum type %T`, d)) } } } @@ -291,7 +291,7 @@ func ColBatchToRows(cb coldata.Batch) [][]interface{} { } case coltypes.Bytes: // HACK: workload's Table schemas are SQL schemas, but the initial data is - // returned as a coldata.Batch, which has a more limited set of coltypes. + // returned as a coldata.Batch, which has a more limited set of types. // (Or, in the case of simple workloads that return a []interface{}, it's // roundtripped through coldata.Batch by the `Tuples` helper.) // @@ -310,7 +310,7 @@ func ColBatchToRows(cb coldata.Batch) [][]interface{} { } } default: - panic(fmt.Sprintf(`unhandled type %s`, col.Type().GoTypeName())) + panic(fmt.Sprintf(`unhandled type %s`, col.Type())) } } rows := make([][]interface{}, numRows) @@ -405,7 +405,8 @@ func FromFlags(meta Meta, flags ...string) Generator { if !ok { panic(fmt.Sprintf(`generator %s does not accept flags: %v`, meta.Name, flags)) } - if err := f.Flags().Parse(flags); err != nil { + flagsStruct := f.Flags() + if err := flagsStruct.Parse(flags); err != nil { panic(fmt.Sprintf(`generator %s parsing flags %v: %v`, meta.Name, flags, err)) } } @@ -451,6 +452,6 @@ func ApproxDatumSize(x interface{}) int64 { case time.Time: return 12 default: - panic(fmt.Sprintf("unsupported type %T: %v", x, x)) + panic(errors.AssertionFailedf("unsupported type %T: %v", x, x)) } } diff --git a/pkg/workload/workloadsql/dataload.go b/pkg/workload/workloadsql/dataload.go index 9d909cb617ed..8f0a5080e662 100644 --- a/pkg/workload/workloadsql/dataload.go +++ b/pkg/workload/workloadsql/dataload.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/sync/errgroup" ) diff --git a/pkg/workload/workloadsql/workloadsql.go b/pkg/workload/workloadsql/workloadsql.go index 7d015f0c58ea..15e3d7e89188 100644 --- a/pkg/workload/workloadsql/workloadsql.go +++ b/pkg/workload/workloadsql/workloadsql.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/cockroachdb/cockroach/pkg/workload" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" "golang.org/x/time/rate" ) @@ -134,18 +134,20 @@ func Split(ctx context.Context, db *gosql.DB, table workload.Table, concurrency // If you're investigating an error coming out of this Exec, see the // HACK comment in ColBatchToRows for some context that may (or may // not) help you. 
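For context on the Split hunk that follows: each split point is applied with a pair of statements, presumably along these lines (a sketch with an invented table name and split value; only the SCATTER statement is visible in this diff):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	table, split := "usertable", "'user5000'"
	// The split itself; a failure here is returned to the caller.
	fmt.Fprintf(&buf, `ALTER TABLE %s SPLIT AT VALUES (%s)`, table, split)
	fmt.Println(buf.String())
	buf.Reset()
	// The scatter; it can race with the replicate queue and fail spuriously,
	// so the diff below only logs such failures instead of returning them.
	fmt.Fprintf(&buf, `ALTER TABLE %s SCATTER FROM (%s) TO (%s)`, table, split, split)
	fmt.Println(buf.String())
}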
- if _, err := db.Exec(buf.String()); err != nil { - return errors.Wrap(err, buf.String()) + stmt := buf.String() + if _, err := db.Exec(stmt); err != nil { + return errors.Wrapf(err, "executing %s", stmt) } buf.Reset() fmt.Fprintf(&buf, `ALTER TABLE %s SCATTER FROM (%s) TO (%s)`, table.Name, split, split) - if _, err := db.Exec(buf.String()); err != nil { + stmt = buf.String() + if _, err := db.Exec(stmt); err != nil { // SCATTER can collide with normal replicate queue // operations and fail spuriously, so only print the // error. - log.Warningf(ctx, `%s: %s`, buf.String(), err) + log.Warningf(ctx, `%s: %v`, stmt, err) } select { @@ -212,7 +214,7 @@ func StringTuple(datums []interface{}) []string { // See the HACK comment in ColBatchToRows. s[i] = lex.EscapeSQLString(string(x)) default: - panic(fmt.Sprintf("unsupported type %T: %v", x, x)) + panic(errors.AssertionFailedf("unsupported type %T: %v", x, x)) } } return s @@ -263,7 +265,7 @@ func (s sliceSliceInterface) Less(i, j int) bool { case []byte: cmp = bytes.Compare(x, s[j][offset].([]byte)) default: - panic(fmt.Sprintf("unsupported type %T: %v", x, x)) + panic(errors.AssertionFailedf("unsupported type %T: %v", x, x)) } if cmp < 0 { return true diff --git a/pkg/workload/ycsb/acknowledged_counter.go b/pkg/workload/ycsb/acknowledged_counter.go index e7272d7e8433..810cef050950 100644 --- a/pkg/workload/ycsb/acknowledged_counter.go +++ b/pkg/workload/ycsb/acknowledged_counter.go @@ -12,7 +12,7 @@ package ycsb import ( "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" ) const ( diff --git a/pkg/workload/ycsb/skewed_latest_generator.go b/pkg/workload/ycsb/skewed_latest_generator.go index 54c92245b2d5..7d4c3bcfb874 100644 --- a/pkg/workload/ycsb/skewed_latest_generator.go +++ b/pkg/workload/ycsb/skewed_latest_generator.go @@ -11,9 +11,8 @@ package ycsb import ( - "math/rand" - "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "golang.org/x/exp/rand" ) // SkewedLatestGenerator is a random number generator that generates numbers in diff --git a/pkg/workload/ycsb/uniform_generator.go b/pkg/workload/ycsb/uniform_generator.go index ee38bba90e8b..f6cce62562f4 100644 --- a/pkg/workload/ycsb/uniform_generator.go +++ b/pkg/workload/ycsb/uniform_generator.go @@ -11,9 +11,8 @@ package ycsb import ( - "math/rand" - "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "golang.org/x/exp/rand" ) // UniformGenerator is a random number generator that generates draws from a diff --git a/pkg/workload/ycsb/ycsb.go b/pkg/workload/ycsb/ycsb.go index b8f2d1f9b6f0..c9a258eaca62 100644 --- a/pkg/workload/ycsb/ycsb.go +++ b/pkg/workload/ycsb/ycsb.go @@ -19,18 +19,20 @@ import ( "hash" "hash/fnv" "math" - "math/rand" "strings" "sync/atomic" "github.com/cockroachdb/cockroach-go/crdb" + "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coltypes" + "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "github.com/cockroachdb/errors" "github.com/lib/pq" - "github.com/pkg/errors" "github.com/spf13/pflag" + "golang.org/x/exp/rand" ) const ( @@ -79,13 +81,18 @@ const ( ycsb_key VARCHAR(255) PRIMARY KEY NOT NULL, FIELD JSONB )` + + timeFormatTemplate = `2006-01-02 
15:04:05.000000-07:00` ) type ycsb struct { flags workload.Flags connFlags *workload.ConnFlags - seed int64 + seed uint64 + timeString bool + insertHash bool + zeroPadding int insertStart int insertCount int recordCount int @@ -116,7 +123,10 @@ var ycsbMeta = workload.Meta{ g.flags.Meta = map[string]workload.FlagMeta{ `workload`: {RuntimeOnly: true}, } - g.flags.Int64Var(&g.seed, `seed`, 1, `Key hash seed.`) + g.flags.Uint64Var(&g.seed, `seed`, 1, `Key hash seed.`) + g.flags.BoolVar(&g.timeString, `time-string`, false, `Prepend field[0-9] data with the current time at microsecond precision.`) + g.flags.BoolVar(&g.insertHash, `insert-hash`, true, `Whether keys are hashed (true) or generated in sequential order (false).`) + g.flags.IntVar(&g.zeroPadding, `zero-padding`, 1, `When insert-hash=false, left-pad keys with zeros to this many digits.`) g.flags.IntVar(&g.insertStart, `insert-start`, 0, `Key to start initial sequential insertions from. (default 0)`) g.flags.IntVar(&g.insertCount, `insert-count`, 10000, `Number of rows to sequentially insert before beginning workload.`) g.flags.IntVar(&g.recordCount, `record-count`, 0, `Key to start workload insertions from. Must be >= insert-start + insert-count. (Default: insert-start + insert-count)`) @@ -149,27 +159,28 @@ func (g *ycsb) Flags() workload.Flags { return g.flags } func (g *ycsb) Hooks() workload.Hooks { return workload.Hooks{ Validate: func() error { + g.workload = strings.ToUpper(g.workload) switch g.workload { - case "A", "a": + case "A": g.readFreq = 0.5 g.updateFreq = 0.5 g.requestDistribution = "zipfian" - case "B", "b": + case "B": g.readFreq = 0.95 g.updateFreq = 0.05 g.requestDistribution = "zipfian" - case "C", "c": + case "C": g.readFreq = 1.0 g.requestDistribution = "zipfian" - case "D", "d": + case "D": g.readFreq = 0.95 g.insertFreq = 0.05 g.requestDistribution = "latest" - case "E", "e": + case "E": g.scanFreq = 0.95 g.insertFreq = 0.05 g.requestDistribution = "zipfian" - case "F", "f": + case "F": g.readFreq = 0.5 g.readModifyWriteFreq = 0.5 g.requestDistribution = "zipfian" @@ -177,6 +188,12 @@ func (g *ycsb) Hooks() workload.Hooks { return errors.Errorf("Unknown workload: %q", g.workload) } + if !g.flags.Lookup(`families`).Changed { + // If `--families` was not specified, default its value to the + // configuration that we expect to lead to better performance. + g.families = preferColumnFamilies(g.workload) + } + if g.recordCount == 0 { g.recordCount = g.insertStart + g.insertCount } @@ -188,6 +205,78 @@ func (g *ycsb) Hooks() workload.Hooks { } } +// preferColumnFamilies returns whether we expect the use of column families to +// improve performance for a given workload. +func preferColumnFamilies(workload string) bool { + // These determinations were computed on 80da27b (04/04/2020) while running + // the ycsb roachtests. + // + // ycsb/[A-F]/nodes=3 (3x n1-standard-8 VMs): + // + // | workload | --families=false | --families=true | better with families? | + // |----------|-----------------:|----------------:|-----------------------| + // | A | 11,743.5 | 17,760.5 | true | + // | B | 35,232.3 | 32,982.2 | false | + // | C | 45,454.7 | 44,112.5 | false | + // | D | 36,091.0 | 35,615.1 | false | + // | E | 5,774.9 | 2,604.8 | false | + // | F | 4,933.1 | 8,259.7 | true | + // + // ycsb/[A-F]/nodes=3/cpu=32 (3x n1-standard-32 VMs): + // + // | workload | --families=false | --families=true | better with families?
| + // |----------|-----------------:|----------------:|-----------------------| + // | A | 14,144.1 | 27,179.4 | true | + // | B | 96,669.6 | 104,567.5 | true | + // | C | 137,463.3 | 131,953.7 | false | + // | D | 103,188.6 | 95,285.7 | false | + // | E | 10,417.5 | 7,913.6 | false | + // | F | 5,782.3 | 15,532.1 | true | + // + switch workload { + case "A": + // Workload A is highly contended. It performs 50% single-row lookups + // and 50% single-column updates. Using column families breaks the + // contention between all updates to different columns of the same row, + // so we use them by default. + return true + case "B": + // Workload B is less contended than Workload A, but still bottlenecks + // on contention as concurrency grows. It performs 95% single-row + // lookups and 5% single-column updates. Using column families slows + // down the single-row lookups but speeds up the updates (see above). + // This trade-off favors column families at higher concurrency levels + // but not at lower ones. We prefer larger YCSB + // deployments, so we use column families by default. + return true + case "C": + // Workload C has no contention. It consists entirely of single-row + // lookups. Using column families slows down single-row lookups, so we + // do not use them by default. + return false + case "D": + // Workload D has no contention. It performs 95% single-row lookups and + // 5% single-row insertion. Using column families slows down single-row + // lookups and single-row insertion, so we do not use them by default. + return false + case "E": + // Workload E has moderate contention. It performs 95% multi-row scans + // and 5% single-row insertion. Using column families slows down + // multi-row scans and single-row insertion, so we do not use them by + // default. + return false + case "F": + // Workload F is highly contended. It performs 50% single-row lookups + // and 50% single-column updates expressed as multi-statement + // read-modify-write transactions. Using column families breaks the + // contention between all updates to different columns of the same row, + // so we use them by default. + return true + default: + panic(fmt.Sprintf("unexpected workload: %s", workload)) + } +} + var usertableColTypes = []coltypes.T{ coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, coltypes.Bytes, @@ -207,37 +296,75 @@ func (g *ycsb) Tables() []workload.Table { }, ), } - usertableInitialRowsFn := func(rowIdx int) []interface{} { - w := ycsbWorker{config: g, hashFunc: fnv.New64()} - key := w.buildKeyName(uint64(g.insertStart + rowIdx)) - if g.json { - return []interface{}{key, "{}"} - } - return []interface{}{key, "", "", "", "", "", "", "", "", "", ""} - } if g.json { usertable.Schema = usertableSchemaJSON usertable.InitialRows = workload.Tuples( g.insertCount, - usertableInitialRowsFn, - ) + func(rowIdx int) []interface{} { + w := ycsbWorker{ + config: g, + hashFunc: fnv.New64(), + } + key := w.buildKeyName(uint64(g.insertStart + rowIdx)) + // TODO(peter): Need to fill in FIELD here, rather than an empty JSONB + // value.
+ return []interface{}{key, "{}"} }) } else { if g.families { usertable.Schema = usertableSchemaRelationalWithFamilies } else { usertable.Schema = usertableSchemaRelational } - usertable.InitialRows = workload.TypedTuples( - g.insertCount, - usertableColTypes, - usertableInitialRowsFn, - ) + + const batchSize = 1000 + usertable.InitialRows = workload.BatchedTuples{ + NumBatches: (g.insertCount + batchSize - 1) / batchSize, + FillBatch: func(batchIdx int, cb coldata.Batch, _ *bufalloc.ByteAllocator) { + rowBegin, rowEnd := batchIdx*batchSize, (batchIdx+1)*batchSize + if rowEnd > g.insertCount { + rowEnd = g.insertCount + } + cb.Reset(usertableColTypes, rowEnd-rowBegin) + + key := cb.ColVec(0).Bytes() + // coldata.Bytes only allows appends so we have to reset it. + key.Reset() + + var fields [numTableFields]*coldata.Bytes + for i := range fields { + fields[i] = cb.ColVec(i + 1).Bytes() + // coldata.Bytes only allows appends so we have to reset it. + fields[i].Reset() + } + + w := ycsbWorker{ + config: g, + hashFunc: fnv.New64(), + } + rng := rand.NewSource(g.seed + uint64(batchIdx)) + + var tmpbuf [fieldLength]byte + for rowIdx := rowBegin; rowIdx < rowEnd; rowIdx++ { + rowOffset := rowIdx - rowBegin + + key.Set(rowOffset, []byte(w.buildKeyName(uint64(rowIdx)))) + + for i := range fields { + randStringLetters(rng, tmpbuf[:]) + fields[i].Set(rowOffset, tmpbuf[:]) + } + } + }, + } } return []workload.Table{usertable} } // Ops implements the Opser interface. -func (g *ycsb) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { +func (g *ycsb) Ops( + ctx context.Context, urls []string, reg *histogram.Registry, +) (workload.QueryLoad, error) { sqlDatabase, err := workload.SanitizeUrls(g, g.connFlags.DBOverride, urls) if err != nil { return workload.QueryLoad{}, err @@ -358,7 +485,7 @@ func (g *ycsb) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, ql := workload.QueryLoad{SQLDatabase: sqlDatabase} for i := 0; i < g.connFlags.Concurrency; i++ { - rng := rand.New(rand.NewSource(g.seed + int64(i))) + rng := rand.New(rand.NewSource(g.seed + uint64(i))) w := &ycsbWorker{ config: g, hists: reg.GetHandle(), @@ -465,13 +592,20 @@ func (yw *ycsbWorker) hashKey(key uint64) uint64 { } func (yw *ycsbWorker) buildKeyName(keynum uint64) string { - return keyNameFromHash(yw.hashKey(keynum)) + if yw.config.insertHash { + return keyNameFromHash(yw.hashKey(keynum)) + } + return keyNameFromOrder(keynum, yw.config.zeroPadding) } func keyNameFromHash(hashedKey uint64) string { return fmt.Sprintf("user%d", hashedKey) } +func keyNameFromOrder(keynum uint64, zeroPadding int) string { + return fmt.Sprintf("user%0*d", zeroPadding, keynum) +} + // Keys are chosen by first drawing from a Zipf distribution, hashing the drawn // value, and modding by the total number of rows, so that not all hot keys are // close together. @@ -513,12 +647,44 @@ var letters = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") // Generate a random string of alphabetic characters.
func (yw *ycsbWorker) randString(length int) string { str := make([]byte, length) - for i := range str { + // Prepend the current timestamp, matching the default CRDB UTC time format. + strStart := 0 + if yw.config.timeString { + currentTime := timeutil.Now().UTC() + str = currentTime.AppendFormat(str[:0], timeFormatTemplate) + strStart = len(str) + str = str[:length] + } + // The rest of the data is a random string. + for i := strStart; i < length; i++ { str[i] = letters[yw.rng.Intn(len(letters))] } return string(str) } +// NOTE: The following is intentionally duplicated with the ones in +// workload/tpcc/generate.go. They're a very hot path in restoring a fixture and +// hardcoding the consts seems to trigger some compiler optimizations that don't +// happen if those things are params. Don't modify these without consulting +// BenchmarkRandStringFast. + +func randStringLetters(rng rand.Source, buf []byte) { + const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + const lettersLen = uint64(len(letters)) + const lettersCharsPerRand = uint64(11) // floor(log(math.MaxUint64)/log(lettersLen)) + + var r, charsLeft uint64 + for i := 0; i < len(buf); i++ { + if charsLeft == 0 { + r = rng.Uint64() + charsLeft = lettersCharsPerRand + } + buf[i] = letters[r%lettersLen] + r = r / lettersLen + charsLeft-- + } +} + func (yw *ycsbWorker) insertRow(ctx context.Context) error { var args [numTableFields + 1]interface{} keyIndex := yw.nextInsertKeyIndex() @@ -637,7 +803,7 @@ func (yw *ycsbWorker) readModifyWriteRow(ctx context.Context) error { _, err := tx.StmtContext(ctx, updateStmt).ExecContext(ctx, args[:]...) return err }) - if err == gosql.ErrNoRows && ctx.Err() != nil { + if errors.Is(err, gosql.ErrNoRows) && ctx.Err() != nil { // Sometimes a context cancellation during a transaction can result in // sql.ErrNoRows instead of the appropriate context.DeadlineExceeded. In // this case, we just return ctx.Err().
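The lettersCharsPerRand constant above deserves a quick sanity check: with 52 letters, each uniform uint64 yields floor(log(2^64)/log(52)) = 11 independent base-52 digits, which is why the loop draws a fresh random value only once every 11 characters. A standalone verification of that arithmetic:

package main

import (
	"fmt"
	"math"
)

func main() {
	// floor(log(2^64) / log(52)) = floor(44.36 / 3.95) = 11.
	fmt.Println(math.Floor(64 * math.Ln2 / math.Log(52))) // 11
	// 52^11 ~= 7.5e18 still fits below 2^64 ~= 1.8e19, while 52^12 ~= 3.9e20
	// does not, so 11 is the most characters one uint64 can supply.
	fmt.Println(math.Pow(52, 11), math.Pow(52, 12))
}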
See diff --git a/pkg/workload/ycsb/zipfgenerator.go b/pkg/workload/ycsb/zipfgenerator.go index 0020dc538a0e..96e447b2ed76 100644 --- a/pkg/workload/ycsb/zipfgenerator.go +++ b/pkg/workload/ycsb/zipfgenerator.go @@ -17,10 +17,10 @@ package ycsb import ( "fmt" "math" - "math/rand" "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/pkg/errors" + "github.com/cockroachdb/errors" + "golang.org/x/exp/rand" ) const ( diff --git a/pkg/workload/ycsb/zipfgenerator_test.go b/pkg/workload/ycsb/zipfgenerator_test.go index f151e28e9c73..0e7b67155a34 100644 --- a/pkg/workload/ycsb/zipfgenerator_test.go +++ b/pkg/workload/ycsb/zipfgenerator_test.go @@ -13,12 +13,13 @@ package ycsb import ( "fmt" "math" - "math/rand" "sort" "testing" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "golang.org/x/exp/rand" ) type params struct { @@ -34,7 +35,7 @@ var gens = []params{ func TestCreateZipfGenerator(t *testing.T) { defer leaktest.AfterTest(t)() for _, gen := range gens { - rng := rand.New(rand.NewSource(timeutil.Now().UnixNano())) + rng := rand.New(rand.NewSource(uint64(timeutil.Now().UnixNano()))) _, err := NewZipfGenerator(rng, gen.iMin, gen.iMax, gen.theta, false) if err != nil { t.Fatal(err) @@ -60,9 +61,7 @@ var tests = []struct { func TestZetaFromScratch(t *testing.T) { defer leaktest.AfterTest(t)() - if testing.Short() { - t.Skip("short") - } + skip.UnderShort(t) for _, test := range tests { computedZeta, err := computeZetaFromScratch(test.n, test.theta) if err != nil { @@ -76,9 +75,7 @@ func TestZetaFromScratch(t *testing.T) { func TestZetaIncrementally(t *testing.T) { defer leaktest.AfterTest(t)() - if testing.Short() { - t.Skip("short") - } + skip.UnderShort(t) // Theta cannot be 1 by definition, so this is a safe initial value. oldTheta := 1.0 var oldZetaN float64 @@ -110,7 +107,7 @@ func TestZetaIncrementally(t *testing.T) { func runZipfGenerators(t *testing.T, withIncrements bool) { gen := gens[0] - rng := rand.New(rand.NewSource(timeutil.Now().UnixNano())) + rng := rand.New(rand.NewSource(uint64(timeutil.Now().UnixNano()))) z, err := NewZipfGenerator(rng, gen.iMin, gen.iMax, gen.theta, false) if err != nil { t.Fatal(err)
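For readers unfamiliar with the generator under test: the zeta value these tests exercise is the partial sum zeta(n, theta) = sum_{i=1..n} 1/i^theta, the normalization constant of the zipf distribution. A hedged sketch of the from-scratch computation (the real computeZetaFromScratch in zipfgenerator.go may differ in details such as error handling):

package main

import (
	"fmt"
	"math"
)

// zeta returns sum_{i=1..n} 1/i^theta.
func zeta(n uint64, theta float64) float64 {
	var sum float64
	for i := uint64(1); i <= n; i++ {
		sum += 1.0 / math.Pow(float64(i), theta)
	}
	return sum
}

func main() {
	// The incremental variant extends zeta(n) to zeta(n+k) by adding only the
	// k new terms, which is presumably what TestZetaIncrementally cross-checks
	// against the from-scratch computation.
	fmt.Println(zeta(20, 0.99))
}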