From 6700a17f3cf0f0614b132f2199e11cb5c7f05f0f Mon Sep 17 00:00:00 2001 From: Rafi Shamim Date: Sun, 20 Feb 2022 18:20:36 -0500 Subject: [PATCH 1/3] *: prepare tests for batch stmt txn change This commit splits multi-statement strings in tests into individual statements and updates the affected error messages, which will make the next commit (the batch statement transaction change) easier to review. Release justification: test-only change Release note: None --- pkg/ccl/backupccl/backup_planning.go | 4 +- pkg/ccl/backupccl/backup_rand_test.go | 6 +- pkg/ccl/backupccl/backup_test.go | 229 ++++++-------- .../full_cluster_backup_restore_test.go | 24 +- pkg/ccl/backupccl/restore_planning.go | 4 +- .../backup-restore/backup-dropped-descriptors | 21 ++ .../backup-restore/backup-permissions | 16 + .../testdata/backup-restore/column-families | 24 ++ .../backup-restore/descriptor-broadening | 12 + .../testdata/backup-restore/feature-flags | 12 + .../testdata/backup-restore/max-row-size | 6 + .../testdata/backup-restore/multiregion | 29 +- .../testdata/backup-restore/restore-grants | 11 +- .../backup-restore/restore-permissions | 3 + .../testdata/backup-restore/revision_history | 10 +- .../testdata/backup-restore/temp-tables | 26 +- .../backup-restore/user-defined-types | 5 +- .../rttanalysisccl/multi_region_bench_test.go | 2 +- pkg/ccl/changefeedccl/changefeed_test.go | 10 +- .../testdata/boundedstaleness/single_row | 9 + .../testdata/logic_test/alter_table_locality | 90 ++++++ .../testdata/logic_test/crdb_internal_tenant | 10 +- .../testdata/logic_test/multi_region | 34 ++ .../testdata/logic_test/multi_region_backup | 12 + .../logic_test/multi_region_privileges | 42 +++ .../logic_test/multi_region_query_behavior | 2 + .../logic_test/multi_region_zone_configs | 25 +- .../testdata/logic_test/partitioning_enum | 12 +- .../testdata/logic_test/partitioning_implicit | 20 ++ .../testdata/logic_test/regional_by_row | 22 +- .../logic_test/regional_by_row_auto_rehoming | 12 + .../logic_test/regional_by_row_query_behavior | 18 ++ .../testdata/logic_test/super_regions | 12 + pkg/ccl/logictestccl/testdata/logic_test/zone | 28 ++ .../multiregionccl/regional_by_row_test.go | 36 +-- pkg/ccl/partitionccl/drop_test.go | 40 +-- .../testdata/list_partitions | 3 + .../testdata/mixed_partitions | 6 + .../testdata/multi_column_partitions | 3 + .../testdata/range_partitions | 3 + .../sqlwatcher_test.go | 67 ++-- .../testdata/telemetry/multiregion | 3 + pkg/cli/interactive_tests/test_exec_log.tcl | 16 +- pkg/internal/sqlsmith/setup.go | 4 +- pkg/jobs/jobs_test.go | 7 +- pkg/server/stats_test.go | 42 +-- pkg/sql/alter_column_type_test.go | 21 +- .../colfetcher/vectorized_batch_size_test.go | 12 +- pkg/sql/conn_executor_test.go | 7 +- pkg/sql/delete_preserving_index_test.go | 8 +- pkg/sql/descriptor_mutation_test.go | 63 ++-- pkg/sql/export.go | 4 +- pkg/sql/importer/import_planning.go | 4 +- pkg/sql/importer/import_stmt_test.go | 32 +- .../logictest/testdata/logic_test/aggregate | 2 + .../testdata/logic_test/alter_column_type | 47 ++- .../testdata/logic_test/alter_primary_key | 162 ++++++++++ .../logictest/testdata/logic_test/alter_table | 34 ++ .../logictest/testdata/logic_test/alter_type | 10 + pkg/sql/logictest/testdata/logic_test/array | 50 ++- pkg/sql/logictest/testdata/logic_test/cascade | 20 ++ pkg/sql/logictest/testdata/logic_test/cast | 22 ++ .../testdata/logic_test/column_families | 12 +- .../testdata/logic_test/crdb_internal | 8 +- .../logictest/testdata/logic_test/create_as | 12 +- .../testdata/logic_test/create_statements | 15 +- .../testdata/logic_test/create_table | 7 +- .../logictest/testdata/logic_test/database | 20 +- .../logictest/testdata/logic_test/datetime | 
4 + pkg/sql/logictest/testdata/logic_test/discard | 10 +- .../logictest/testdata/logic_test/distinct | 8 + .../testdata/logic_test/distsql_stats | 56 ++++ .../testdata/logic_test/drop_database | 10 +- .../logictest/testdata/logic_test/drop_index | 10 + .../logictest/testdata/logic_test/drop_type | 96 ++++++ .../logictest/testdata/logic_test/drop_user | 6 +- pkg/sql/logictest/testdata/logic_test/enums | 30 +- .../logictest/testdata/logic_test/event_log | 6 +- .../testdata/logic_test/expression_index | 10 + pkg/sql/logictest/testdata/logic_test/fk | 297 +++++++++++++++--- .../testdata/logic_test/geospatial_index | 5 +- .../testdata/logic_test/information_schema | 1 + pkg/sql/logictest/testdata/logic_test/jobs | 19 +- pkg/sql/logictest/testdata/logic_test/join | 4 + .../testdata/logic_test/manual_retry | 4 +- .../testdata/logic_test/materialized_view | 8 + .../testdata/logic_test/multi_statement | 4 +- .../logictest/testdata/logic_test/on_update | 6 + .../testdata/logic_test/partial_index | 16 + .../logictest/testdata/logic_test/pg_catalog | 9 + pkg/sql/logictest/testdata/logic_test/prepare | 5 +- pkg/sql/logictest/testdata/logic_test/ranges | Bin 32078 -> 32190 bytes .../testdata/logic_test/reassign_owned_by | 14 + .../testdata/logic_test/rename_database | 41 ++- .../testdata/logic_test/row_level_ttl | 16 + .../secondary_index_column_families | 12 + .../logictest/testdata/logic_test/set_role | 2 + .../logic_test/show_create_all_tables | 12 + .../logic_test/show_create_all_tables_builtin | 10 + .../logictest/testdata/logic_test/show_source | 6 +- .../testdata/logic_test/statement_statistics | 15 +- .../testdata/logic_test/subquery_correlated | 6 + .../logictest/testdata/logic_test/temp_table | 5 +- .../logictest/testdata/logic_test/truncate | 22 ++ pkg/sql/logictest/testdata/logic_test/txn | 46 ++- pkg/sql/logictest/testdata/logic_test/union | 12 + pkg/sql/logictest/testdata/logic_test/update | 5 +- pkg/sql/logictest/testdata/logic_test/upsert | 10 +- .../logictest/testdata/logic_test/vectorize | 4 + pkg/sql/logictest/testdata/logic_test/views | 16 + pkg/sql/logictest/testdata/logic_test/window | 6 + .../logictest/testdata/logic_test/zigzag_join | 4 + pkg/sql/materialized_view_test.go | 16 +- .../testdata/autocommit_nonmetamorphic | 56 ++++ pkg/sql/opt/exec/execbuilder/testdata/delete | 5 +- ...ndary_index_column_families_nonmetamorphic | 18 +- pkg/sql/opt/exec/execbuilder/testdata/select | 40 ++- .../testdata/show_trace_nonmetamorphic | 31 +- pkg/sql/opt/exec/execbuilder/testdata/union | 2 + pkg/sql/pgwire/testdata/auth/conn_log | 9 +- pkg/sql/pgwire/testdata/pgtest/portals_crbugs | 8 +- pkg/sql/planhook.go | 1 + pkg/sql/planner.go | 15 +- pkg/sql/rowexec/backfiller_test.go | 12 +- pkg/sql/schema_changer_test.go | 111 +++---- pkg/sql/sessioninit/BUILD.bazel | 25 -- pkg/sql/set_cluster_setting.go | 2 +- .../show_create_all_tables_builtin_test.go | 38 +-- pkg/sql/stats/stats_cache_test.go | 23 +- pkg/sql/table_test.go | 22 +- pkg/sql/temporary_schema_test.go | 10 +- pkg/sql/testdata/session_migration/errors | 3 + pkg/sql/testdata/session_migration/sequence | 6 + pkg/sql/testdata/telemetry/error | 40 ++- pkg/sql/testdata/telemetry/sql-stats | 6 +- pkg/sql/tests/data.go | 15 +- pkg/sql/txn_restart_test.go | 13 +- pkg/testutils/jobutils/jobs_verification.go | 2 + 138 files changed, 2352 insertions(+), 669 deletions(-) diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index 0a26c2be949f..b6277556a213 100644 --- a/pkg/ccl/backupccl/backup_planning.go +++ 
b/pkg/ccl/backupccl/backup_planning.go @@ -653,8 +653,8 @@ func backupPlanHook( ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag()) defer span.Finish() - if !(p.ExtendedEvalContext().TxnImplicit || backupStmt.Options.Detached) { - return errors.Errorf("BACKUP cannot be used inside a transaction without DETACHED option") + if !(p.IsAutoCommit() || backupStmt.Options.Detached) { + return errors.Errorf("BACKUP cannot be used inside a multi-statement transaction without DETACHED option") } subdir, err := subdirFn() diff --git a/pkg/ccl/backupccl/backup_rand_test.go b/pkg/ccl/backupccl/backup_rand_test.go index 89f2104702db..4208f0673410 100644 --- a/pkg/ccl/backupccl/backup_rand_test.go +++ b/pkg/ccl/backupccl/backup_rand_test.go @@ -115,7 +115,8 @@ database_name = 'rand' AND schema_name = 'public'`) // and per-table restores) work properly with two kinds of table backups // (full database backups and per-table backups). for _, backup := range dbBackups { - sqlDB.Exec(t, "DROP DATABASE IF EXISTS restoredb; CREATE DATABASE restoredb") + sqlDB.Exec(t, "DROP DATABASE IF EXISTS restoredb") + sqlDB.Exec(t, "CREATE DATABASE restoredb") if err := verifyBackupRestoreStatementResult( t, sqlDB, "RESTORE rand.* FROM $1 WITH OPTIONS (into_db='restoredb')", backup, ); err != nil { @@ -135,7 +136,8 @@ database_name = 'rand' AND schema_name = 'public'`) tableNameCombos := powerset(tableNames) for i, combo := range tableNameCombos { - sqlDB.Exec(t, "DROP DATABASE IF EXISTS restoredb; CREATE DATABASE restoredb") + sqlDB.Exec(t, "DROP DATABASE IF EXISTS restoredb") + sqlDB.Exec(t, "CREATE DATABASE restoredb") backupTarget := fmt.Sprintf("%s%d", localFoo, i) if len(combo) == 0 { continue diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 677c9849d25c..ac207868a928 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -2413,48 +2413,37 @@ func TestBackupRestoreUserDefinedSchemas(t *testing.T) { defer cleanupFn() var ts1, ts2, ts3, ts4, ts5, ts6 string - sqlDB.Exec(t, ` -CREATE DATABASE d; -USE d; - -CREATE SCHEMA sc; -CREATE SCHEMA sc2; -CREATE TABLE d.sc.t1 (x int); -CREATE TABLE d.sc2.t1 (x bool); -`) + sqlDB.Exec(t, `CREATE DATABASE d;`) + sqlDB.Exec(t, `USE d;`) + sqlDB.Exec(t, `CREATE SCHEMA sc;`) + sqlDB.Exec(t, `CREATE SCHEMA sc2;`) + sqlDB.Exec(t, `CREATE TABLE d.sc.t1 (x int);`) + sqlDB.Exec(t, `CREATE TABLE d.sc2.t1 (x bool);`) sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts1) - sqlDB.Exec(t, ` -ALTER SCHEMA sc RENAME TO sc3; -ALTER SCHEMA sc2 RENAME TO sc; -`) + sqlDB.Exec(t, `ALTER SCHEMA sc RENAME TO sc3;`) + sqlDB.Exec(t, `ALTER SCHEMA sc2 RENAME TO sc;`) sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts2) - sqlDB.Exec(t, ` -DROP TABLE sc.t1; -DROP TABLE sc3.t1; -DROP SCHEMA sc; -DROP SCHEMA sc3; -`) + sqlDB.Exec(t, `DROP TABLE sc.t1;`) + sqlDB.Exec(t, `DROP TABLE sc3.t1;`) + sqlDB.Exec(t, `DROP SCHEMA sc;`) + sqlDB.Exec(t, `DROP SCHEMA sc3;`) sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts3) - sqlDB.Exec(t, ` - CREATE SCHEMA sc; - CREATE TABLE sc.t1 (a STRING); + sqlDB.Exec(t, `CREATE SCHEMA sc;`) + sqlDB.Exec(t, `CREATE TABLE sc.t1 (a STRING); `) sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts4) sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://0/rev-history-backup' WITH revision_history`) - sqlDB.Exec(t, ` -DROP TABLE sc.t1; -DROP SCHEMA sc; + sqlDB.Exec(t, `DROP TABLE sc.t1;`) + sqlDB.Exec(t, `DROP SCHEMA sc; `) sqlDB.QueryRow(t, `SELECT 
cluster_logical_timestamp()`).Scan(&ts5) - sqlDB.Exec(t, ` -CREATE SCHEMA sc; -CREATE TABLE sc.t1 (a FLOAT); -`) + sqlDB.Exec(t, `CREATE SCHEMA sc;`) + sqlDB.Exec(t, `CREATE TABLE sc.t1 (a FLOAT);`) sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts6) sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://0/rev-history-backup' WITH revision_history`) @@ -2500,17 +2489,15 @@ CREATE TABLE sc.t1 (a FLOAT); t.Run("full-cluster", func(t *testing.T) { _, sqlDB, dataDir, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - sqlDB.Exec(t, ` -CREATE DATABASE d; -USE d; -CREATE SCHEMA unused; -CREATE SCHEMA sc; -CREATE TABLE sc.tb1 (x INT); -INSERT INTO sc.tb1 VALUES (1); -CREATE TYPE sc.typ1 AS ENUM ('hello'); -CREATE TABLE sc.tb2 (x sc.typ1); -INSERT INTO sc.tb2 VALUES ('hello'); -`) + sqlDB.Exec(t, `CREATE DATABASE d;`) + sqlDB.Exec(t, `USE d;`) + sqlDB.Exec(t, `CREATE SCHEMA unused;`) + sqlDB.Exec(t, `CREATE SCHEMA sc;`) + sqlDB.Exec(t, `CREATE TABLE sc.tb1 (x INT);`) + sqlDB.Exec(t, `INSERT INTO sc.tb1 VALUES (1);`) + sqlDB.Exec(t, `CREATE TYPE sc.typ1 AS ENUM ('hello');`) + sqlDB.Exec(t, `CREATE TABLE sc.tb2 (x sc.typ1);`) + sqlDB.Exec(t, `INSERT INTO sc.tb2 VALUES ('hello');`) // Now backup the full cluster. sqlDB.Exec(t, `BACKUP TO 'nodelocal://0/test/'`) // Start a new server that shares the data directory. @@ -2535,17 +2522,15 @@ INSERT INTO sc.tb2 VALUES ('hello'); _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - sqlDB.Exec(t, ` -CREATE DATABASE d; -USE d; -CREATE SCHEMA sc; -CREATE SCHEMA unused; -CREATE TABLE sc.tb1 (x INT); -INSERT INTO sc.tb1 VALUES (1); -CREATE TYPE sc.typ1 AS ENUM ('hello'); -CREATE TABLE sc.tb2 (x sc.typ1); -INSERT INTO sc.tb2 VALUES ('hello'); -`) + sqlDB.Exec(t, `CREATE DATABASE d;`) + sqlDB.Exec(t, `USE d;`) + sqlDB.Exec(t, `CREATE SCHEMA sc;`) + sqlDB.Exec(t, `CREATE SCHEMA unused;`) + sqlDB.Exec(t, `CREATE TABLE sc.tb1 (x INT);`) + sqlDB.Exec(t, `INSERT INTO sc.tb1 VALUES (1);`) + sqlDB.Exec(t, `CREATE TYPE sc.typ1 AS ENUM ('hello');`) + sqlDB.Exec(t, `CREATE TABLE sc.tb2 (x sc.typ1);`) + sqlDB.Exec(t, `INSERT INTO sc.tb2 VALUES ('hello');`) // Backup the database. 
sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://0/test/'`) @@ -2569,25 +2554,23 @@ INSERT INTO sc.tb2 VALUES ('hello'); _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - sqlDB.Exec(t, ` -CREATE TABLE table_in_data (x INT); + sqlDB.Exec(t, `CREATE TABLE table_in_data (x INT);`) -CREATE SCHEMA data; -CREATE TABLE data.tb1 (x INT); + sqlDB.Exec(t, `CREATE SCHEMA data;`) + sqlDB.Exec(t, `CREATE TABLE data.tb1 (x INT);`) -CREATE DATABASE foo; -USE foo; -CREATE SCHEMA schema_in_foo; -CREATE TABLE schema_in_foo.tb1 (x INT); + sqlDB.Exec(t, `CREATE DATABASE foo;`) + sqlDB.Exec(t, `USE foo;`) + sqlDB.Exec(t, `CREATE SCHEMA schema_in_foo;`) + sqlDB.Exec(t, `CREATE TABLE schema_in_foo.tb1 (x INT);`) -CREATE SCHEMA schema_in_foo2; -CREATE TABLE schema_in_foo2.tb1 (x INT); + sqlDB.Exec(t, `CREATE SCHEMA schema_in_foo2;`) + sqlDB.Exec(t, `CREATE TABLE schema_in_foo2.tb1 (x INT);`) -CREATE SCHEMA foo; -CREATE TABLE foo.tb1 (x INT); + sqlDB.Exec(t, `CREATE SCHEMA foo;`) + sqlDB.Exec(t, `CREATE TABLE foo.tb1 (x INT);`) -CREATE TABLE tb2 (y INT); -`) + sqlDB.Exec(t, `CREATE TABLE tb2 (y INT);`) for _, tc := range []struct { name string @@ -2635,16 +2618,14 @@ table_name from [SHOW TABLES FROM restore] ORDER BY schema_name, table_name`, tc _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - sqlDB.Exec(t, ` -CREATE DATABASE d; -USE d; -CREATE SCHEMA sc; -CREATE TYPE sc.typ1 AS ENUM ('hello'); -CREATE TABLE sc.tb1 (x sc.typ1); -INSERT INTO sc.tb1 VALUES ('hello'); -CREATE TABLE sc.tb2 (x INT); -INSERT INTO sc.tb2 VALUES (1); -`) + sqlDB.Exec(t, `CREATE DATABASE d;`) + sqlDB.Exec(t, `USE d;`) + sqlDB.Exec(t, `CREATE SCHEMA sc;`) + sqlDB.Exec(t, `CREATE TYPE sc.typ1 AS ENUM ('hello');`) + sqlDB.Exec(t, `CREATE TABLE sc.tb1 (x sc.typ1);`) + sqlDB.Exec(t, `INSERT INTO sc.tb1 VALUES ('hello');`) + sqlDB.Exec(t, `CREATE TABLE sc.tb2 (x INT);`) + sqlDB.Exec(t, `INSERT INTO sc.tb2 VALUES (1);`) { // We have to qualify the table correctly to back it up. d.tb1 resolves // to d.public.tb1. @@ -2693,25 +2674,23 @@ INSERT INTO sc.tb2 VALUES (1); defer cleanupFn() kvDB := tc.Server(0).DB() - sqlDB.Exec(t, ` -CREATE DATABASE d1; -USE d1; -CREATE SCHEMA sc1; -CREATE TABLE sc1.tb (x INT); -INSERT INTO sc1.tb VALUES (1); -CREATE SCHEMA sc2; -CREATE TABLE sc2.tb (x INT); -INSERT INTO sc2.tb VALUES (2); - -CREATE DATABASE d2; -USE d2; -CREATE SCHEMA sc3; -CREATE TABLE sc3.tb (x INT); -INSERT INTO sc3.tb VALUES (3); -CREATE SCHEMA sc4; -CREATE TABLE sc4.tb (x INT); -INSERT INTO sc4.tb VALUES (4); -`) + sqlDB.Exec(t, `CREATE DATABASE d1;`) + sqlDB.Exec(t, `USE d1;`) + sqlDB.Exec(t, `CREATE SCHEMA sc1;`) + sqlDB.Exec(t, `CREATE TABLE sc1.tb (x INT);`) + sqlDB.Exec(t, `INSERT INTO sc1.tb VALUES (1);`) + sqlDB.Exec(t, `CREATE SCHEMA sc2;`) + sqlDB.Exec(t, `CREATE TABLE sc2.tb (x INT);`) + sqlDB.Exec(t, `INSERT INTO sc2.tb VALUES (2);`) + + sqlDB.Exec(t, `CREATE DATABASE d2;`) + sqlDB.Exec(t, `USE d2;`) + sqlDB.Exec(t, `CREATE SCHEMA sc3;`) + sqlDB.Exec(t, `CREATE TABLE sc3.tb (x INT);`) + sqlDB.Exec(t, `INSERT INTO sc3.tb VALUES (3);`) + sqlDB.Exec(t, `CREATE SCHEMA sc4;`) + sqlDB.Exec(t, `CREATE TABLE sc4.tb (x INT);`) + sqlDB.Exec(t, `INSERT INTO sc4.tb VALUES (4);`) { // Backup all databases. 
sqlDB.Exec(t, `BACKUP DATABASE d1, d2 TO 'nodelocal://0/test/'`) @@ -2751,14 +2730,12 @@ INSERT INTO sc4.tb VALUES (4); _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - sqlDB.Exec(t, ` -CREATE DATABASE d; -USE d; -CREATE SCHEMA sc; -CREATE TYPE sc.typ1 AS ENUM ('hello'); -CREATE TABLE sc.tb1 (x sc.typ1); -INSERT INTO sc.tb1 VALUES ('hello'); -`) + sqlDB.Exec(t, `CREATE DATABASE d;`) + sqlDB.Exec(t, `USE d;`) + sqlDB.Exec(t, `CREATE SCHEMA sc;`) + sqlDB.Exec(t, `CREATE TYPE sc.typ1 AS ENUM ('hello');`) + sqlDB.Exec(t, `CREATE TABLE sc.tb1 (x sc.typ1);`) + sqlDB.Exec(t, `INSERT INTO sc.tb1 VALUES ('hello');`) // Take a backup. sqlDB.Exec(t, `BACKUP TABLE d.sc.tb1 TO 'nodelocal://0/test/'`) // Now drop the table. @@ -4281,7 +4258,9 @@ func TestRestoreAsOfSystemTime(t *testing.T) { t, fmt.Sprintf(`RESTORE data.* FROM $1 AS OF SYSTEM TIME %s WITH into_db='err'`, i), latestBackup, ) - sqlDB.Exec(t, `DROP DATABASE err; CREATE DATABASE err`) + sqlDB.Exec(t, `DROP DATABASE err`) + sqlDB.Exec(t, `CREATE DATABASE err`) + } else { sqlDB.ExpectErr( t, "invalid RESTORE timestamp", @@ -4299,7 +4278,8 @@ func TestRestoreAsOfSystemTime(t *testing.T) { t, fmt.Sprintf(`RESTORE data.* FROM $1, $2, $3 AS OF SYSTEM TIME %s WITH into_db='err'`, i), latestBackup, incLatestBackup, inc2LatestBackup, ) - sqlDB.Exec(t, `DROP DATABASE err; CREATE DATABASE err`) + sqlDB.Exec(t, `DROP DATABASE err`) + sqlDB.Exec(t, `CREATE DATABASE err`) } else { sqlDB.ExpectErr( t, "invalid RESTORE timestamp", @@ -5548,7 +5528,7 @@ func TestDetachedBackup(t *testing.T) { return tx.QueryRow(`BACKUP DATABASE data TO $1`, localFoo).Scan(&jobID) }) require.True(t, testutils.IsError(err, - "BACKUP cannot be used inside a transaction without DETACHED option")) + "BACKUP cannot be used inside a multi-statement transaction without DETACHED option")) // Okay to run DETACHED backup, even w/out explicit transaction. sqlDB.QueryRow(t, `BACKUP DATABASE data TO $1 WITH DETACHED`, localFoo).Scan(&jobID) @@ -5602,7 +5582,7 @@ func TestDetachedRestore(t *testing.T) { return tx.QueryRow(`RESTORE TABLE t FROM $1 WITH INTO_DB=test`, localFoo).Scan(&jobID) }) require.True(t, testutils.IsError(err, - "RESTORE cannot be used inside a transaction without DETACHED option")) + "RESTORE cannot be used inside a multi-statement transaction without DETACHED option")) // Okay to run DETACHED RESTORE, even w/out explicit transaction. 
sqlDB.QueryRow(t, `RESTORE TABLE t FROM $1 WITH DETACHED, INTO_DB=test`, @@ -9277,11 +9257,9 @@ func TestDroppedDescriptorRevisionAndSystemDBIDClash(t *testing.T) { _, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, 1, InitManualReplication) defer cleanupFn() - sqlDB.Exec(t, ` -CREATE TABLE foo (id INT); -BACKUP TO 'nodelocal://0/foo' WITH revision_history; -DROP TABLE foo; -`) + sqlDB.Exec(t, `CREATE TABLE foo (id INT);`) + sqlDB.Exec(t, `BACKUP TO 'nodelocal://0/foo' WITH revision_history;`) + sqlDB.Exec(t, `DROP TABLE foo;`) var aost string sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&aost) @@ -9357,15 +9335,13 @@ func TestRestoreRemappingOfExistingUDTInColExpr(t *testing.T) { _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication) defer cleanupFn() - sqlDB.Exec(t, ` -CREATE TYPE status AS ENUM ('open', 'closed', 'inactive'); -CREATE TABLE foo (id INT PRIMARY KEY, what status default 'open'); -BACKUP DATABASE data to 'nodelocal://0/foo'; -DROP TABLE foo CASCADE; -DROP TYPE status; -CREATE TYPE status AS ENUM ('open', 'closed', 'inactive'); -RESTORE TABLE foo FROM 'nodelocal://0/foo'; -`) + sqlDB.Exec(t, `CREATE TYPE status AS ENUM ('open', 'closed', 'inactive');`) + sqlDB.Exec(t, `CREATE TABLE foo (id INT PRIMARY KEY, what status default 'open');`) + sqlDB.Exec(t, `BACKUP DATABASE data to 'nodelocal://0/foo';`) + sqlDB.Exec(t, `DROP TABLE foo CASCADE;`) + sqlDB.Exec(t, `DROP TYPE status;`) + sqlDB.Exec(t, `CREATE TYPE status AS ENUM ('open', 'closed', 'inactive');`) + sqlDB.Exec(t, `RESTORE TABLE foo FROM 'nodelocal://0/foo';`) } // TestGCDropIndexSpanExpansion is a regression test for @@ -9398,13 +9374,12 @@ func TestGCDropIndexSpanExpansion(t *testing.T) { sqlRunner := sqlutils.MakeSQLRunner(conn) sqlRunner.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`) // speeds up the test - sqlRunner.Exec(t, ` -CREATE DATABASE test; USE test; -CREATE TABLE foo (id INT PRIMARY KEY, id2 INT, id3 INT, INDEX bar (id2), INDEX baz(id3)); -ALTER INDEX foo@bar CONFIGURE ZONE USING gc.ttlseconds = '1'; -INSERT INTO foo VALUES (1, 2, 3); -DROP INDEX foo@bar; -`) + sqlRunner.Exec(t, `CREATE DATABASE test;`) + sqlRunner.Exec(t, ` USE test;`) + sqlRunner.Exec(t, `CREATE TABLE foo (id INT PRIMARY KEY, id2 INT, id3 INT, INDEX bar (id2), INDEX baz(id3));`) + sqlRunner.Exec(t, `ALTER INDEX foo@bar CONFIGURE ZONE USING gc.ttlseconds = '1';`) + sqlRunner.Exec(t, `INSERT INTO foo VALUES (1, 2, 3);`) + sqlRunner.Exec(t, `DROP INDEX foo@bar;`) // Wait until the index is about to get gc'ed. 
<-aboutToGC diff --git a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go index 761190e204c4..d0ee8f0ac56a 100644 --- a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go +++ b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go @@ -769,11 +769,9 @@ func TestDropDatabaseRevisionHistory(t *testing.T) { defer cleanupFn() sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo) - sqlDB.Exec(t, ` -CREATE DATABASE same_name_db; -DROP DATABASE same_name_db; -CREATE DATABASE same_name_db; -`) + sqlDB.Exec(t, `CREATE DATABASE same_name_db;`) + sqlDB.Exec(t, `DROP DATABASE same_name_db;`) + sqlDB.Exec(t, `CREATE DATABASE same_name_db;`) sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo) _, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{}) @@ -1042,15 +1040,13 @@ func TestClusterRevisionDoesNotBackupOptOutSystemTables(t *testing.T) { sqlDB := sqlutils.MakeSQLRunner(conn) defer cleanup() - sqlDB.Exec(t, ` -CREATE DATABASE test; -USE test; -CREATE TABLE foo (id INT); -BACKUP TO 'nodelocal://1/foo' WITH revision_history; -BACKUP TO 'nodelocal://1/foo' WITH revision_history; -CREATE TABLE bar (id INT); -BACKUP TO 'nodelocal://1/foo' WITH revision_history; -`) + sqlDB.Exec(t, `CREATE DATABASE test;`) + sqlDB.Exec(t, `USE test;`) + sqlDB.Exec(t, `CREATE TABLE foo (id INT);`) + sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`) + sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`) + sqlDB.Exec(t, `CREATE TABLE bar (id INT);`) + sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`) } func TestRestoreWithRecreatedDefaultDB(t *testing.T) { diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index 89a966f754b2..f091d2f90434 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -1142,8 +1142,8 @@ func restorePlanHook( ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag()) defer span.Finish() - if !(p.ExtendedEvalContext().TxnImplicit || restoreStmt.Options.Detached) { - return errors.Errorf("RESTORE cannot be used inside a transaction without DETACHED option") + if !(p.IsAutoCommit() || restoreStmt.Options.Detached) { + return errors.Errorf("RESTORE cannot be used inside a multi-statement transaction without DETACHED option") } subdir, err := subdirFn() diff --git a/pkg/ccl/backupccl/testdata/backup-restore/backup-dropped-descriptors b/pkg/ccl/backupccl/testdata/backup-restore/backup-dropped-descriptors index cb7c5e89be79..749c2dcf773f 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/backup-dropped-descriptors +++ b/pkg/ccl/backupccl/testdata/backup-restore/backup-dropped-descriptors @@ -64,6 +64,9 @@ BACKUP INTO 'nodelocal://0/cluster/dropped-database'; # dropped database 'd'. exec-sql RESTORE DATABASE d FROM LATEST IN 'nodelocal://0/dropped-database' WITH new_db_name = 'd1'; +---- + +exec-sql USE d1; ---- @@ -77,6 +80,9 @@ public bar # dropped database 'd'. 
exec-sql RESTORE DATABASE d FROM LATEST IN 'nodelocal://0/cluster/dropped-database' WITH new_db_name = 'd2'; +---- + +exec-sql USE d2; ---- @@ -95,6 +101,9 @@ new-server name=s2 exec-sql CREATE DATABASE d2; +---- + +exec-sql CREATE TABLE d2.t2 (id INT); ---- @@ -102,8 +111,14 @@ exec-sql CREATE TYPE d2.typ AS ENUM ('hello'); CREATE SCHEMA d2.s; CREATE TABLE d2.s.t (id INT); +---- + +exec-sql SET use_declarative_schema_changer = 'off'; SET CLUSTER SETTING jobs.debug.pausepoints = 'schemachanger.before.exec'; +---- + +exec-sql DROP SCHEMA d2.s CASCADE; ---- paused before it completed with reason: pause point "schemachanger.before.exec" hit @@ -160,6 +175,9 @@ WHERE object_name = 's' OR object_name = 'typ'; # Restore the backups to check they are valid. exec-sql RESTORE DATABASE d2 FROM LATEST IN 'nodelocal://0/dropped-schema-in-database' WITH new_db_name = 'd3'; +---- + +exec-sql USE d3; ---- @@ -182,6 +200,9 @@ public t2 exec-sql RESTORE DATABASE d2 FROM LATEST IN 'nodelocal://0/cluster/dropped-schema-in-database' WITH new_db_name ='d4'; +---- + +exec-sql USE d4; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions b/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions index 6172d2a4f3de..e53bc58dc2c8 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions +++ b/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions @@ -9,6 +9,13 @@ CREATE TABLE d.t (x INT); INSERT INTO d.t VALUES (1), (2), (3); ---- +# BACKUP is not allowed in a batch-statement. +exec-sql +BACKUP TO 'nodelocal://0/test-root/'; +SELECT 1; +---- +pq: BACKUP cannot be used inside a multi-statement transaction without DETACHED option + # Cluster backup should succeed as a root user. exec-sql BACKUP TO 'nodelocal://0/test-root/' @@ -22,7 +29,13 @@ GRANT ADMIN TO testuser; exec-sql user=testuser BACKUP TO 'nodelocal://0/test-nonroot-cluster'; +---- + +exec-sql user=testuser BACKUP DATABASE d TO 'nodelocal://0/test-nonroot-db'; +---- + +exec-sql user=testuser BACKUP TABLE d.t TO 'nodelocal://0/test-nonroot-table'; ---- @@ -106,6 +119,9 @@ GRANT USAGE ON TYPE d2.greeting TO testuser; # testuser should now have all the required privileges. exec-sql server=s2 user=testuser BACKUP DATABASE d2 TO 'nodelocal://0/d2'; +---- + +exec-sql server=s2 user=testuser BACKUP TABLE d2.t TO 'nodelocal://0/d2-table'; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/column-families b/pkg/ccl/backupccl/testdata/backup-restore/column-families index bec83fd05b79..8f873736de3d 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/column-families +++ b/pkg/ccl/backupccl/testdata/backup-restore/column-families @@ -3,16 +3,40 @@ new-server name=s1 localities=us-east-1,us-west-1,us-west-2,eu-central-1 exec-sql CREATE DATABASE orig; +---- + +exec-sql USE orig; +---- + +exec-sql CREATE TABLE cfs (a INT PRIMARY KEY, b STRING, c STRING, d STRING, FAMILY (b), FAMILY (c)); +---- + +exec-sql INSERT INTO cfs SELECT x, repeat('abc', 100), repeat('abc', 100) FROM generate_series(0, 3) AS x; +---- + +exec-sql ALTER TABLE cfs SPLIT AT SELECT a FROM cfs; +---- + +exec-sql -- Split the output files very small to catch output SSTs mid-row. 
SET CLUSTER SETTING bulkio.backup.file_size = '1'; SET CLUSTER SETTING kv.bulk_sst.target_size = '1'; SET CLUSTER SETTING bulkio.backup.merge_file_buffer_size = '1MiB'; +---- + +exec-sql BACKUP cfs TO 'nodelocal://1/foo'; +---- + +exec-sql CREATE DATABASE r1; +---- + +exec-sql RESTORE cfs FROM 'nodelocal://1/foo' WITH into_db='r1'; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/descriptor-broadening b/pkg/ccl/backupccl/testdata/backup-restore/descriptor-broadening index a81fa91e8c2a..848deb9392e3 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/descriptor-broadening +++ b/pkg/ccl/backupccl/testdata/backup-restore/descriptor-broadening @@ -4,14 +4,26 @@ new-server name=s1 exec-sql CREATE DATABASE db1; CREATE DATABASE db2; +---- + +exec-sql CREATE TABLE db1.t (a INT); +---- + +exec-sql BACKUP DATABASE db1 TO 'nodelocal://1/backup'; +---- + +exec-sql BACKUP DATABASE db1,db2 TO 'nodelocal://1/backup'; ---- pq: previous backup does not contain the complete database "db2" exec-sql BACKUP db1.t TO 'nodelocal://1/backup_2'; +---- + +exec-sql BACKUP DATABASE db1 TO 'nodelocal://1/backup_2'; ---- pq: previous backup does not contain the complete database "db1" diff --git a/pkg/ccl/backupccl/testdata/backup-restore/feature-flags b/pkg/ccl/backupccl/testdata/backup-restore/feature-flags index 115e24946e94..b014b12aef5e 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/feature-flags +++ b/pkg/ccl/backupccl/testdata/backup-restore/feature-flags @@ -10,6 +10,9 @@ INSERT INTO d.t VALUES (1), (2), (3); # Test running backup when BACKUP feature flag is disabled. exec-sql SET CLUSTER SETTING feature.backup.enabled = FALSE; +---- + +exec-sql BACKUP TO 'nodelocal://0/test-root/'; ---- pq: feature BACKUP was disabled by the database administrator @@ -17,6 +20,9 @@ pq: feature BACKUP was disabled by the database administrator # Test running backup when feature flag is enabled. exec-sql SET CLUSTER SETTING feature.backup.enabled = TRUE; +---- + +exec-sql BACKUP TO 'nodelocal://0/test-root/'; ---- @@ -27,6 +33,9 @@ DROP TABLE d.t; # Test running restore when feature flag is disabled. exec-sql SET CLUSTER SETTING feature.restore.enabled = FALSE; +---- + +exec-sql RESTORE TABLE d.t FROM 'nodelocal://0/test-root/'; ---- pq: feature RESTORE was disabled by the database administrator @@ -34,5 +43,8 @@ pq: feature RESTORE was disabled by the database administrator # Test running restore when feature flag is enabled. 
exec-sql SET CLUSTER SETTING feature.restore.enabled = TRUE; +---- + +exec-sql RESTORE TABLE d.t FROM 'nodelocal://0/test-root/'; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/max-row-size b/pkg/ccl/backupccl/testdata/backup-restore/max-row-size index d8627eb75c4d..8fc41a66c586 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/max-row-size +++ b/pkg/ccl/backupccl/testdata/backup-restore/max-row-size @@ -24,7 +24,13 @@ pq: row larger than max row size: table 109 family 0 primary key /Table/109/1/2/ exec-sql BACKUP maxrow TO 'nodelocal://1/maxrow'; +---- + +exec-sql CREATE DATABASE d2; +---- + +exec-sql RESTORE maxrow FROM 'nodelocal://1/maxrow' WITH into_db='d2'; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/multiregion b/pkg/ccl/backupccl/testdata/backup-restore/multiregion index 1d5f3896bb54..eee1bc5e5ae4 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/multiregion +++ b/pkg/ccl/backupccl/testdata/backup-restore/multiregion @@ -66,19 +66,37 @@ HINT: there are two ways you can resolve this issue: 1) update the cluster to wh # Create a database with no regions to check default primary regions. exec-sql CREATE DATABASE no_region_db; +---- + +exec-sql CREATE TABLE no_region_db.t (x INT); INSERT INTO no_region_db.t VALUES (1), (2), (3); CREATE DATABASE no_region_db_2; CREATE TABLE no_region_db_2.t (x INT); INSERT INTO no_region_db_2.t VALUES (1), (2), (3); +---- + +exec-sql BACKUP DATABASE no_region_db TO 'nodelocal://1/no_region_database_backup/'; +---- + +exec-sql BACKUP TO 'nodelocal://1/no_region_cluster_backup/'; +---- + +exec-sql DROP DATABASE no_region_db; +---- + +exec-sql DROP DATABASE no_region_db_2; ---- exec-sql SET CLUSTER SETTING sql.defaults.primary_region = 'non-existent-region'; +---- + +exec-sql RESTORE DATABASE no_region_db FROM 'nodelocal://1/no_region_database_backup/'; ---- pq: region "non-existent-region" does not exist @@ -88,6 +106,9 @@ set the default PRIMARY REGION to a region that exists (see SHOW REGIONS FROM CL exec-sql SET CLUSTER SETTING sql.defaults.primary_region = 'eu-central-1'; +---- + +exec-sql RESTORE DATABASE no_region_db FROM 'nodelocal://1/no_region_database_backup/'; ---- NOTICE: setting the PRIMARY REGION as eu-central-1 on database no_region_db @@ -111,16 +132,22 @@ exec-sql CREATE DATABASE eu_central_db; CREATE TABLE eu_central_db.t (x INT); INSERT INTO eu_central_db.t VALUES (1), (2), (3); -BACKUP DATABASE eu_central_db TO 'nodelocal://1/eu_central_database_backup/'; ---- NOTICE: setting eu-central-1 as the PRIMARY REGION as no PRIMARY REGION was specified +exec-sql +BACKUP DATABASE eu_central_db TO 'nodelocal://1/eu_central_database_backup/'; +---- + # New cluster for a cluster backup. new-server name=s4 share-io-dir=s1 allow-implicit-access localities=eu-central-1,eu-north-1 ---- exec-sql SET CLUSTER SETTING sql.defaults.primary_region = 'eu-north-1'; +---- + +exec-sql RESTORE FROM 'nodelocal://1/no_region_cluster_backup/'; ---- NOTICE: setting the PRIMARY REGION as eu-north-1 on database defaultdb diff --git a/pkg/ccl/backupccl/testdata/backup-restore/restore-grants b/pkg/ccl/backupccl/testdata/backup-restore/restore-grants index 78abdc998274..59617a755a0b 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/restore-grants +++ b/pkg/ccl/backupccl/testdata/backup-restore/restore-grants @@ -203,8 +203,14 @@ SELECT * FROM testuser_db.sc.othertable; # Let's restore tables as admin and ensure that the table's privs are the same # as the db it restores into. 
exec-sql -CREATE DATABASE restoredb; +CREATE DATABASE restoredb +---- + +exec-sql GRANT CREATE ON DATABASE restoredb TO user1; +---- + +exec-sql RESTORE testdb.sc.othertable, testdb.testtable_greeting_usage, testdb.testtable_greeting_owner FROM 'nodelocal://1/test' WITH into_db='restoredb'; ---- @@ -259,6 +265,9 @@ ALTER TYPE restoredb.greeting_owner ADD VALUE 'new' BEFORE 'howdy'; exec-sql USE defaultdb; DROP DATABASE testdb CASCADE; +---- + +exec-sql RESTORE DATABASE testdb FROM 'nodelocal://0/test/'; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/restore-permissions b/pkg/ccl/backupccl/testdata/backup-restore/restore-permissions index d98c8aa85fc0..e7bfa75f7cf8 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/restore-permissions +++ b/pkg/ccl/backupccl/testdata/backup-restore/restore-permissions @@ -20,6 +20,9 @@ GRANT ADMIN TO testuser; exec-sql user=testuser DROP DATABASE d; +---- + +exec-sql user=testuser RESTORE DATABASE d FROM 'nodelocal://0/test/'; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/revision_history b/pkg/ccl/backupccl/testdata/backup-restore/revision_history index 99a7740048d2..f003a5a62ab7 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/revision_history +++ b/pkg/ccl/backupccl/testdata/backup-restore/revision_history @@ -8,12 +8,18 @@ new-server name=s1 # descriptor will add the span for index 2 /Table/55/{2-3}. These spans should # be allowed to overlap. exec-sql -CREATE DATABASE d; +CREATE DATABASE d +---- + +exec-sql CREATE TABLE d.t (a INT PRIMARY KEY, b INT, c INT); CREATE INDEX test_idx_2 ON d.t(b); CREATE INDEX test_idx_3 ON d.t(c); DROP INDEX d.test_idx_2; -BACKUP DATABASE d INTO 'nodelocal://0/my_backups' WITH revision_history; ---- NOTICE: the data for dropped indexes is reclaimed asynchronously HINT: The reclamation delay can be customized in the zone configuration for the table. 
+ +exec-sql +BACKUP DATABASE d INTO 'nodelocal://0/my_backups' WITH revision_history; +---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/temp-tables b/pkg/ccl/backupccl/testdata/backup-restore/temp-tables index a846f97fe9ec..675c59589f01 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/temp-tables +++ b/pkg/ccl/backupccl/testdata/backup-restore/temp-tables @@ -47,13 +47,22 @@ BACKUP d1.* TO 'nodelocal://0/d1_star_backup/' exec-sql COMMENT ON TABLE temp_table IS 'should not show up in restore'; +---- + +exec-sql BACKUP TO 'nodelocal://0/full_cluster_backup/'; ---- exec-sql USE defaultdb; -DROP DATABASE d1; -RESTORE DATABASE d1 FROM 'nodelocal://0/d1_backup/'; +DROP DATABASE d1 +---- + +exec-sql +RESTORE DATABASE d1 FROM 'nodelocal://0/d1_backup/' +---- + +exec-sql USE d1 ---- @@ -75,8 +84,14 @@ perm_table exec-sql USE defaultdb; -DROP DATABASE d1; -RESTORE DATABASE d1 FROM 'nodelocal://0/d1_star_backup/'; +DROP DATABASE d1 +---- + +exec-sql +RESTORE DATABASE d1 FROM 'nodelocal://0/d1_star_backup/' +---- + +exec-sql USE d1 ---- @@ -102,6 +117,9 @@ new-server name=s2 share-io-dir=s1 temp-cleanup-freq=5s exec-sql USE defaultdb; +---- + +exec-sql RESTORE FROM 'nodelocal://0/full_cluster_backup/'; ---- diff --git a/pkg/ccl/backupccl/testdata/backup-restore/user-defined-types b/pkg/ccl/backupccl/testdata/backup-restore/user-defined-types index 5ae804c8c149..4e6d0f1e9b26 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/user-defined-types +++ b/pkg/ccl/backupccl/testdata/backup-restore/user-defined-types @@ -133,7 +133,10 @@ BACKUP DATABASE d TO 'nodelocal://0/test/' ---- exec-sql -DROP DATABASE d; +DROP DATABASE d +---- + +exec-sql RESTORE DATABASE d FROM 'nodelocal://0/test/'; ---- diff --git a/pkg/ccl/benchccl/rttanalysisccl/multi_region_bench_test.go b/pkg/ccl/benchccl/rttanalysisccl/multi_region_bench_test.go index 3227dba46bce..334e14127957 100644 --- a/pkg/ccl/benchccl/rttanalysisccl/multi_region_bench_test.go +++ b/pkg/ccl/benchccl/rttanalysisccl/multi_region_bench_test.go @@ -34,7 +34,7 @@ func TestBenchmarkExpectation(t *testing.T) { reg.RunExpectations(t) } const ( multipleTableFixture = ` -CREATE DATABASE test PRIMARY REGION "us-east1" REGIONS "us-east1", "us-east2", "us-east3"; +BEGIN; CREATE DATABASE test PRIMARY REGION "us-east1" REGIONS "us-east1", "us-east2", "us-east3"; COMMIT; USE test; CREATE TABLE test11 (p int) LOCALITY REGIONAL BY TABLE IN "us-east1"; CREATE TABLE test12 (p int) LOCALITY REGIONAL BY TABLE IN "us-east1"; diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index cd30f5030010..f9ca7a5c8dc3 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -5470,12 +5470,10 @@ func TestDistSenderRangeFeedPopulatesVirtualTable(t *testing.T) { defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) - sqlDB.Exec(t, ` -SET CLUSTER SETTING kv.rangefeed.enabled='true'; -CREATE TABLE tbl (a INT, b STRING); -INSERT INTO tbl VALUES (1, 'one'), (2, 'two'), (3, 'three'); -CREATE CHANGEFEED FOR tbl INTO 'null://'; -`) + sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled='true';`) + sqlDB.Exec(t, `CREATE TABLE tbl (a INT, b STRING);`) + sqlDB.Exec(t, `INSERT INTO tbl VALUES (1, 'one'), (2, 'two'), (3, 'three');`) + sqlDB.Exec(t, `CREATE CHANGEFEED FOR tbl INTO 'null://';`) var tableID int sqlDB.QueryRow(t, "SELECT table_id FROM crdb_internal.tables WHERE name='tbl'").Scan(&tableID) diff --git 
a/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row b/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row index c7c027728cc1..32c0d719aa05 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row @@ -105,6 +105,9 @@ events (1 found): exec idx=2 PREPARE max_staleness_prep AS SELECT pk FROM t AS OF SYSTEM TIME with_max_staleness('10s') WHERE pk = 1; +---- + +exec idx=2 PREPARE min_timestamp_prep AS SELECT pk FROM t AS OF SYSTEM TIME with_min_timestamp(now() - '10s') WHERE pk = 1 ---- @@ -205,7 +208,13 @@ events (7 found): # with the schema not existing if none of the followers have caught up. exec CREATE TABLE t2(pk INT PRIMARY KEY); +---- + +exec INSERT INTO t2 VALUES (2); +---- + +exec ALTER TABLE t2 ADD COLUMN new_col INT ---- diff --git a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality index 53e12765e9df..2382d57571ed 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality +++ b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality @@ -518,6 +518,8 @@ created_as_global CREATE TABLE public.created_as_global ( # for REGIONAL BY ROW transformations. statement ok DROP TABLE created_as_global; + +statement ok CREATE TABLE created_as_global ( pk int primary key, i int, @@ -526,6 +528,8 @@ CREATE TABLE created_as_global ( unique(i), FAMILY (pk, i, b) ) LOCALITY GLOBAL; + +statement ok INSERT INTO created_as_global VALUES (0, 1, 2) query TTT @@ -650,6 +654,8 @@ ap-southeast-2 1 # Drop the table and try again with the crdb_region column defined. statement ok DROP TABLE created_as_global; + +statement ok CREATE TABLE created_as_global ( pk int primary key, i int, @@ -665,6 +671,8 @@ ALTER TABLE created_as_global SET LOCALITY REGIONAL BY ROW statement ok DROP TABLE created_as_global; + +statement ok CREATE TABLE created_as_global ( pk int primary key, i int, @@ -674,6 +682,8 @@ CREATE TABLE created_as_global ( crdb_region crdb_internal_region, FAMILY (pk, i, b, crdb_region) ) LOCALITY GLOBAL; + +statement ok INSERT INTO created_as_global VALUES (0, 1, 2, 'us-east-1') statement error cannot use column crdb_region for REGIONAL BY ROW table as it may contain NULL values @@ -681,6 +691,8 @@ ALTER TABLE created_as_global SET LOCALITY REGIONAL BY ROW statement ok ALTER TABLE created_as_global ALTER COLUMN crdb_region SET NOT NULL; + +statement ok ALTER TABLE created_as_global SET LOCALITY REGIONAL BY ROW query TT @@ -750,6 +762,8 @@ created_as_global CREATE TABLE public.created_as_global ( # Test altering to REGIONAL BY ROW AS . statement ok DROP TABLE created_as_global; + +statement ok CREATE TABLE created_as_global ( pk int primary key, i int, @@ -759,6 +773,8 @@ CREATE TABLE created_as_global ( cr crdb_internal_region, FAMILY (pk, i, b, cr) ) LOCALITY GLOBAL; + +statement ok INSERT INTO created_as_global VALUES (0, 1, 2, 'us-east-1') statement error column "non_exist" does not exist @@ -1316,6 +1332,8 @@ TABLE regional_by_table_no_region ALTER TABLE regional_by_table_no_region CONFI # REGIONAL BY ROW transformations. 
statement ok DROP TABLE regional_by_table_no_region; + +statement ok CREATE TABLE regional_by_table_no_region ( pk int primary key, i int, @@ -1324,6 +1342,8 @@ CREATE TABLE regional_by_table_no_region ( unique(i), FAMILY (pk, i, b) ) LOCALITY REGIONAL BY TABLE; + +statement ok INSERT INTO regional_by_table_no_region VALUES (0, 1, 2) statement ok @@ -1345,6 +1365,8 @@ regional_by_table_no_region CREATE TABLE public.regional_by_table_no_region ( statement ok DROP TABLE regional_by_table_no_region; + +statement ok CREATE TABLE regional_by_table_no_region ( pk int primary key, i int, @@ -1354,6 +1376,8 @@ CREATE TABLE regional_by_table_no_region ( cr crdb_internal_region NOT NULL NOT NULL, FAMILY (pk, i, b, cr) ) LOCALITY REGIONAL BY TABLE; + +statement ok INSERT INTO regional_by_table_no_region VALUES (0, 1, 2, 'us-east-1') query TTT @@ -1396,6 +1420,8 @@ DATABASE alter_locality_test ALTER DATABASE alter_locality_test CONFIGURE ZONE # REGIONAL BY ROW AS transformations. statement ok DROP TABLE regional_by_table_no_region; + +statement ok CREATE TABLE regional_by_table_no_region ( pk int primary key, i int, @@ -1405,6 +1431,8 @@ CREATE TABLE regional_by_table_no_region ( unique(i), FAMILY (pk, i, b, cr) ) LOCALITY REGIONAL BY TABLE; + +statement ok INSERT INTO regional_by_table_no_region VALUES (0, 1, 2, 'us-east-1') statement ok @@ -1669,6 +1697,8 @@ TABLE regional_by_table_in_us_east ALTER TABLE regional_by_table_in_us_east CON # REGIONAL BY ROW transformations. statement ok DROP TABLE regional_by_table_in_us_east; + +statement ok CREATE TABLE regional_by_table_in_us_east ( pk int PRIMARY KEY, i int, @@ -1677,7 +1707,11 @@ CREATE TABLE regional_by_table_in_us_east ( unique(i), FAMILY (pk, i, b) ) LOCALITY REGIONAL BY TABLE IN "us-east-1"; + +statement ok INSERT INTO regional_by_table_in_us_east VALUES (0, 1, 2); + +statement ok ALTER TABLE regional_by_table_in_us_east SET LOCALITY REGIONAL BY ROW query TT @@ -1734,6 +1768,8 @@ DATABASE alter_locality_test ALTER DATABASE alter_locality_test CONFIGURE ZONE # REGIONAL BY ROW AS transformations. 
statement ok DROP TABLE regional_by_table_in_us_east; + +statement ok CREATE TABLE regional_by_table_in_us_east ( pk int PRIMARY KEY, i int, @@ -1743,7 +1779,11 @@ CREATE TABLE regional_by_table_in_us_east ( cr crdb_internal_region NOT NULL NOT NULL, FAMILY (pk, i, b, cr) ) LOCALITY REGIONAL BY TABLE IN "us-east-1"; + +statement ok INSERT INTO regional_by_table_in_us_east VALUES (0, 1, 2, 'us-east-1'); + +statement ok ALTER TABLE regional_by_table_in_us_east SET LOCALITY REGIONAL BY ROW AS "cr" query TT @@ -1834,13 +1874,19 @@ DATABASE alter_locality_test ALTER DATABASE alter_locality_test CONFIGURE ZONE statement ok DROP TABLE regional_by_row; + +statement ok CREATE TABLE regional_by_row ( pk INT PRIMARY KEY, i INT, INDEX(i), FAMILY (pk, i) ) LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO regional_by_row (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY TABLE in "ap-southeast-2" query TT @@ -1875,13 +1921,19 @@ TABLE regional_by_row ALTER TABLE regional_by_row CONFIGURE ZONE USING statement ok DROP TABLE regional_by_row; + +statement ok CREATE TABLE regional_by_row ( pk INT PRIMARY KEY, i INT, INDEX(i), FAMILY (pk, i) ) LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO regional_by_row (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY TABLE IN PRIMARY REGION query TT @@ -1916,13 +1968,19 @@ DATABASE alter_locality_test ALTER DATABASE alter_locality_test CONFIGURE ZONE statement ok DROP TABLE regional_by_row; + +statement ok CREATE TABLE regional_by_row ( pk INT PRIMARY KEY, i INT, INDEX(i), FAMILY (pk, i) ) LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO regional_by_row (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row SET LOCALITY GLOBAL query TT @@ -1958,13 +2016,19 @@ TABLE regional_by_row ALTER TABLE regional_by_row CONFIGURE ZONE USING statement ok DROP TABLE regional_by_row; + +statement ok CREATE TABLE regional_by_row ( pk INT PRIMARY KEY, i INT, INDEX(i), FAMILY (pk, i) ) LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO regional_by_row (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY ROW query TT @@ -2126,7 +2190,11 @@ CREATE TABLE regional_by_row_to_regional_by_row_as ( INDEX(i), FAMILY (pk, i) ) LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO regional_by_row_to_regional_by_row_as (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row_to_regional_by_row_as SET LOCALITY REGIONAL BY ROW AS "cr" query TT @@ -2234,6 +2302,8 @@ DATABASE alter_locality_test ALTER DATABASE alter_locality_test CONFIGURE ZONE statement ok DROP TABLE regional_by_row_as; + +statement ok CREATE TABLE regional_by_row_as ( pk INT PRIMARY KEY, i INT, @@ -2241,7 +2311,11 @@ CREATE TABLE regional_by_row_as ( INDEX(i), FAMILY (cr, pk, i) ) LOCALITY REGIONAL BY ROW AS "cr"; + +statement ok INSERT INTO regional_by_row_as (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row_as SET LOCALITY REGIONAL BY TABLE in "ap-southeast-2" query TT @@ -2294,6 +2368,8 @@ TABLE regional_by_row_as ALTER TABLE regional_by_row_as CONFIGURE ZONE USING statement ok DROP TABLE regional_by_row_as; + +statement ok CREATE TABLE regional_by_row_as ( pk INT PRIMARY KEY, i INT, @@ -2301,7 +2377,11 @@ CREATE TABLE regional_by_row_as ( INDEX(i), FAMILY (cr, pk, i) ) LOCALITY REGIONAL BY ROW AS "cr"; + +statement ok INSERT INTO regional_by_row_as (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row_as SET LOCALITY REGIONAL BY TABLE IN 
PRIMARY REGION query TT @@ -2354,6 +2434,8 @@ DATABASE alter_locality_test ALTER DATABASE alter_locality_test CONFIGURE ZONE statement ok DROP TABLE regional_by_row_as; + +statement ok CREATE TABLE regional_by_row_as ( pk INT PRIMARY KEY, i INT, @@ -2361,7 +2443,11 @@ CREATE TABLE regional_by_row_as ( INDEX(i), FAMILY (cr, pk, i) ) LOCALITY REGIONAL BY ROW AS "cr"; + +statement ok INSERT INTO regional_by_row_as (pk, i) VALUES (1, 1); + +statement ok ALTER TABLE regional_by_row_as SET LOCALITY GLOBAL query TT @@ -2421,6 +2507,8 @@ CREATE TABLE regional_by_row_as_to_regional_by_row ( INDEX(i), FAMILY (cr, pk, i) ) LOCALITY REGIONAL BY ROW AS "cr"; + +statement ok INSERT INTO regional_by_row_as_to_regional_by_row (pk, i) VALUES (1, 1); ALTER TABLE regional_by_row_as_to_regional_by_row SET LOCALITY REGIONAL BY ROW @@ -2475,6 +2563,8 @@ DATABASE alter_locality_test ALTER DATABASE alter_locality_test CONFIGURE ZONE statement ok DROP TABLE regional_by_row_as; + +statement ok CREATE TABLE regional_by_row_as ( pk INT PRIMARY KEY, i INT, diff --git a/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant b/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant index 0c28ebb0b3e6..fdcd6165755f 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant +++ b/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant @@ -468,8 +468,12 @@ SET application_name = 'test_max_retry' # become different from 0. statement OK CREATE SEQUENCE s; - SELECT IF(nextval('s')<3, crdb_internal.force_retry('1h'::INTERVAL), 0); - DROP SEQUENCE s + +statement OK +SELECT IF(nextval('s')<3, crdb_internal.force_retry('1h'::INTERVAL), 0); + +statement OK +DROP SEQUENCE s statement OK RESET application_name @@ -512,6 +516,8 @@ testclustername # Regression for 41834. statement ok CREATE TABLE table41834 (); + +statement ok SELECT crdb_internal.encode_key( -8912529861854991652, diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region b/pkg/ccl/logictestccl/testdata/logic_test/multi_region index 0f506d15d014..a6eee7c1c0fb 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region @@ -78,8 +78,14 @@ us-east-1 us-east-1 statement ok CREATE DATABASE deleted_database; + +statement ok SET sql_safe_updates = false; + +statement ok USE deleted_database; + +statement ok DROP DATABASE deleted_database statement error database.*deleted_database.*does not exist @@ -1126,8 +1132,14 @@ RANGE default ALTER RANGE default CONFIGURE ZONE USING # created before the first region was added to the multi-region DB. 
statement ok CREATE DATABASE start_off_non_multi_region; + +statement ok CREATE TABLE start_off_non_multi_region.public.t(a INT); + +statement ok ALTER DATABASE start_off_non_multi_region PRIMARY REGION "ca-central-1"; + +statement ok ALTER DATABASE start_off_non_multi_region ADD REGION "ap-southeast-2" statement error pgcode 42P12 cannot drop region "ca-central-1"\nHINT: You must designate another region as the primary region using ALTER DATABASE start_off_non_multi_region PRIMARY REGION or remove all other regions before attempting to drop region "ca-central-1" @@ -1333,9 +1345,17 @@ USE db_with_views_and_sequences statement ok CREATE TABLE t (id INT PRIMARY KEY, a INT, b INT); + +statement ok INSERT INTO t VALUES (1, 2, 3), (4, 5, 6); + +statement ok CREATE SEQUENCE s; + +statement ok CREATE VIEW v AS SELECT id, a, b FROM t; + +statement ok CREATE MATERIALIZED VIEW mat_view AS SELECT id, a, b FROM t statement ok @@ -1352,7 +1372,11 @@ mat_view GLOBAL statement ok CREATE SEQUENCE s2; + +statement ok CREATE VIEW v2 AS SELECT id, a, b FROM t; + +statement ok CREATE MATERIALIZED VIEW mat_view2 AS SELECT id, a, b FROM t query TT colnames @@ -1400,6 +1424,8 @@ subtest enum_name_clash statement ok CREATE DATABASE db_enum_name_clash; + +statement ok CREATE TYPE db_enum_name_clash.crdb_internal_region AS ENUM (); statement error pq: type "db_enum_name_clash.public.crdb_internal_region" already exists\nHINT: object "crdb_internal_regions" must be renamed or dropped before adding the primary region\nDETAIL: multi-region databases employ an internal enum called crdb_internal_region to manage regions which conflicts with the existing object @@ -1409,6 +1435,8 @@ ALTER DATABASE db_enum_name_clash SET PRIMARY REGION "us-east-1" # database. statement ok DROP TYPE db_enum_name_clash.crdb_internal_region; + +statement ok ALTER DATABASE db_enum_name_clash SET PRIMARY REGION "us-east-1" @@ -1418,8 +1446,12 @@ subtest regional_by_row_view statement ok CREATE DATABASE db; + +statement ok USE db; ALTER DATABASE db PRIMARY REGION "us-east-1"; + +statement ok CREATE TABLE kv (k INT PRIMARY KEY, v INT) LOCALITY REGIONAL BY ROW statement ok @@ -1433,6 +1465,8 @@ subtest create_table_as_regional_by_row statement ok CREATE DATABASE "mr-create-table-as" PRIMARY REGION "ap-southeast-2" REGIONS "ap-southeast-2", "ca-central-1", "us-east-1"; + +statement ok USE "mr-create-table-as" statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_backup b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_backup index dee4eddaa12e..1feb8950b511 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_backup +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_backup @@ -376,7 +376,11 @@ ALTER DATABASE "mr-backup-2" CONFIGURE ZONE USING gc.ttlseconds = 1 statement ok BACKUP DATABASE "mr-backup-1" TO 'nodelocal://1/mr-backup-1/'; + +statement ok BACKUP DATABASE "mr-backup-2" TO 'nodelocal://1/mr-backup-2/'; + +statement ok BACKUP DATABASE "mr-backup-1", "mr-backup-2" TO 'nodelocal://1/mr-backup-combined/' query T @@ -391,6 +395,8 @@ test statement ok DROP DATABASE "mr-backup-1"; + +statement ok DROP DATABASE "mr-backup-2" query T @@ -712,6 +718,8 @@ TABLE regional_by_table_in_ca_central_1 ALTER TABLE regional_by_table_in_ca_cen statement ok DROP DATABASE "mr-backup-1"; + +statement ok DROP DATABASE "mr-backup-2" query T @@ -1174,6 +1182,8 @@ TABLE global_table ALTER TABLE global_table CONFIGURE ZONE USING statement ok CREATE DATABASE non_mr_backup; + +statement ok USE non_mr_backup 
statement ok @@ -1328,6 +1338,8 @@ CREATE DATABASE "mr-restore-1" primary region "ap-southeast-2" regions "us-east- statement ok RESTORE TABLE "mr-backup-2".global_table FROM 'nodelocal://1/mr-backup-2/' WITH into_db='mr-restore-1'; + +statement ok USE "mr-restore-1"; query TT diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_privileges b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_privileges index 63eb12277b5b..eedd7315092a 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_privileges +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_privileges @@ -4,9 +4,17 @@ user root statement ok CREATE DATABASE db; + +statement ok CREATE TABLE db.t(); + +statement ok GRANT CREATE ON DATABASE db TO testuser; + +statement ok CREATE TABLE db.t2(); + +statement ok ALTER USER testuser CREATEDB; user testuser @@ -66,6 +74,8 @@ user root statement ok CREATE DATABASE alter_db PRIMARY REGION "us-east-1"; + +statement ok CREATE TABLE alter_db.t(); user testuser @@ -131,8 +141,14 @@ user testuser statement ok ALTER DATABASE alter_mr_db SET PRIMARY REGION "us-east-1"; + +statement ok ALTER DATABASE alter_mr_db SURVIVE ZONE FAILURE; + +statement ok ALTER DATABASE alter_mr_db DROP REGION "ap-southeast-2"; + +statement ok ALTER DATABASE alter_mr_db ADD REGION "ap-southeast-2"; user root @@ -146,8 +162,14 @@ user testuser statement ok ALTER DATABASE alter_mr_db SET PRIMARY REGION "us-east-1"; + +statement ok ALTER DATABASE alter_mr_db SURVIVE ZONE FAILURE; + +statement ok ALTER DATABASE alter_mr_db DROP REGION "ap-southeast-2"; + +statement ok ALTER DATABASE alter_mr_db ADD REGION "ap-southeast-2"; user root @@ -161,8 +183,14 @@ user testuser statement ok ALTER DATABASE alter_mr_db SET PRIMARY REGION "us-east-1"; + +statement ok ALTER DATABASE alter_mr_db SURVIVE ZONE FAILURE; + +statement ok ALTER DATABASE alter_mr_db DROP REGION "ap-southeast-2"; + +statement ok ALTER DATABASE alter_mr_db ADD REGION "ap-southeast-2"; subtest add_drop_region_repartitioning_privs @@ -171,12 +199,26 @@ user root statement ok CREATE DATABASE repartition_privs PRIMARY REGION "ca-central-1" REGIONS "us-east-1"; + +statement ok CREATE TABLE repartition_privs.rbr () LOCALITY REGIONAL BY ROW; + +statement ok CREATE TABLE repartition_privs.regional() LOCALITY REGIONAL; + +statement ok CREATE TABLE repartition_privs.global() LOCALITY GLOBAL; + +statement ok CREATE VIEW repartition_privs.v AS SELECT 1; + +statement ok CREATE SEQUENCE repartition_privs.seq; + +statement ok GRANT CREATE ON DATABASE repartition_privs TO testuser; + +statement ok CREATE TABLE repartition_privs.rbr2 () LOCALITY REGIONAL BY ROW user testuser diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior index 8e178d21398a..06820c4687f1 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior @@ -9,6 +9,8 @@ SET CLUSTER SETTING kv.closed_timestamp.target_duration = '10ms'; statement ok CREATE DATABASE multi_region_test_db PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2", "us-east-1"; + +statement ok USE multi_region_test_db statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs index f91c132f0a58..5c68bc280972 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs +++ 
b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs @@ -17,11 +17,17 @@ SELECT crdb_internal.validate_multi_region_zone_configs() # a zone config drop. statement ok CREATE DATABASE drop_region_db PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2"; + +statement ok USE drop_region_db statement ok CREATE TABLE global_table () LOCALITY GLOBAL; + +statement ok CREATE TABLE global_table_custom_gc_ttl () LOCALITY GLOBAL; + +statement ok ALTER TABLE global_table_custom_gc_ttl CONFIGURE ZONE USING gc.ttlseconds = 10 statement ok @@ -535,7 +541,10 @@ TABLE regional_by_row ALTER TABLE regional_by_row CONFIGURE ZONE USING lease_preferences = '[[+region=us-east-1]]' statement ok -DROP TABLE regional_by_row; CREATE TABLE regional_by_row ( +DROP TABLE regional_by_row + +statement ok +CREATE TABLE regional_by_row ( pk INT PRIMARY KEY, i INT, INDEX idx(i), @@ -775,10 +784,20 @@ DROP DATABASE "mr-zone-configs" CASCADE # Test validation for initial SET PRIMARY REGION statement ok CREATE DATABASE initial_multiregion_db; + +statement ok USE initial_multiregion_db; + +statement ok CREATE TABLE tbl (a INT PRIMARY KEY, INDEX a_idx (a)); + +statement ok ALTER DATABASE initial_multiregion_db CONFIGURE ZONE USING gc.ttlseconds = 5; + +statement ok ALTER TABLE tbl CONFIGURE ZONE USING gc.ttlseconds = 5; + +statement ok ALTER INDEX tbl@a_idx CONFIGURE ZONE USING gc.ttlseconds = 5 statement ok @@ -1689,6 +1708,8 @@ TABLE tbl10 ALTER TABLE tbl10 CONFIGURE ZONE USING # Ensure that built-in no-ops on non-multi-region tables. statement ok CREATE DATABASE not_multi_region; + +statement ok USE not_multi_region statement ok @@ -1736,6 +1757,8 @@ subtest reset_multi_region_zone_configs_database statement ok CREATE DATABASE "rebuild_zc_db" primary region "ca-central-1" regions "ap-southeast-2", "us-east-1"; + +statement ok USE "rebuild_zc_db" query TT diff --git a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_enum b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_enum index 5ac2d97e6b6e..26f4e6a29a20 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_enum +++ b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_enum @@ -61,7 +61,9 @@ subtest drop_enum_partitioning_value statement ok drop table if exists tbl; -drop type if exists t; +drop type if exists t + +statement ok create type t as enum('a', 'b', 'c'); create table tbl (pk t PRIMARY KEY) PARTITION BY LIST (pk) (PARTITION "a" VALUES IN ('a')) @@ -70,7 +72,9 @@ alter type t drop value 'a' statement ok drop table if exists tbl; -drop type if exists t; +drop type if exists t + +statement ok create type t as enum('a', 'b', 'c'); create table tbl (i INT, k t, PRIMARY KEY (i, k)) PARTITION BY LIST (i) (PARTITION "one" VALUES IN (1) PARTITION BY RANGE (k) (PARTITION "a" VALUES FROM ('a') TO ('b'))) @@ -79,7 +83,9 @@ alter type t drop value 'a' statement ok drop table if exists tbl; -drop type if exists t; +drop type if exists t + +statement ok create type t as enum('a', 'b', 'c'); create table tbl (i INT, k t, PRIMARY KEY (i, k), INDEX idx (k) PARTITION BY RANGE (k) (PARTITION "a" VALUES FROM ('a') TO ('b'))) diff --git a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit index 43a2e0de4535..5249fa2427d8 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit +++ b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit @@ -903,6 +903,8 @@ pk pk2 partition_by a b c d # Regression tests for #59583. 
statement ok DROP TABLE t; + +statement ok CREATE TABLE t ( pk INT, pk2 INT NOT NULL, @@ -913,6 +915,8 @@ CREATE TABLE t ( ) PARTITION BY LIST (partition_by) ( PARTITION p1 VALUES IN (1) ); + +statement ok INSERT INTO t VALUES (1,2,3,4,5),(11,12,13,14,15) query IIIII rowsort @@ -948,7 +952,11 @@ SELECT * FROM t@new_idx # Tests adding a new UNIQUE index with implicit partitioning. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (partition_by INT, v INT); + +statement ok INSERT INTO t VALUES (1, 1), (2, 1); statement error could not create unique constraint "uniq_on_t"\nDETAIL: Key \(v\)=\(1\) is duplicated @@ -967,10 +975,14 @@ CREATE UNIQUE INDEX uniq_on_t ON t(v) PARTITION BY LIST (partition_by) ( # Tests adding a UNIQUE index with PARTITION ALL BY implicit partitioning. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (partition_by INT, v INT) PARTITION ALL BY LIST (partition_by) ( PARTITION one VALUES IN (1), PARTITION two VALUES IN (2) ); + +statement ok INSERT INTO t VALUES (1, 1), (2, 1); statement error could not create unique constraint "uniq_on_t"\nDETAIL: Key \(v\)=\(1\) is duplicated @@ -983,7 +995,11 @@ CREATE UNIQUE INDEX uniq_on_t ON t(v) # Tests adding a new partial UNIQUE index with implicit partitioning. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (partition_by INT, a INT, b INT); + +statement ok INSERT INTO t VALUES (1, 1, 1), (1, 2, 2), (2, 1, 1), (2, 2, -2); statement error could not create unique constraint "uniq_on_t"\nDETAIL: Key \(a\)=\(1\) is duplicated @@ -1003,10 +1019,14 @@ CREATE UNIQUE INDEX uniq_on_t ON t(a) PARTITION BY LIST (partition_by) ( # partitioning. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (partition_by INT, a INT, b INT) PARTITION ALL BY LIST (partition_by) ( PARTITION one VALUES IN (1), PARTITION two VALUES IN (2) ); + +statement ok INSERT INTO t VALUES (1, 1, 1), (1, 2, 2), (2, 1, 1), (2, 2, -2); statement error could not create unique constraint "uniq_on_t"\nDETAIL: Key \(a\)=\(1\) is duplicated diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row index 8e31ff360216..1cd0e76c9e77 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row @@ -846,6 +846,8 @@ PARTITION "us-east-1" OF TABLE t_regional_by_row ALTER PARTITION "us-east-1" OF statement ok CREATE DATABASE two_region_test_db PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2"; + +statement ok USE two_region_test_db statement ok @@ -876,6 +878,8 @@ ca-central-1 6 statement ok CREATE DATABASE add_regions WITH PRIMARY REGION "ca-central-1"; + +statement ok USE add_regions statement ok @@ -1163,6 +1167,8 @@ DATABASE add_regions ALTER DATABASE add_regions CONFIGURE ZONE USING statement ok CREATE DATABASE add_regions_in_txn WITH PRIMARY REGION "ca-central-1"; + +statement ok USE add_regions_in_txn statement ok @@ -1252,7 +1258,9 @@ regional_by_row_like CREATE TABLE public.regional_by_ ) LOCALITY REGIONAL BY TABLE IN PRIMARY REGION statement ok -DROP TABLE regional_by_row_like; +DROP TABLE regional_by_row_like + +statement ok CREATE TABLE regional_by_row_like (LIKE regional_by_row INCLUDING INDEXES) query TT @@ -1269,6 +1277,8 @@ regional_by_row_like CREATE TABLE public.regional_by_ statement ok DROP TABLE regional_by_row_like; + +statement ok CREATE TABLE regional_by_row_like (LIKE regional_by_row INCLUDING ALL) query TT @@ -1392,13 +1402,19 @@ regional_by_row_fk CREATE TABLE 
public.regional_by_row_fk ( statement ok CREATE DATABASE drop_regions PRIMARY REGION "ca-central-1" REGIONS "us-east-1", "ap-southeast-2"; + +statement ok USE drop_regions; + +statement ok CREATE TABLE regional_by_row ( pk INT PRIMARY KEY, i INT, INDEX(i), FAMILY (pk, i) ) LOCALITY REGIONAL BY ROW; + +statement ok CREATE TABLE regional_by_row_as ( pk INT PRIMARY KEY, i INT, @@ -1644,7 +1660,11 @@ ALTER DATABASE drop_regions DROP REGION "ca-central-1"; # Drop the two regional by row tables and now the primary region can be removed. statement ok DROP TABLE regional_by_row; + +statement ok DROP TABLE regional_by_row_as; + +statement ok ALTER DATABASE drop_regions DROP REGION "ca-central-1"; ############################################## diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_auto_rehoming b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_auto_rehoming index 9b5abf91e337..1a8e56f40144 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_auto_rehoming +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_auto_rehoming @@ -3,8 +3,14 @@ statement ok SET experimental_enable_auto_rehoming = true; CREATE DATABASE testdb PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2"; + +statement ok USE testdb; + +statement ok CREATE TABLE rbr (p INT PRIMARY KEY, s STRING) LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO rbr (p, s) VALUES (1, 'hi') query TIT @@ -72,8 +78,14 @@ SET on_update_rehome_row_enabled = true # Re-homing still works even after locality change. statement ok DROP TABLE rbr; + +statement ok CREATE TABLE rbr (p INT PRIMARY KEY, s STRING); + +statement ok ALTER TABLE rbr SET LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO rbr (p, s) VALUES (1, 'hi') query TIT diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior index 27df75baa3f8..271b14825239 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior @@ -10,6 +10,8 @@ SET CLUSTER SETTING kv.closed_timestamp.target_duration = '10ms'; statement ok CREATE DATABASE multi_region_test_db PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2", "us-east-1" SURVIVE REGION FAILURE; + +statement ok USE multi_region_test_db statement ok @@ -2231,10 +2233,20 @@ subtest regressions # ERROR: missing "crdb_region" primary key column. statement ok CREATE DATABASE single_region_test_db PRIMARY REGION "ap-southeast-2"; + +statement ok USE single_region_test_db; + +statement ok CREATE TABLE t63109 (a INT, b STRING); + +statement ok ALTER TABLE t63109 SET LOCALITY REGIONAL BY ROW; + +statement ok INSERT INTO t63109 VALUES (1, 'one'); + +statement ok UPSERT INTO t63109 VALUES (1, 'two'); UPSERT INTO t63109 (crdb_region, a, b) VALUES ('ap-southeast-2', 1, 'three'); UPSERT INTO t63109 (a, b) VALUES (1, 'four'); @@ -2243,8 +2255,14 @@ UPSERT INTO t63109 (a, b) VALUES (1, 'four'); # even if the stats show zero rows. 
statement ok CREATE DATABASE db PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2", "us-east-1"; + +statement ok USE db; + +statement ok CREATE TABLE t65064 (username STRING NOT NULL UNIQUE) LOCALITY REGIONAL BY ROW; + +statement ok ALTER TABLE t65064 INJECT STATISTICS '[ { "columns": ["username"], diff --git a/pkg/ccl/logictestccl/testdata/logic_test/super_regions b/pkg/ccl/logictestccl/testdata/logic_test/super_regions index c36242c461c7..12ba356eee67 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/super_regions +++ b/pkg/ccl/logictestccl/testdata/logic_test/super_regions @@ -31,8 +31,14 @@ ALTER DATABASE db ADD SUPER REGION "test2" VALUES "us-east-1" statement ok CREATE DATABASE mr1 PRIMARY REGION "us-east-1"; + +statement ok CREATE TABLE mr1.rbt(x INT) LOCALITY REGIONAL BY TABLE; + +statement ok CREATE TABLE mr1.rbr(x INT) LOCALITY REGIONAL BY ROW; + +statement ok ALTER DATABASE mr1 ADD SUPER REGION "test" VALUES "us-east-1"; query TT @@ -588,6 +594,8 @@ ALTER DATABASE db DROP REGION "us-east-1"; # Dropping primary region. statement ok CREATE DATABASE db2 PRIMARY REGION "ca-central-1"; + +statement ok ALTER DATABASE db2 ADD SUPER REGION "test" VALUES "ca-central-1"; statement error pq: region ca-central-1 is part of super region test @@ -598,7 +606,11 @@ ALTER DATABASE db2 DROP REGION "ca-central-1"; # to be part of another super region. statement ok CREATE DATABASE db3 PRIMARY REGION "ca-central-1" REGIONS "us-east-1", "us-west-1"; + +statement ok ALTER DATABASE db3 ADD SUPER REGION "test1" VALUES "ca-central-1", "us-west-1"; + +statement ok ALTER DATABASE db3 ADD SUPER REGION "test2" VALUES "us-east-1"; statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/zone b/pkg/ccl/logictestccl/testdata/logic_test/zone index 7201f7123194..060e74f6449e 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/zone +++ b/pkg/ccl/logictestccl/testdata/logic_test/zone @@ -582,17 +582,33 @@ CONSTRAINTS='[+region=us-east1, -region=us-east1]' # names. statement ok CREATE DATABASE "my database"; + +statement ok USE "my database"; + +statement ok CREATE TABLE "my table" (x INT PRIMARY KEY) PARTITION BY LIST (x) ( PARTITION "my partition" VALUES IN (1) ); + +statement ok CREATE INDEX "my index" ON "my table" (x) PARTITION BY LIST (x) ( PARTITION "my partition" VALUES IN (1) ); + +statement ok ALTER DATABASE "my database" CONFIGURE ZONE USING num_replicas = 1; + +statement ok ALTER TABLE "my table" CONFIGURE ZONE USING num_replicas = 1; + +statement ok ALTER INDEX "my table"@"my index" CONFIGURE ZONE USING num_replicas = 1; + +statement ok ALTER PARTITION "my partition" OF INDEX "my table"@"my table_pkey" CONFIGURE ZONE USING num_replicas = 1; + +statement ok ALTER PARTITION "my partition" OF INDEX "my table"@"my index" CONFIGURE ZONE USING num_replicas = 1 query TTTTTT @@ -750,9 +766,13 @@ subtest authorization statement ok CREATE DATABASE auth; + +statement ok CREATE TABLE auth.t (x INT PRIMARY KEY) PARTITION BY LIST (x) ( PARTITION p VALUES IN (1) ); + +statement ok CREATE INDEX x ON auth.t (x) PARTITION BY LIST (x) ( PARTITION p VALUES IN (1) ) @@ -1060,6 +1080,8 @@ parent_modify NULL ALTER TABLE test.public.parent_modify CONFIGURE ZONE USING # change don't drop zone configs on rewritten indexes. 
statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT NOT NULL, z INT, w INT, INDEX i1 (z), INDEX i2 (w), FAMILY (x, y, z, w) ); + +statement ok ALTER INDEX t@i1 PARTITION BY LIST (z) ( PARTITION p1 VALUES IN (1, 2), PARTITION p2 VALUES IN (3, 4) ); @@ -1077,10 +1101,14 @@ ALTER INDEX t@i2 PARTITION BY LIST (w) ( PARTITION p3 VALUES IN (5, 6), PARTITION p4 VALUES IN (7, 8) ); + +statement ok ALTER PARTITION p1 OF INDEX t@i1 CONFIGURE ZONE USING gc.ttlseconds = 15210; ALTER PARTITION p2 OF INDEX t@i1 CONFIGURE ZONE USING gc.ttlseconds = 15213; ALTER PARTITION p3 OF INDEX t@i2 CONFIGURE ZONE USING gc.ttlseconds = 15411; ALTER PARTITION p4 OF INDEX t@i2 CONFIGURE ZONE USING gc.ttlseconds = 15418; + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) # Ensure that all the partitions of i1 and i2 still have their zone configs. diff --git a/pkg/ccl/multiregionccl/regional_by_row_test.go b/pkg/ccl/multiregionccl/regional_by_row_test.go index 379949222865..68bb68f4efdc 100644 --- a/pkg/ccl/multiregionccl/regional_by_row_test.go +++ b/pkg/ccl/multiregionccl/regional_by_row_test.go @@ -596,27 +596,21 @@ func TestIndexCleanupAfterAlterFromRegionalByRow(t *testing.T) { ) defer cleanup() - _, err := sqlDB.Exec( - `CREATE DATABASE "mr-zone-configs" WITH PRIMARY REGION "us-east1" REGIONS "us-east2", "us-east3"; -USE "mr-zone-configs"; + sqlRunner := sqlutils.MakeSQLRunner(sqlDB) + sqlRunner.Exec(t, `CREATE DATABASE "mr-zone-configs" WITH PRIMARY REGION "us-east1" REGIONS "us-east2", "us-east3";`) + sqlRunner.Exec(t, `USE "mr-zone-configs";`) + sqlRunner.Exec(t, ` CREATE TABLE regional_by_row ( pk INT PRIMARY KEY, region_col crdb_internal_region NOT NULL, i INT, INDEX(i) ) LOCALITY REGIONAL BY ROW`) - require.NoError(t, err) // Alter the table to REGIONAL BY TABLE, and then back to REGIONAL BY ROW, to // create some indexes that need cleaning up. - sqlRunner.Exec(t, fmt.Sprintf(`ALTER TABLE regional_by_row SET LOCALITY %s;`, tc.locality)) - _, err = sqlDB.Exec( - fmt.Sprintf( - `ALTER TABLE regional_by_row SET LOCALITY %s; - ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY ROW`, - tc.locality, - ), - ) - require.NoError(t, err) + sqlRunner.Exec(t, fmt.Sprintf(`ALTER TABLE regional_by_row SET LOCALITY %s;`, tc.locality)) + sqlRunner.Exec(t, `ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY ROW`) // Validate that the indexes requiring cleanup exist.
type row struct { @@ -626,7 +620,7 @@ CREATE TABLE regional_by_row ( for { // First confirm that the schema change job has completed - res := sqlDB.QueryRow(`WITH jobs AS ( + res := sqlRunner.QueryRow(t, `WITH jobs AS ( SELECT status, crdb_internal.pb_to_json( 'cockroach.sql.jobs.jobspb.Payload', payload, @@ -638,11 +632,8 @@ CREATE TABLE regional_by_row ( FROM jobs WHERE (job->>'schemaChange') IS NOT NULL AND status = 'running'`) - require.NoError(t, res.Err()) - numJobs := 0 - err = res.Scan(&numJobs) - require.NoError(t, err) + res.Scan(&numJobs) if numJobs == 0 { break } @@ -661,14 +652,12 @@ CREATE TABLE regional_by_row ( FROM jobs WHERE (job->>'schemaChangeGC') IS NOT NULL AND status = '%s'` - res, err := sqlDB.Query(fmt.Sprintf(query, status)) - require.NoError(t, err) + res := sqlRunner.Query(t, fmt.Sprintf(query, status)) var rows []row for res.Next() { r := row{} - err = res.Scan(&r.status, &r.details) - require.NoError(t, err) + require.NoError(t, res.Scan(&r.status, &r.details)) rows = append(rows, r) } if err := res.Err(); err != nil { @@ -689,7 +678,7 @@ CREATE TABLE regional_by_row ( expectedGCJobsForDrops := 4 expectedGCJobsForTempIndexes := 4 // Now check that we have the right number of index GC jobs pending. - err = queryIndexGCJobsAndValidateCount(`running`, expectedGCJobsForDrops+expectedGCJobsForTempIndexes) + err := queryIndexGCJobsAndValidateCount(`running`, expectedGCJobsForDrops+expectedGCJobsForTempIndexes) require.NoError(t, err) err = queryIndexGCJobsAndValidateCount(`succeeded`, 0) require.NoError(t, err) @@ -707,8 +696,7 @@ CREATE TABLE regional_by_row ( require.NoError(t, err) // Change gc.ttlseconds to speed up the cleanup. - _, err = sqlDB.Exec(`ALTER TABLE regional_by_row CONFIGURE ZONE USING gc.ttlseconds = 1`) - require.NoError(t, err) + _ = sqlRunner.Exec(t, `ALTER TABLE regional_by_row CONFIGURE ZONE USING gc.ttlseconds = 1`) // Validate that indexes are cleaned up. 
testutils.SucceedsSoon(t, queryAndEnsureThatIndexGCJobsSucceeded(expectedGCJobsForDrops+expectedGCJobsForTempIndexes)) diff --git a/pkg/ccl/partitionccl/drop_test.go b/pkg/ccl/partitionccl/drop_test.go index 5615099ba619..5f55395ca2f5 100644 --- a/pkg/ccl/partitionccl/drop_test.go +++ b/pkg/ccl/partitionccl/drop_test.go @@ -196,18 +196,17 @@ SELECT job_id tdb := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + tdb.Exec(t, `CREATE TYPE typ AS ENUM ('a', 'b', 'c')`) tdb.Exec(t, ` - CREATE TYPE typ AS ENUM ('a', 'b', 'c'); - CREATE TABLE t (e typ PRIMARY KEY) PARTITION BY LIST (e) ( + CREATE TABLE t (e typ PRIMARY KEY) PARTITION BY LIST (e) ( PARTITION a VALUES IN ('a'), PARTITION b VALUES IN ('b'), PARTITION c VALUES IN ('c') - ); - CREATE INDEX idx ON t (e); - ALTER PARTITION a OF TABLE t CONFIGURE ZONE USING range_min_bytes = 123456, range_max_bytes = 654321; - ALTER INDEX t@idx CONFIGURE ZONE USING gc.ttlseconds = 1; - DROP INDEX t@idx; - `) + )`) + tdb.Exec(t, `CREATE INDEX idx ON t (e)`) + tdb.Exec(t, `ALTER PARTITION a OF TABLE t CONFIGURE ZONE USING range_min_bytes = 123456, range_max_bytes = 654321`) + tdb.Exec(t, `ALTER INDEX t@idx CONFIGURE ZONE USING gc.ttlseconds = 1`) + tdb.Exec(t, `DROP INDEX t@idx`) waitForJobDone(t, tdb, "GC for DROP INDEX%idx") }) @@ -237,28 +236,29 @@ SELECT job_id tdb := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + tdb.Exec(t, `CREATE TYPE typ AS ENUM ('a', 'b', 'c')`) tdb.Exec(t, ` - CREATE TYPE typ AS ENUM ('a', 'b', 'c'); - CREATE TABLE t (e typ PRIMARY KEY) PARTITION BY LIST (e) ( + CREATE TABLE t (e typ PRIMARY KEY) PARTITION BY LIST (e) ( PARTITION a VALUES IN ('a'), PARTITION b VALUES IN ('b'), PARTITION c VALUES IN ('c') - ); - CREATE INDEX idx + )`) + tdb.Exec(t, ` + CREATE INDEX idx ON t (e) PARTITION BY LIST (e) ( PARTITION ai VALUES IN ('a'), PARTITION bi VALUES IN ('b'), PARTITION ci VALUES IN ('c') - ); - ALTER PARTITION ai OF INDEX t@idx CONFIGURE ZONE USING range_min_bytes = 123456, range_max_bytes = 654321; - ALTER PARTITION a OF TABLE t CONFIGURE ZONE USING range_min_bytes = 123456, range_max_bytes = 654321; - ALTER INDEX t@idx CONFIGURE ZONE USING gc.ttlseconds = 1; - DROP INDEX t@idx; - DROP TABLE t; - DROP TYPE typ; - `) + )`, + ) + tdb.Exec(t, `ALTER PARTITION ai OF INDEX t@idx CONFIGURE ZONE USING range_min_bytes = 123456, range_max_bytes = 654321`) + tdb.Exec(t, `ALTER PARTITION a OF TABLE t CONFIGURE ZONE USING range_min_bytes = 123456, range_max_bytes = 654321`) + tdb.Exec(t, `ALTER INDEX t@idx CONFIGURE ZONE USING gc.ttlseconds = 1`) + tdb.Exec(t, `DROP INDEX t@idx`) + tdb.Exec(t, `DROP TABLE t`) + tdb.Exec(t, `DROP TYPE typ`) waitForJobDone(t, tdb, "GC for DROP INDEX%idx") tdb.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 1`) diff --git a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/list_partitions b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/list_partitions index 4c7c06ddbfcc..b98579e156d6 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/list_partitions +++ b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/list_partitions @@ -55,6 +55,9 @@ splits database=db table=list_partitions # Try the same thing on a secondary index.
exec-sql CREATE INDEX idx ON db.list_partitions (j); +---- + +exec-sql ALTER INDEX db.list_partitions@idx PARTITION BY LIST (j) ( PARTITION one_and_five VALUES IN (2, 5), PARTITION four_and_three VALUES IN (4, 3), diff --git a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/mixed_partitions b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/mixed_partitions index 463fae82b9af..24c784ff4f4f 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/mixed_partitions +++ b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/mixed_partitions @@ -42,6 +42,9 @@ splits database=db table=list_then_range_partitions # Try the same thing on a secondary index. exec-sql CREATE INDEX idx ON db.list_then_range_partitions (C1, N2); +---- + +exec-sql ALTER INDEX db.list_then_range_partitions@idx PARTITION BY LIST (C1) ( PARTITION P1C1 VALUES IN ('A', 'C') PARTITION BY RANGE (N2) ( @@ -135,6 +138,9 @@ splits database=db table=list_then_list_then_range_partitions_mixed # Try the same thing on a secondary index. exec-sql CREATE INDEX idx ON db.list_then_list_then_range_partitions_mixed (C1, C2, N3); +---- + +exec-sql ALTER INDEX db.list_then_list_then_range_partitions_mixed@idx PARTITION BY LIST (C1) ( PARTITION P1C1 VALUES IN ('A', 'C'), PARTITION P2C1 VALUES IN ('B', 'D') diff --git a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/multi_column_partitions b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/multi_column_partitions index e2d4a5212852..c2e1d4cf4d1f 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/multi_column_partitions +++ b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/multi_column_partitions @@ -35,6 +35,9 @@ splits database=db table=list_multi_column_partitions # Try the same thing on a secondary index. exec-sql CREATE INDEX idx ON db.list_multi_column_partitions(i, j); +---- + +exec-sql ALTER INDEX db.list_multi_column_partitions@idx PARTITION BY LIST (i, j) ( PARTITION two_and_default VALUES IN ((2, DEFAULT)), PARTITION six_and_seven VALUES IN ((6, 7)), diff --git a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/range_partitions b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/range_partitions index 926e524d68b4..ed825a91c403 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/range_partitions +++ b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/testdata/range_partitions @@ -24,6 +24,9 @@ splits database=db table=range_partitions # Try the same thing on a secondary index. 
exec-sql CREATE INDEX idx ON db.range_partitions (j); +---- + +exec-sql ALTER INDEX db.range_partitions@idx PARTITION BY RANGE (j) ( PARTITION less_than_five VALUES FROM (minvalue) to (5), PARTITION between_five_and_ten VALUES FROM (5) to (10), diff --git a/pkg/ccl/spanconfigccl/spanconfigsqlwatcherccl/sqlwatcher_test.go b/pkg/ccl/spanconfigccl/spanconfigsqlwatcherccl/sqlwatcher_test.go index adeb62e5fc9c..e7d3dc9d0769 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqlwatcherccl/sqlwatcher_test.go +++ b/pkg/ccl/spanconfigccl/spanconfigsqlwatcherccl/sqlwatcher_test.go @@ -10,6 +10,7 @@ package spanconfigsqlwatcherccl import ( "context" + "strings" "sync" "sync/atomic" "testing" @@ -132,97 +133,117 @@ func TestSQLWatcherReactsToUpdates(t *testing.T) { }() testCases := []struct { - stmt string + stmts []string expectedIDs descpb.IDs expectedPTSUpdates []spanconfig.ProtectedTimestampUpdate }{ { - stmt: "CREATE TABLE t()", + stmts: []string{"CREATE TABLE t()"}, expectedIDs: ids(1), }, { - stmt: "CREATE TABLE t2(); ALTER TABLE t2 CONFIGURE ZONE USING num_replicas = 3", + stmts: []string{ + "CREATE TABLE t2();", + "ALTER TABLE t2 CONFIGURE ZONE USING num_replicas = 3", + }, expectedIDs: ids(2), }, { - stmt: "CREATE DATABASE d; CREATE TABLE d.t1(); CREATE TABLE d.t2()", + stmts: []string{ + "CREATE DATABASE d;", + "CREATE TABLE d.t1();", + "CREATE TABLE d.t2()", + }, expectedIDs: ids(3, 4, 5, 6), }, { - stmt: "ALTER DATABASE d CONFIGURE ZONE USING num_replicas=5", + stmts: []string{"ALTER DATABASE d CONFIGURE ZONE USING num_replicas=5"}, expectedIDs: ids(3), }, { - stmt: "CREATE TABLE t3(); CREATE TABLE t4()", + stmts: []string{ + "CREATE TABLE t3();", + "CREATE TABLE t4()", + }, expectedIDs: ids(7, 8), }, { - stmt: "ALTER TABLE t3 CONFIGURE ZONE USING num_replicas=5; CREATE TABLE t5(); DROP TABLE t4;", + stmts: []string{ + "ALTER TABLE t3 CONFIGURE ZONE USING num_replicas=5;", + "CREATE TABLE t5();", + "DROP TABLE t4;", + }, expectedIDs: ids(7, 8, 9), }, // Named zone tests. { - stmt: "ALTER RANGE DEFAULT CONFIGURE ZONE USING num_replicas = 7", + stmts: []string{"ALTER RANGE DEFAULT CONFIGURE ZONE USING num_replicas = 7"}, expectedIDs: descpb.IDs{keys.RootNamespaceID}, }, { - stmt: "ALTER RANGE liveness CONFIGURE ZONE USING num_replicas = 7", + stmts: []string{"ALTER RANGE liveness CONFIGURE ZONE USING num_replicas = 7"}, expectedIDs: descpb.IDs{keys.LivenessRangesID}, }, { - stmt: "ALTER RANGE meta CONFIGURE ZONE USING num_replicas = 7", + stmts: []string{"ALTER RANGE meta CONFIGURE ZONE USING num_replicas = 7"}, expectedIDs: descpb.IDs{keys.MetaRangesID}, }, { - stmt: "ALTER RANGE system CONFIGURE ZONE USING num_replicas = 7", + stmts: []string{"ALTER RANGE system CONFIGURE ZONE USING num_replicas = 7"}, expectedIDs: descpb.IDs{keys.SystemRangesID}, }, { - stmt: "ALTER RANGE timeseries CONFIGURE ZONE USING num_replicas = 7", + stmts: []string{"ALTER RANGE timeseries CONFIGURE ZONE USING num_replicas = 7"}, expectedIDs: descpb.IDs{keys.TimeseriesRangesID}, }, // Test that events on types/schemas are also captured. { - stmt: "CREATE DATABASE db; CREATE SCHEMA db.sc", + stmts: []string{ + "CREATE DATABASE db;", + "CREATE SCHEMA db.sc", + }, // One ID each for the parent database, the public schema and the schema. expectedIDs: ids(10, 11, 12), }, { - stmt: "CREATE TYPE typ AS ENUM()", + stmts: []string{"CREATE TYPE typ AS ENUM()"}, // One ID each for the enum and the array type. expectedIDs: ids(13, 14), }, // Test that pts updates are seen. 
{ - stmt: "BACKUP TABLE t,t2 INTO 'nodelocal://1/foo'", + stmts: []string{"BACKUP TABLE t,t2 INTO 'nodelocal://1/foo'"}, expectedIDs: ids(1, 2), }, { - stmt: "BACKUP DATABASE d INTO 'nodelocal://1/foo'", + stmts: []string{"BACKUP DATABASE d INTO 'nodelocal://1/foo'"}, expectedIDs: ids(3), }, { - stmt: "BACKUP TABLE d.* INTO 'nodelocal://1/foo'", + stmts: []string{"BACKUP TABLE d.* INTO 'nodelocal://1/foo'"}, expectedIDs: ids(3), }, { - stmt: "BACKUP INTO 'nodelocal://1/foo'", + stmts: []string{"BACKUP INTO 'nodelocal://1/foo'"}, expectedPTSUpdates: []spanconfig.ProtectedTimestampUpdate{{ClusterTarget: true, TenantTarget: roachpb.TenantID{}}}, }, { - stmt: ` -SELECT crdb_internal.create_tenant(2); -BACKUP TENANT 2 INTO 'nodelocal://1/foo'`, + stmts: []string{ + "SELECT crdb_internal.create_tenant(2);", + "BACKUP TENANT 2 INTO 'nodelocal://1/foo'", + }, expectedPTSUpdates: []spanconfig.ProtectedTimestampUpdate{{ClusterTarget: false, TenantTarget: roachpb.MakeTenantID(2)}}, }, } for _, tc := range testCases { - t.Run(tc.stmt, func(t *testing.T) { + t.Run(strings.Join(tc.stmts, "_"), func(t *testing.T) { reset() - tdb.Exec(t, tc.stmt) + for _, stmt := range tc.stmts { + tdb.Exec(t, stmt) + } afterStmtTS := ts.Clock().Now() testutils.SucceedsSoon(t, func() error { diff --git a/pkg/ccl/telemetryccl/testdata/telemetry/multiregion b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion index 628047bf311d..70675df86173 100644 --- a/pkg/ccl/telemetryccl/testdata/telemetry/multiregion +++ b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion @@ -301,6 +301,9 @@ sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_table_i exec ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW; +---- + +exec ALTER TABLE t5 ADD COLUMN cr crdb_internal_region NOT NULL ---- diff --git a/pkg/cli/interactive_tests/test_exec_log.tcl b/pkg/cli/interactive_tests/test_exec_log.tcl index 1bdbd2326293..3b7de6218bdc 100644 --- a/pkg/cli/interactive_tests/test_exec_log.tcl +++ b/pkg/cli/interactive_tests/test_exec_log.tcl @@ -84,12 +84,19 @@ eexpect 999 eexpect 999 eexpect root@ -# Two standalone statements that will want separate counters. +# Two statements sent in a batch will have the same counter. send "SELECT 660+6; SELECT 660+6;\r" eexpect 666 eexpect 666 eexpect root@ +# Two standalone statements that will want separate counters. +send "SELECT 550+5;\r" +eexpect 555 +send "SELECT 550+5;\r" +eexpect 555 +eexpect root@ + flush_server_logs # Now check the items are there in the log file. We need to iterate @@ -100,17 +107,18 @@ flush_server_logs # previous statement is also in the log file after this check # succeeds. system "for i in `seq 1 3`; do - grep 'SELECT ..*660..* +' $logfile && exit 0; + grep 'SELECT ..*550..* +' $logfile && exit 0; echo still waiting; sleep 1; done; echo 'not finding two separate txn counter values?'; -grep 'SELECT ..*660..* +' $logfile; +grep 'SELECT ..*550..* +' $logfile; exit 1;" # Two separate single-stmt txns. -system "n=`grep 'SELECT ..*660..* +' $logfile | sed -e 's/.*TxnCounter.:\\(\[0-9\]*\\)/\\1/g' | uniq | wc -l`; if test \$n -ne 2; then echo unexpected \$n; exit 1; fi" +system "n=`grep 'SELECT ..*550..* +' $logfile | sed -e 's/.*TxnCounter.:\\(\[0-9\]*\\)/\\1/g' | uniq | wc -l`; if test \$n -ne 2; then echo unexpected \$n; exit 1; fi" # Same txns. 
+system "n=`grep 'SELECT ..*660..* +' $logfile | sed -e 's/.*TxnCounter.:\\(\[0-9\]*\\)/\\1/g' | uniq | wc -l`; if test \$n -ne 1; then echo unexpected \$n; exit 1; fi" system "n=`grep 'SELECT ..*770..* +' $logfile | sed -e 's/.*TxnCounter.:\\(\[0-9\]*\\)/\\1/g' | uniq | wc -l`; if test \$n -ne 1; then echo unexpected \$n; exit 1; fi" system "n=`grep 'SELECT ..*880..* +' $logfile | sed -e 's/.*TxnCounter.:\\(\[0-9\]*\\)/\\1/g' | uniq | wc -l`; if test \$n -ne 1; then echo unexpected \$n; exit 1; fi" system "n=`grep 'SELECT ..*990..* +' $logfile | sed -e 's/.*TxnCounter.:\\(\[0-9\]*\\)/\\1/g' | uniq | wc -l`; if test \$n -ne 1; then echo unexpected \$n; exit 1; fi" diff --git a/pkg/internal/sqlsmith/setup.go b/pkg/internal/sqlsmith/setup.go index 3f428e804798..e2de50f87a9c 100644 --- a/pkg/internal/sqlsmith/setup.go +++ b/pkg/internal/sqlsmith/setup.go @@ -108,7 +108,8 @@ func randTablesN(r *rand.Rand, n int) string { const ( seedTable = ` -CREATE TYPE greeting AS ENUM ('hello', 'howdy', 'hi', 'good day', 'morning'); +BEGIN; CREATE TYPE greeting AS ENUM ('hello', 'howdy', 'hi', 'good day', 'morning'); COMMIT; +BEGIN; CREATE TABLE IF NOT EXISTS seed AS SELECT g::INT2 AS _int2, @@ -130,6 +131,7 @@ CREATE TABLE IF NOT EXISTS seed AS enum_range('hello'::greeting)[g] as _enum FROM generate_series(1, 5) AS g; +COMMIT; INSERT INTO seed DEFAULT VALUES; CREATE INDEX on seed (_int8, _float8, _date); diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index e1a73a4ad682..44d7dbb5a73b 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -2040,9 +2040,10 @@ func TestShowJobsWithError(t *testing.T) { // Create at least 6 rows, ensuring 3 rows are corrupted. // Ensure there is at least one row in system.jobs. - if _, err := sqlDB.Exec(` - CREATE TABLE foo(x INT); ALTER TABLE foo ADD COLUMN y INT; - `); err != nil { + if _, err := sqlDB.Exec(`CREATE TABLE foo(x INT);`); err != nil { + t.Fatal(err) + } + if _, err := sqlDB.Exec(`ALTER TABLE foo ADD COLUMN y INT;`); err != nil { t.Fatal(err) } // Get the id of the ADD COLUMN job to use later. diff --git a/pkg/server/stats_test.go b/pkg/server/stats_test.go index cd165579ef79..bedca43947fb 100644 --- a/pkg/server/stats_test.go +++ b/pkg/server/stats_test.go @@ -183,6 +183,7 @@ func TestSQLStatCollection(t *testing.T) { s, sqlDB, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) + sqlRunner := sqlutils.MakeSQLRunner(sqlDB) sqlServer := s.(*TestServer).Server.sqlServer.pgServer.SQLServer // Flush stats at the beginning of the test. @@ -190,15 +191,11 @@ func TestSQLStatCollection(t *testing.T) { sqlServer.GetReportedSQLStatsController().ResetLocalSQLStats(ctx) // Execute some queries against the sqlDB to build up some stats. - if _, err := sqlDB.Exec(` - CREATE DATABASE t; - CREATE TABLE t.test (x INT PRIMARY KEY); - INSERT INTO t.test VALUES (1); - INSERT INTO t.test VALUES (2); - INSERT INTO t.test VALUES (3); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `CREATE DATABASE t`) + sqlRunner.Exec(t, `CREATE TABLE t.test (x INT PRIMARY KEY);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (1);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (2);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (3);`) // Collect stats from the SQL server and ensure our queries are present. stats, err := sqlServer.GetScrubbedStmtStats(ctx) @@ -241,14 +238,10 @@ func TestSQLStatCollection(t *testing.T) { } // Make another query to the db. 
- if _, err := sqlDB.Exec(` - INSERT INTO t.test VALUES (4); - INSERT INTO t.test VALUES (5); - INSERT INTO t.test VALUES (6); - CREATE USER us WITH PASSWORD 'pass'; -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (4);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (5);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (6);`) + sqlRunner.Exec(t, `CREATE USER us WITH PASSWORD 'pass';`) // Find and record the stats for our second query. stats, err = sqlServer.GetScrubbedStmtStats(ctx) @@ -294,15 +287,12 @@ func TestSQLStatCollection(t *testing.T) { } func populateStats(t *testing.T, sqlDB *gosql.DB) { - if _, err := sqlDB.Exec(` - CREATE DATABASE t; - CREATE TABLE t.test (x INT PRIMARY KEY); - INSERT INTO t.test VALUES (1); - INSERT INTO t.test VALUES (2); - INSERT INTO t.test VALUES (3); -`); err != nil { - t.Fatal(err) - } + sqlRunner := sqlutils.MakeSQLRunner(sqlDB) + sqlRunner.Exec(t, `CREATE DATABASE t;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (x INT PRIMARY KEY);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (1);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (2);`) + sqlRunner.Exec(t, `INSERT INTO t.test VALUES (3);`) } func TestClusterResetSQLStats(t *testing.T) { diff --git a/pkg/sql/alter_column_type_test.go b/pkg/sql/alter_column_type_test.go index 5d666521f3a2..c206bbe45a7b 100644 --- a/pkg/sql/alter_column_type_test.go +++ b/pkg/sql/alter_column_type_test.go @@ -263,15 +263,12 @@ func TestAlterColumnTypeFailureRollback(t *testing.T) { defer s.Stopper().Stop(ctx) sqlDB.Exec(t, `SET enable_experimental_alter_column_type_general = true;`) + sqlDB.Exec(t, `CREATE DATABASE t;`) + sqlDB.Exec(t, `CREATE TABLE t.test (x STRING);`) + sqlDB.Exec(t, `INSERT INTO t.test VALUES ('1'), ('2'), ('HELLO');`) expected := "pq: could not parse \"HELLO\" as type int: strconv.ParseInt: parsing \"HELLO\": invalid syntax" - - sqlDB.ExpectErr(t, expected, ` -CREATE DATABASE t; -CREATE TABLE t.test (x STRING); -INSERT INTO t.test VALUES ('1'), ('2'), ('HELLO'); -ALTER TABLE t.test ALTER COLUMN x TYPE INT; -`) + sqlDB.ExpectErr(t, expected, `ALTER TABLE t.test ALTER COLUMN x TYPE INT;`) // Ensure that the add column and column swap mutations are cleaned up. testutils.SucceedsSoon(t, func() error { @@ -300,12 +297,10 @@ func TestQueryIntToString(t *testing.T) { sqlDB.Exec(t, `SET enable_experimental_alter_column_type_general = true;`) - sqlDB.Exec(t, ` -CREATE DATABASE t; -CREATE TABLE t.test (x INT, y INT, z INT); -INSERT INTO t.test VALUES (1, 1, 1), (2, 2, 2); -ALTER TABLE t.test ALTER COLUMN y TYPE STRING; -`) + sqlDB.Exec(t, `CREATE DATABASE t;`) + sqlDB.Exec(t, `CREATE TABLE t.test (x INT, y INT, z INT);`) + sqlDB.Exec(t, `INSERT INTO t.test VALUES (1, 1, 1), (2, 2, 2);`) + sqlDB.Exec(t, `ALTER TABLE t.test ALTER COLUMN y TYPE STRING;`) sqlDB.ExecSucceedsSoon(t, `INSERT INTO t.test VALUES (3, 'HELLO', 3);`) diff --git a/pkg/sql/colfetcher/vectorized_batch_size_test.go b/pkg/sql/colfetcher/vectorized_batch_size_test.go index 613af7a7e2ae..66f4e2de2586 100644 --- a/pkg/sql/colfetcher/vectorized_batch_size_test.go +++ b/pkg/sql/colfetcher/vectorized_batch_size_test.go @@ -148,11 +148,13 @@ func TestCFetcherLimitsOutputBatch(t *testing.T) { // such setup the cFetcher will allocate an output batch of capacity 50, yet // after setting the 7th or so row the footprint of the batch will exceed // the memory limit. As a result, we will get around 7 batches. 
- _, err := conn.ExecContext(ctx, ` -SET distsql_workmem='128KiB'; -CREATE TABLE t (a PRIMARY KEY, b) AS SELECT i, repeat('a', 16 * 1024) FROM generate_series(1, 50) AS g(i); -ANALYZE t -`) + _, err := conn.ExecContext(ctx, `SET distsql_workmem='128KiB';`) + assert.NoError(t, err) + _, err = conn.ExecContext(ctx, ` +CREATE TABLE t (a PRIMARY KEY, b) AS +SELECT i, repeat('a', 16 * 1024) FROM generate_series(1, 50) AS g(i);`) + assert.NoError(t, err) + _, err = conn.ExecContext(ctx, `ANALYZE t`) assert.NoError(t, err) batchCountRegex := regexp.MustCompile(`vectorized batch count: (\d+)`) diff --git a/pkg/sql/conn_executor_test.go b/pkg/sql/conn_executor_test.go index c578330f7a04..a3ec4c9683ae 100644 --- a/pkg/sql/conn_executor_test.go +++ b/pkg/sql/conn_executor_test.go @@ -1270,8 +1270,11 @@ ALTER TABLE t1 ADD COLUMN b INT DEFAULT 1`, } for _, tc := range testCases { - if _, err := sqlConn.Exec(tc.stmt); err != nil { - require.NoError(t, err, "executing %s ", tc.stmt) + stmts := strings.Split(tc.stmt, ";") + for _, s := range stmts { + if _, err := sqlConn.Exec(s); err != nil { + require.NoError(t, err, "executing %s ", s) + } } rows, err := sqlConn.Query("SHOW LAST QUERY STATISTICS RETURNING parse_latency, plan_latency, exec_latency, service_latency, post_commit_jobs_latency") diff --git a/pkg/sql/delete_preserving_index_test.go b/pkg/sql/delete_preserving_index_test.go index 9c1475e055d7..2738a28ef925 100644 --- a/pkg/sql/delete_preserving_index_test.go +++ b/pkg/sql/delete_preserving_index_test.go @@ -14,6 +14,7 @@ import ( "context" "math" "reflect" + "strings" "sync" "testing" @@ -86,8 +87,11 @@ SET use_declarative_schema_changer = 'off'; defer server.Stopper().Stop(context.Background()) getRevisionsForTest := func(setupSQL, schemaChangeSQL, dataSQL string, deletePreservingEncoding bool) ([]kvclient.VersionedValues, []byte, error) { - if _, err := sqlDB.Exec(setupSQL); err != nil { - t.Fatal(err) + setupStmts := strings.Split(setupSQL, ";") + for _, stmt := range setupStmts { + if _, err := sqlDB.Exec(stmt); err != nil { + t.Fatal(err) + } } // Start the schema change but pause right before the backfill. diff --git a/pkg/sql/descriptor_mutation_test.go b/pkg/sql/descriptor_mutation_test.go index 07fe56f85ad9..35b0815d5a5d 100644 --- a/pkg/sql/descriptor_mutation_test.go +++ b/pkg/sql/descriptor_mutation_test.go @@ -239,17 +239,14 @@ func TestOperationsWithColumnMutation(t *testing.T) { params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) defer server.Stopper().Stop(ctx) + sqlRunner := sqlutils.MakeSQLRunner(sqlDB) // Fix the column families so the key counts below don't change if the // family heuristics are updated. // Add an index so that we test adding a column when a table has an index. - if _, err := sqlDB.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test (k VARCHAR PRIMARY KEY DEFAULT 'default', v VARCHAR, i VARCHAR DEFAULT 'i', FAMILY (k), FAMILY (v), FAMILY (i)); -CREATE INDEX allidx ON t.test (k, v); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `CREATE DATABASE t;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (k VARCHAR PRIMARY KEY DEFAULT 'default', v VARCHAR, i VARCHAR DEFAULT 'i', FAMILY (k), FAMILY (v), FAMILY (i));`) + sqlRunner.Exec(t, `CREATE INDEX allidx ON t.test (k, v);`) // read table descriptor tableDesc := desctestutils.TestingGetMutableExistingTableDescriptor( @@ -266,13 +263,9 @@ CREATE INDEX allidx ON t.test (k, v); func(t *testing.T) { // Init table to start state.
- if _, err := sqlDB.Exec(` -DROP TABLE t.test; -CREATE TABLE t.test (k VARCHAR PRIMARY KEY DEFAULT 'default', v VARCHAR, i VARCHAR DEFAULT 'i', FAMILY (k), FAMILY (v), FAMILY (i)); -CREATE INDEX allidx ON t.test (k, v); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `DROP TABLE t.test;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (k VARCHAR PRIMARY KEY DEFAULT 'default', v VARCHAR, i VARCHAR DEFAULT 'i', FAMILY (k), FAMILY (v), FAMILY (i));`) + sqlRunner.Exec(t, `CREATE INDEX allidx ON t.test (k, v);`) // read table descriptor mTest.tableDesc = desctestutils.TestingGetMutableExistingTableDescriptor( @@ -512,13 +505,10 @@ func TestOperationsWithIndexMutation(t *testing.T) { params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) defer server.Stopper().Stop(context.Background()) + sqlRunner := sqlutils.MakeSQLRunner(sqlDB) - if _, err := sqlDB.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `CREATE DATABASE t;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v));`) // read table descriptor tableDesc := desctestutils.TestingGetMutableExistingTableDescriptor( @@ -538,15 +528,9 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); } { t.Run(fmt.Sprintf("upsert=%t/state=%v", useUpsert, state), func(t *testing.T) { // Init table with some entries. - if _, err := sqlDB.Exec(`TRUNCATE TABLE t.test`); err != nil { - t.Fatal(err) - } - if _, err := sqlDB.Exec(` -DROP TABLE t.test; -CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `TRUNCATE TABLE t.test`) + sqlRunner.Exec(t, `DROP TABLE t.test;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v));`) // read table descriptor mTest.tableDesc = desctestutils.TestingGetMutableExistingTableDescriptor( kvDB, keys.SystemSQLCodec, "t", "test") @@ -689,18 +673,15 @@ func TestOperationsWithColumnAndIndexMutation(t *testing.T) { server, sqlDB, kvDB := serverutils.StartServer(t, params) ctx := context.Background() defer server.Stopper().Stop(ctx) + sqlRunner := sqlutils.MakeSQLRunner(sqlDB) // Create a table with column i and an index on v and i. Fix the column // families so the key counts below don't change if the family heuristics // are updated. // Add an index so that we test adding a column when a table has an index. - if _, err := sqlDB.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, i CHAR, INDEX foo (i, v), FAMILY (k), FAMILY (v), FAMILY (i)); -CREATE INDEX allidx ON t.test (k, v); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `CREATE DATABASE t;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, i CHAR, INDEX foo (i, v), FAMILY (k), FAMILY (v), FAMILY (i));`) + sqlRunner.Exec(t, `CREATE INDEX allidx ON t.test (k, v);`) // read table descriptor tableDesc := desctestutils.TestingGetMutableExistingTableDescriptor( @@ -728,13 +709,9 @@ CREATE INDEX allidx ON t.test (k, v); continue } // Init table to start state.
- if _, err := sqlDB.Exec(` -DROP TABLE t.test; -CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, i CHAR, INDEX foo (i, v), FAMILY (k), FAMILY (v), FAMILY (i)); -CREATE INDEX allidx ON t.test (k, v); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `DROP TABLE t.test;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, i CHAR, INDEX foo (i, v), FAMILY (k), FAMILY (v), FAMILY (i));`) + sqlRunner.Exec(t, `CREATE INDEX allidx ON t.test (k, v);`) if _, err := sqlDB.Exec(`TRUNCATE TABLE t.test`); err != nil { t.Fatal(err) } diff --git a/pkg/sql/export.go b/pkg/sql/export.go index dc4645123700..f2ceaf37098a 100644 --- a/pkg/sql/export.go +++ b/pkg/sql/export.go @@ -123,8 +123,8 @@ func (ef *execFactory) ConstructExport( return nil, err } - if !ef.planner.ExtendedEvalContext().TxnImplicit { - return nil, errors.Errorf("EXPORT cannot be used inside a transaction") + if !ef.planner.IsAutoCommit() { + return nil, errors.Errorf("EXPORT cannot be used inside a multi-statement transaction") } if fileSuffix != csvSuffix && fileSuffix != parquetSuffix { diff --git a/pkg/sql/importer/import_planning.go b/pkg/sql/importer/import_planning.go index 25a2d94a0722..f52eb45aa235 100644 --- a/pkg/sql/importer/import_planning.go +++ b/pkg/sql/importer/import_planning.go @@ -372,8 +372,8 @@ func importPlanHook( ctx, span := tracing.ChildSpan(ctx, importStmt.StatementTag()) defer span.Finish() - if !(p.ExtendedEvalContext().TxnImplicit || isDetached) { - return errors.Errorf("IMPORT cannot be used inside a transaction without DETACHED option") + if !(p.IsAutoCommit() || isDetached) { + return errors.Errorf("IMPORT cannot be used inside a multi-statement transaction without DETACHED option") } if optsErr != nil { diff --git a/pkg/sql/importer/import_stmt_test.go b/pkg/sql/importer/import_stmt_test.go index f2aa3e231a45..453dc52038bf 100644 --- a/pkg/sql/importer/import_stmt_test.go +++ b/pkg/sql/importer/import_stmt_test.go @@ -2730,7 +2730,7 @@ func TestExportImportRoundTrip(t *testing.T) { sqlDB := sqlutils.MakeSQLRunner(conn) tests := []struct { - stmts string + stmts []string tbl string expected string }{ @@ -2738,23 +2738,29 @@ // need to differ across runs, so we let the test runner format the stmts field // with a unique directory name per run.
{ - stmts: `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT ARRAY['a', 'b', 'c']; - CREATE TABLE t (x TEXT[]); - IMPORT INTO t CSV DATA ('nodelocal://0/%[1]s/export*-n*.0.csv')`, + stmts: []string{ + `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT ARRAY['a', 'b', 'c']`, + `CREATE TABLE t (%[1]s TEXT[])`, + `IMPORT INTO t CSV DATA ('nodelocal://0/%[1]s/export*-n*.0.csv')`, + }, tbl: "t", expected: `SELECT ARRAY['a', 'b', 'c']`, }, { - stmts: `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT ARRAY[b'abc', b'\141\142\143', b'\x61\x62\x63']; - CREATE TABLE t (x BYTES[]); - IMPORT INTO t CSV DATA ('nodelocal://0/%[1]s/export*-n*.0.csv')`, + stmts: []string{ + `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT ARRAY[b'abc', b'\141\142\143', b'\x61\x62\x63']`, + `CREATE TABLE t (%[1]s BYTES[])`, + `IMPORT INTO t CSV DATA ('nodelocal://0/%[1]s/export*-n*.0.csv')`, + }, tbl: "t", expected: `SELECT ARRAY[b'abc', b'\141\142\143', b'\x61\x62\x63']`, }, { - stmts: `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT 'dog' COLLATE en; - CREATE TABLE t (x STRING COLLATE en); - IMPORT INTO t CSV DATA ('nodelocal://0/%[1]s/export*-n*.0.csv')`, + stmts: []string{ + `EXPORT INTO CSV 'nodelocal://0/%[1]s' FROM SELECT 'dog' COLLATE en`, + `CREATE TABLE t (%[1]s STRING COLLATE en)`, + `IMPORT INTO t CSV DATA ('nodelocal://0/%[1]s/export*-n*.0.csv')`, + }, tbl: "t", expected: `SELECT 'dog' COLLATE en`, }, @@ -2762,7 +2768,9 @@ func TestExportImportRoundTrip(t *testing.T) { for i, test := range tests { sqlDB.Exec(t, fmt.Sprintf(`DROP TABLE IF EXISTS %s`, test.tbl)) - sqlDB.Exec(t, fmt.Sprintf(test.stmts, fmt.Sprintf("run%d", i))) + for _, stmt := range test.stmts { + sqlDB.Exec(t, fmt.Sprintf(stmt, fmt.Sprintf("run%d", i))) + } sqlDB.CheckQueryResults(t, fmt.Sprintf(`SELECT * FROM %s`, test.tbl), sqlDB.QueryStr(t, test.expected)) } } @@ -6606,7 +6614,7 @@ func TestDetachedImport(t *testing.T) { } err := crdb.ExecuteTx(ctx, connDB, nil, importWithoutDetached) require.True(t, - testutils.IsError(err, "IMPORT cannot be used inside a transaction without DETACHED option")) + testutils.IsError(err, "IMPORT cannot be used inside a multi-statement transaction without DETACHED option")) // We can execute IMPORT under transaction with detached option. importWithDetached := func(txn *gosql.Tx) error { diff --git a/pkg/sql/logictest/testdata/logic_test/aggregate b/pkg/sql/logictest/testdata/logic_test/aggregate index 76249b6b8eb5..2e871da54671 100644 --- a/pkg/sql/logictest/testdata/logic_test/aggregate +++ b/pkg/sql/logictest/testdata/logic_test/aggregate @@ -3164,6 +3164,8 @@ false statement ok TRUNCATE t_every; + +statement ok INSERT INTO t_every VALUES (NULL), (NULL), (NULL) query B diff --git a/pkg/sql/logictest/testdata/logic_test/alter_column_type b/pkg/sql/logictest/testdata/logic_test/alter_column_type index e52e882368e4..a16042abb952 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_column_type +++ b/pkg/sql/logictest/testdata/logic_test/alter_column_type @@ -232,9 +232,17 @@ SELECT * FROM t3 ORDER BY id # Ensure ALTER COLUMN TYPE correctly changes the precision of TIMESTAMPTZ. 
statement ok CREATE TABLE t5 (x TIMESTAMPTZ(6)); + +statement ok INSERT INTO t5 VALUES ('2016-01-25 10:10:10.555555-05:00'); + +statement ok INSERT INTO t5 VALUES ('2016-01-26 10:10:10.555555-05:00'); + +statement ok ALTER TABLE t5 ALTER COLUMN x TYPE TIMESTAMPTZ(3); + +statement ok INSERT INTO t5 VALUES ('2016-01-26 10:10:10.55-05:00'); query T @@ -247,7 +255,11 @@ SELECT * FROM t5 ORDER BY x # Ensure column families stay the same. statement ok CREATE TABLE t6(id INT, id2 INT, FAMILY f1 (id), FAMILY f2 (id2)); + +statement ok INSERT INTO t6 VALUES (1), (2), (3); + +statement ok ALTER TABLE t6 ALTER COLUMN id2 TYPE STRING; query TT @@ -263,11 +275,14 @@ t6 CREATE TABLE public.t6 ( ) # Ensure the type of the default column is checked -statement error default for column "x" cannot be cast automatically to type DATE +statement ok CREATE TABLE t7 (x INT DEFAULT 1, y INT); + +statement ok INSERT INTO t7 (y) VALUES (1), (2), (3); + +statement error default for column "x" cannot be cast automatically to type DATE ALTER TABLE t7 ALTER COLUMN x TYPE DATE; -INSERT INTO t7 (y) VALUES (4); # Ensure a runtime error correctly rolls back and the table is unchanged. statement ok @@ -323,7 +338,11 @@ ALTER TABLE uniq ALTER COLUMN x TYPE STRING # Ensure we cannot change the column type of a column stored in a secondary index. statement ok CREATE TABLE t15 (x INT, y INT); + +statement ok CREATE INDEX ON t15 (x) STORING (y); + +statement ok INSERT INTO t15 VALUES (1, 1), (2, 2) statement error pq: unimplemented: ALTER COLUMN TYPE requiring rewrite of on-disk data is currently not supported for columns that are part of an index @@ -332,6 +351,8 @@ ALTER TABLE t15 ALTER COLUMN y TYPE STRING; # Ensure ALTER COLUMN TYPE works for collated strings. statement ok CREATE TABLE t16 (x STRING); + +statement ok INSERT INTO t16 VALUES ('Backhaus'), ('Bär'), ('Baz'); query T @@ -377,6 +398,8 @@ t17 CREATE TABLE public.t17 ( # Ensure ALTER COLUMN TYPE fails if the column is part of an FK relationship. statement ok CREATE TABLE t18 (x INT NOT NULL PRIMARY KEY); + +statement ok CREATE TABLE t19 (y INT NOT NULL REFERENCES t18 (x), INDEX(y)); statement error pq: unimplemented: ALTER COLUMN TYPE requiring rewrite of on-disk data is currently not supported for columns that are part of an index @@ -388,6 +411,8 @@ ALTER TABLE t19 ALTER COLUMN y TYPE STRING # Ensure ALTER COLUMN TYPE does not work inside a transaction. statement ok CREATE TABLE t20 (x INT); + +statement ok BEGIN statement error pq: unimplemented: ALTER COLUMN TYPE is not supported inside a transaction @@ -431,6 +456,8 @@ true # Ensure ALTER COLUMN TYPE rolls back if is not applicable to value in the column. statement ok CREATE TABLE t24 (x STRING); + +statement ok INSERT INTO t24 VALUES ('1'), ('hello'); statement error pq: could not parse "hello" as type int: strconv.ParseInt: parsing "hello": invalid syntax @@ -457,6 +484,8 @@ ALTER TABLE t25 ALTER COLUMN x TYPE STRING USING (x); statement ok CREATE TABLE t26 (x INT); + +statement ok CREATE TABLE t27 (x INT); # Ensure USING EXPRESSION cannot reference columns that do not exist in the @@ -484,7 +513,11 @@ ALTER TABLE t27 ALTER COLUMN x TYPE STRING USING (x::STRING) # converting to the same type. statement ok CREATE TABLE t28(x INT); + +statement ok INSERT INTO t28 VALUES (1), (2), (3); + +statement ok ALTER TABLE t28 ALTER COLUMN x TYPE INT USING (x * 5) query I @@ -498,7 +531,11 @@ SELECT x FROM t28 ORDER BY x # error before running the online schema change. 
statement ok CREATE TABLE t29 (x INT8); + +statement ok INSERT INTO t29 VALUES (1), (2), (3); + +statement ok ALTER TABLE t29 ALTER COLUMN x TYPE INT4; query I @@ -512,6 +549,8 @@ SELECT x FROM t29 ORDER BY x statement ok CREATE TABLE parent_71089 (id INT PRIMARY KEY); + +statement ok CREATE TABLE child_71089 (a INT, b INT REFERENCES parent_71089 (id) NOT NULL) statement ok @@ -522,6 +561,8 @@ ALTER TABLE child_71089 ALTER COLUMN a TYPE FLOAT; # try the conversion and error out if the cast cannot be applied. statement ok CREATE TABLE t30 (x STRING); + +statement ok INSERT INTO t30 VALUES (e'a\\01'); statement error pq: could not parse "a\\\\01" as type bytes: bytea encoded value ends with incomplete escape sequence @@ -546,6 +587,8 @@ ALTER TABLE t29 ALTER COLUMN x TYPE INT2 statement ok CREATE TABLE regression_54844 (i int8); + +statement ok INSERT INTO regression_54844 VALUES (-9223372036854775807) statement error integer out of range for type int2 diff --git a/pkg/sql/logictest/testdata/logic_test/alter_primary_key b/pkg/sql/logictest/testdata/logic_test/alter_primary_key index ad43177b293b..507ee9674527 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_primary_key +++ b/pkg/sql/logictest/testdata/logic_test/alter_primary_key @@ -45,12 +45,20 @@ sql.schema.alter_table.alter_primary_key # Test primary key changes on storing indexes with different column families (the randomizer will do this for us). statement ok DROP TABLE t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT, z INT NOT NULL, w INT, v INT, INDEX i1 (y) STORING (w, v), INDEX i2 (z) STORING (y, v) ); + +statement ok INSERT INTO t VALUES (1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13, 14, 15); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (z); + +statement ok INSERT INTO t VALUES (16, 17, 18, 19, 20) query III rowsort @@ -72,6 +80,8 @@ SELECT y, z, v FROM t@i2 # Test that composite values are encoded correctly in covering indexes. statement ok CREATE TABLE t_composite (x INT PRIMARY KEY, y DECIMAL NOT NULL); + +statement ok INSERT INTO t_composite VALUES (1, 1.0), (2, 1.001) statement ok @@ -92,9 +102,17 @@ subtest foreign_keys # Test primary key changes on tables with inbound and outbound FK's. statement ok CREATE TABLE fk1 (x INT NOT NULL); + +statement ok CREATE TABLE fk2 (x INT NOT NULL, UNIQUE INDEX i (x)); + +statement ok ALTER TABLE fk1 ADD CONSTRAINT fk FOREIGN KEY (x) REFERENCES fk2(x); + +statement ok INSERT INTO fk2 VALUES (1); + +statement ok INSERT INTO fk1 VALUES (1) statement ok @@ -102,6 +120,8 @@ ALTER TABLE fk1 ALTER PRIMARY KEY USING COLUMNS (x) statement ok INSERT INTO fk2 VALUES (2); + +statement ok INSERT INTO fk1 VALUES (2) statement ok @@ -115,8 +135,14 @@ INSERT INTO fk1 VALUES (3) statement ok CREATE TABLE self (a INT PRIMARY KEY, x INT, y INT, z INT, w INT NOT NULL, INDEX (x), UNIQUE INDEX (y), INDEX (z)); + +statement ok INSERT INTO self VALUES (1, 1, 1, 1, 1); + +statement ok ALTER TABLE self ADD CONSTRAINT fk1 FOREIGN KEY (z) REFERENCES self (y); + +statement ok ALTER TABLE self ADD CONSTRAINT fk2 FOREIGN KEY (x) REFERENCES self (y); statement ok @@ -129,19 +155,39 @@ INSERT INTO self VALUES (3, 2, 3, 2, 3) # Set up a bunch of foreign key references pointing into and out of a table. 
statement ok CREATE TABLE t1 (x INT PRIMARY KEY, y INT NOT NULL, z INT, w INT, INDEX (y), INDEX (z), UNIQUE INDEX (w)); + +statement ok CREATE TABLE t2 (y INT, UNIQUE INDEX (y)); + +statement ok CREATE TABLE t3 (z INT, UNIQUE INDEX (z)); + +statement ok CREATE TABLE t4 (w INT, INDEX (w)); + +statement ok CREATE TABLE t5 (x INT, INDEX (x)); + +statement ok INSERT INTO t1 VALUES (1, 1, 1, 1); INSERT INTO t2 VALUES (1); INSERT INTO t3 VALUES (1); INSERT INTO t4 VALUES (1); INSERT INTO t5 VALUES (1); + +statement ok ALTER TABLE t1 ADD CONSTRAINT fk1 FOREIGN KEY (y) REFERENCES t2(y); + +statement ok ALTER TABLE t1 ADD CONSTRAINT fk2 FOREIGN KEY (z) REFERENCES t3(z); + +statement ok ALTER TABLE t4 ADD CONSTRAINT fk3 FOREIGN KEY (w) REFERENCES t1(w); + +statement ok ALTER TABLE t5 ADD CONSTRAINT fk4 FOREIGN KEY (x) REFERENCES t1(x); + +statement ok ALTER TABLE t1 ALTER PRIMARY KEY USING COLUMNS (y) statement ok @@ -160,7 +206,11 @@ INSERT INTO t4 VALUES (101) # Ensure that we still rewrite a primary index if the index column has name "rowid". statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (rowid INT PRIMARY KEY, y INT NOT NULL, FAMILY (rowid, y)); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) query TT @@ -178,6 +228,8 @@ subtest index_rewrites # Test that indexes that need to get rewritten indeed get rewritten. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT NOT NULL, -- will be new primary key. @@ -193,7 +245,11 @@ CREATE TABLE t ( INDEX i7 (z) USING HASH WITH (bucket_count=4), -- will be rewritten. FAMILY (x, y, z, w, v) ); + +statement ok INSERT INTO t VALUES (1, 2, 3, 4, '{}'); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) query TT @@ -342,6 +398,8 @@ subtest hash_sharded statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT NOT NULL, @@ -349,7 +407,11 @@ CREATE TABLE t ( INDEX i1 (z) USING HASH WITH (bucket_count=5), FAMILY (x, y, z) ); + +statement ok INSERT INTO t VALUES (1, 2, 3); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) USING HASH WITH (bucket_count=10) query TT @@ -402,6 +464,8 @@ SELECT * FROM t@i1 statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY USING HASH WITH (bucket_count=5), y INT NOT NULL, @@ -409,7 +473,11 @@ CREATE TABLE t ( INDEX i (z), FAMILY (x, y, z) ); + +statement ok INSERT INTO t VALUES (1, 2, 3); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) query TT @@ -439,6 +507,8 @@ SELECT * FROM t@i # Ensure we don't rewrite default primary index even if its name isn't rowid. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (rowid INT NOT NULL); query TT @@ -469,15 +539,25 @@ subtest encoding_bug # able to be updated and deleted with the primary index encoding. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL, z INT NOT NULL, FAMILY (x, y, z)); + +statement ok INSERT INTO t VALUES (1, 2, 3); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (z); + +statement ok UPDATE t SET y = 3 WHERE z = 3 # Test for #45363. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL) statement ok @@ -494,6 +574,8 @@ ROLLBACK statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL) statement ok @@ -512,6 +594,8 @@ subtest add_pk_rowid # Tests for #45509. 
statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY) statement error pq: multiple primary keys for table "t" are not allowed @@ -519,6 +603,8 @@ ALTER TABLE t ADD PRIMARY KEY (x) statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT NOT NULL) statement ok @@ -535,7 +621,11 @@ t CREATE TABLE public.t ( statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT NOT NULL); + +statement ok ALTER TABLE t ADD PRIMARY KEY (x) USING HASH WITH (bucket_count=4) query TT @@ -550,7 +640,11 @@ t CREATE TABLE public.t ( statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT NOT NULL); + +statement ok ALTER TABLE t ADD CONSTRAINT "my_pk" PRIMARY KEY (x) query TT @@ -571,6 +665,8 @@ ALTER TABLE t DROP CONSTRAINT "my_pk", ADD CONSTRAINT "i" PRIMARY KEY (x); # Regression for #45362. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT NOT NULL) statement ok @@ -590,7 +686,11 @@ ROLLBACK # TODO (rohany): This test might become obsolete when #44923 is fixed. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT NOT NULL); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (x) query I @@ -603,6 +703,8 @@ subtest add_drop_pk statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL, FAMILY (x), FAMILY (y)) statement error pq: relation "t" \([0-9]+\): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction @@ -638,6 +740,8 @@ ALTER TABLE t ADD CONSTRAINT IF NOT EXISTS "t_pkey" PRIMARY KEY (x) # in the same transaction. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL, FAMILY (x), FAMILY (y)) statement ok @@ -651,7 +755,11 @@ ALTER TABLE t ADD CONSTRAINT "t_pkey" PRIMARY KEY (y) statement ok ROLLBACK; + +statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL, FAMILY (x), FAMILY (y)) statement ok @@ -681,6 +789,8 @@ t CREATE TABLE public.t ( # in any DML statements. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL) statement ok @@ -774,7 +884,11 @@ ROLLBACK # as a DROP PRIMARY KEY get rolled back on failure. statement ok DROP TABLE IF EXISTS t1, t2 CASCADE; + +statement ok CREATE TABLE t1 (x INT PRIMARY KEY, y INT NOT NULL); + +statement ok CREATE TABLE t2 (x INT) statement ok @@ -795,6 +909,8 @@ SELECT * FROM t2 statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL) statement error pq: table "t" does not have a primary key, cannot perform ADD COLUMN z INT8 AS \(x \+ 1\) STORED @@ -926,7 +1042,11 @@ SELECT index_id, index_name FROM crdb_internal.table_indexes WHERE descriptor_na # count shouldn't cause the old primary key to be copied. statement ok DROP TABLE IF EXISTS t CASCADE; + +statement ok CREATE TABLE t (x INT PRIMARY KEY USING HASH WITH (bucket_count=2)); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (x) USING HASH WITH (bucket_count=3) query TT @@ -943,7 +1063,11 @@ t CREATE TABLE public.t ( # primary key to be copied. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT PRIMARY KEY USING HASH WITH (bucket_count=2), y INT NOT NULL, FAMILY (x, y)); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) USING HASH WITH (bucket_count=2) query TT @@ -962,29 +1086,55 @@ t CREATE TABLE public.t ( # Regression for #49079.
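Judging from the statements that follow, #49079 appears to involve dropping a column immediately after a primary key swap; a compact sketch of that shape, with a hypothetical table name:

statement ok
CREATE TABLE pk_swap (a INT, b INT, c INT, PRIMARY KEY (a, b))

statement ok
ALTER TABLE pk_swap ALTER PRIMARY KEY USING COLUMNS (b)

statement ok
SET sql_safe_updates = false

# The column drop has to cope with the copy of the old primary index
# that the swap left behind.
statement ok
ALTER TABLE pk_swap DROP COLUMN c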
statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT, y INT, z INT, PRIMARY KEY (x, y)); + +statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y); + +statement ok SET sql_safe_updates=false; + +statement ok ALTER TABLE t DROP COLUMN z # Regression for #54629. statement ok CREATE TABLE t54629 (c INT NOT NULL, UNIQUE INDEX (c)); + +statement ok ALTER TABLE t54629 ALTER PRIMARY KEY USING COLUMNS (c); + +statement ok INSERT INTO t54629 VALUES (1); + +statement ok DELETE FROM t54629 WHERE c = 1 statement ok DROP TABLE t54629; + +statement ok CREATE TABLE t54629(a INT PRIMARY KEY, c INT NOT NULL, UNIQUE INDEX (c)); + +statement ok ALTER TABLE t54629 ALTER PRIMARY KEY USING COLUMNS (c); + +statement ok DROP INDEX t54629_a_key CASCADE; + +statement ok INSERT INTO t54629 VALUES (1, 1); + +statement ok DELETE FROM t54629 WHERE c = 1; # Validate ALTER ADD PRIMARY KEY idempotence for #59307 statement ok DROP TABLE t1 CASCADE; + +statement ok create table t1(id integer not null, id2 integer not null, name varchar(32)); query TTT @@ -1132,6 +1282,8 @@ subtest toggle_sharded_no_new_index statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (i INT PRIMARY KEY) query TTT @@ -1163,7 +1315,11 @@ t_pkey i ASC # #71552). statement ok CREATE TABLE t71553 (a INT PRIMARY KEY, b INT NOT NULL); + +statement ok INSERT INTO t71553 VALUES (1, 1); + +statement ok ALTER TABLE t71553 ALTER PRIMARY KEY USING COLUMNS (b); query II @@ -1394,9 +1550,15 @@ subtest pkey-comment-drop # Create a table with a primary key and add a comment on it. statement ok CREATE TABLE pkey_comment (a INT8, b INT8, c INT8, CONSTRAINT pkey PRIMARY KEY (a, b)); + +statement ok COMMENT ON INDEX pkey IS 'idx'; COMMENT ON CONSTRAINT pkey ON pkey_comment IS 'const'; + +statement ok CREATE UNIQUE INDEX i2 ON pkey_comment(c); + +statement ok COMMENT ON INDEX i2 IS 'idx2'; COMMENT ON CONSTRAINT i2 ON pkey_comment IS 'idx3'; diff --git a/pkg/sql/logictest/testdata/logic_test/alter_table b/pkg/sql/logictest/testdata/logic_test/alter_table index 64374d7cd197..ab682db27ef5 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_table +++ b/pkg/sql/logictest/testdata/logic_test/alter_table @@ -1186,6 +1186,8 @@ t3 CREATE TABLE public.t3 ( # Ensure that we still have the correct restrictions. statement ok DROP TABLE t1, t2 CASCADE; + +statement ok CREATE TABLE t1 (x INT PRIMARY KEY); CREATE TABLE t2 (x INT, y INT, INDEX i (x)) @@ -1203,7 +1205,11 @@ INSERT INTO t2 VALUES (1, 2) # Test using ADD COL REFERENCES in a self referencing constraint. statement ok DROP TABLE t1 CASCADE; + +statement ok CREATE TABLE t1 (x INT PRIMARY KEY); + +statement ok ALTER TABLE t1 ADD COLUMN x2 INT REFERENCES t1 (x) query TT @@ -1294,6 +1300,8 @@ t2 CREATE TABLE public.t2 ( # Test the above on a table not created in the same txn. statement ok DROP TABLE t1, t2 CASCADE; + +statement ok CREATE TABLE t1 (x INT PRIMARY KEY); CREATE TABLE t2 (y INT) @@ -1319,6 +1327,8 @@ t2 CREATE TABLE public.t2 ( # Test that an FK can use a newly created index. 
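A minimal sketch of that scenario, with hypothetical names: the index on the referencing column is created first, and the constraint added afterwards can rely on it.

statement ok
CREATE TABLE fk_parent (x INT PRIMARY KEY)

statement ok
CREATE TABLE fk_child (x INT)

statement ok
CREATE INDEX fk_child_x ON fk_child (x)

# The new constraint is validated against fk_parent and can use the
# fresh index on fk_child.
statement ok
ALTER TABLE fk_child ADD CONSTRAINT fk FOREIGN KEY (x) REFERENCES fk_parent (x)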
statement ok DROP TABLE t1, t2 CASCADE; + +statement ok CREATE TABLE t1 (x INT PRIMARY KEY); CREATE TABLE t2 (x INT) @@ -1840,6 +1850,8 @@ subtest self_reference_fk_not_valid statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (i INT PRIMARY KEY, j INT); ALTER TABLE t ADD CONSTRAINT fk FOREIGN KEY (j) REFERENCES t(i) NOT VALID; @@ -1855,7 +1867,11 @@ subtest regression_67234 statement ok CREATE TABLE t67234 (k INT PRIMARY KEY, a INT, b INT, FAMILY (k, a, b)); + +statement ok ALTER TABLE t67234 ADD CONSTRAINT t67234_c1 UNIQUE (a) WHERE b > 0; + +statement ok ALTER TABLE t67234 ADD CONSTRAINT t67234_c2 UNIQUE WITHOUT INDEX (b) WHERE a > 0 query T @@ -1890,6 +1906,8 @@ SELECT count(usage_count) subtest generated_as_identity statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (a INT UNIQUE) statement ok @@ -1922,6 +1940,8 @@ DROP TABLE t subtest generated_as_identity_with_seq_option statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (a INT UNIQUE) statement ok @@ -1955,6 +1975,8 @@ ALTER TABLE t ALTER COLUMN b TYPE numeric(10,2) statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (a INT UNIQUE) statement ok @@ -2009,6 +2031,8 @@ ALTER TABLE t ALTER COLUMN b TYPE numeric(10,2) # Test we can assign a PRIMARY KEY overriding the existing rowid PRIMARY KEY. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (id INT NOT NULL) statement ok @@ -2026,6 +2050,8 @@ t CREATE TABLE public.t ( # Table has a PRIMARY KEY named index; check it errors when assigning PRIMARY KEY to id. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( id INT NOT NULL, rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), @@ -2040,6 +2066,8 @@ ALTER TABLE t ADD CONSTRAINT t_pkey PRIMARY KEY (id) statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( id INT NOT NULL, explicit_rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), @@ -2051,6 +2079,8 @@ ALTER TABLE t ADD CONSTRAINT t_pkey PRIMARY KEY (id) statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE public.t ( id INT8 NOT NULL, rowid INT8 NOT VISIBLE NOT NULL, @@ -2062,6 +2092,8 @@ ALTER TABLE t ADD CONSTRAINT t_pkey PRIMARY KEY (id) statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE public.t ( id INT8 NOT NULL, rowid INT4 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), @@ -2110,6 +2142,8 @@ statement ok CREATE TABLE IF NOT EXISTS multipleinstmt ( id INT8 DEFAULT nextval('multipleinstmt_seq') PRIMARY KEY, key STRING, value STRING ); + +statement ok INSERT INTO multipleinstmt (key, value) VALUES ('a', 'a'); INSERT INTO multipleinstmt (key, value) VALUES ('b', 'b'); INSERT INTO multipleinstmt (key, value) VALUES ('c', 'c'); diff --git a/pkg/sql/logictest/testdata/logic_test/alter_type b/pkg/sql/logictest/testdata/logic_test/alter_type index 96e4c6a401bc..db854f7368da 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_type +++ b/pkg/sql/logictest/testdata/logic_test/alter_type @@ -585,6 +585,8 @@ ALTER TYPE reg_64101 DROP VALUE 'a' statement ok DROP VIEW v_64101; + +statement ok CREATE VIEW v_64101 AS SELECT ARRAY['c'::reg_64101] statement ok @@ -595,9 +597,17 @@ ALTER TYPE reg_64101 DROP VALUE 'c' statement ok CREATE TYPE typ_64101 AS ENUM('a', 'b', 'c'); + +statement ok CREATE TABLE t1_64101("bob""b" typ_64101); + +statement ok CREATE TABLE t2_64101("bob""''b" typ_64101[]); + +statement ok INSERT INTO t1_64101 VALUES ('a'); + +statement ok INSERT INTO t2_64101 VALUES(ARRAY['b']) statement ok diff --git
a/pkg/sql/logictest/testdata/logic_test/array b/pkg/sql/logictest/testdata/logic_test/array index 7e3bb8b96f44..f32598c3f9bc 100644 --- a/pkg/sql/logictest/testdata/logic_test/array +++ b/pkg/sql/logictest/testdata/logic_test/array @@ -1314,6 +1314,8 @@ subtest array_compare statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT[], y INT[], z STRING[]) statement error pq: unsupported comparison operator: < @@ -1452,7 +1454,11 @@ SELECT x FROM t WHERE x > ARRAY[1] ORDER BY x # Ensure that we can order by the arrays without any indexes. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT[]); + +statement ok INSERT INTO t VALUES (ARRAY[1]), (ARRAY[5]), @@ -1527,7 +1533,11 @@ SELECT x FROM tarray ORDER BY x # Add multicolumn INDEX i (x, y, z) once #50659 is fixed. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT, y INT[], z INT); + +statement ok INSERT INTO t VALUES (1, ARRAY[1, 2, 3], 3), (NULL, ARRAY[1, NULL, 3], NULL), @@ -1551,7 +1561,11 @@ SELECT x, y, z FROM t WHERE x = 2 AND y < ARRAY[10] ORDER BY y # Add parent PRIMARY KEY (x) once #50659 is fixed. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x STRING[]); + +statement ok INSERT INTO t VALUES (ARRAY['']), (ARRAY['hello', 'hi\nthere']), @@ -1576,7 +1590,11 @@ SELECT x FROM t WHERE x > ARRAY['hell'] AND x < ARRAY['i'] # Add parent PRIMARY KEY (x) once #50659 is fixed. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x BYTES[]); + +statement ok INSERT INTO t VALUES (ARRAY[b'\xFF', b'\x00']), (ARRAY[NULL, b'\x01', b'\x01', NULL]), @@ -1593,7 +1611,11 @@ SELECT x FROM t ORDER BY x # Add parent PRIMARY KEY (x DESC) once #50659 is fixed. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x BYTES[]); + +statement ok INSERT INTO t VALUES (ARRAY[b'\xFF', b'\x00']), (ARRAY[NULL, b'\x01', b'\x01', NULL]), @@ -1610,7 +1632,11 @@ SELECT x FROM t ORDER BY x # Add parent PRIMARY KEY (x, y) once #50659 is fixed. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT[], y INT[]); + +statement ok INSERT INTO t VALUES (ARRAY[1, 2], ARRAY[3, 4]), (ARRAY[NULL, NULL], ARRAY[NULL, NULL]), @@ -1657,8 +1683,14 @@ SELECT x, y FROM t WHERE x > ARRAY[NULL]:::INT[] ORDER BY y # Add t1 and t2 PRIMARY KEY x once #50659 is fixed. statement ok DROP TABLE IF EXISTS t1, t2 CASCADE; + +statement ok CREATE TABLE t1 (x INT[]); + +statement ok CREATE TABLE t2 (x INT[]); + +statement ok INSERT INTO t1 VALUES (ARRAY[1, 2]), (ARRAY[NULL]), @@ -1691,7 +1723,11 @@ SELECT t1.x FROM t1 INNER MERGE JOIN t2 ON t1.x = t2.x # Add INDEX (x) once #50659 is fixed. 
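The tests above lean on the fact that arrays have an ordered key encoding, so sorting and range comparisons work even with no index available; a small self-contained sketch, with a hypothetical table name:

statement ok
CREATE TABLE arr_sort (x INT[])

statement ok
INSERT INTO arr_sort VALUES (ARRAY[5]), (ARRAY[1]), (ARRAY[1, 2]), (NULL)

# NULL sorts first, then arrays compare element by element.
query T
SELECT x FROM arr_sort ORDER BY x
----
NULL
{1}
{1,2}
{5}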
statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT[]); + +statement ok INSERT INTO t VALUES (ARRAY[1, 2]), (ARRAY[1, 2]), @@ -1720,6 +1756,8 @@ END # Test that string literals can be implicitly casted to array types statement ok DROP TABLE t; + +statement ok CREATE TABLE t ( a INT[], b DECIMAL[], @@ -1731,6 +1769,8 @@ CREATE TABLE t ( h INET[], i VARBIT[], j FLOAT[]); + +statement ok INSERT INTO t VALUES ( '{1, 2}', '{1.1, 2.2}', @@ -1794,6 +1834,8 @@ CREATE TABLE kv ( k INT PRIMARY KEY, v STRING ); + +statement ok INSERT INTO kv VALUES (1, 'one'), (2, 'two'), (3, 'three'), (4, 'four'), (5, null) query T rowsort @@ -2019,8 +2061,10 @@ SELECT array_remove(ARRAY[(1,'cat'),(2,'dog')], (3,'\xc03b4478eb'::BYTEA)) subtest regression_71394 -query T +statement ok PREPARE regression_71394 AS SELECT ARRAY[$1]::int[]; + +query T EXECUTE regression_71394(71394) ---- {71394} @@ -2031,10 +2075,14 @@ subtest array_enums statement ok CREATE TYPE letter AS ENUM ('a', 'b', 'c'); + +statement ok CREATE TABLE kv_enum ( k INT PRIMARY KEY, v letter ); + +statement ok INSERT INTO kv_enum VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, NULL) query T rowsort diff --git a/pkg/sql/logictest/testdata/logic_test/cascade b/pkg/sql/logictest/testdata/logic_test/cascade index 3870c5546f84..64739c791f51 100644 --- a/pkg/sql/logictest/testdata/logic_test/cascade +++ b/pkg/sql/logictest/testdata/logic_test/cascade @@ -41,8 +41,14 @@ CREATE TABLE grandchild ( g INT PRIMARY KEY, c INT REFERENCES child(c) ); + +statement ok INSERT INTO parent VALUES (1), (2); + +statement ok INSERT INTO child VALUES (10, 1), (11, 1), (20, 2), (21, 2); + +statement ok INSERT INTO grandchild VALUES (100, 10), (101, 10), (110, 11); statement ok @@ -72,8 +78,14 @@ CREATE TABLE grandchild ( g INT PRIMARY KEY, c INT REFERENCES child(c) ON DELETE CASCADE ); + +statement ok INSERT INTO parent VALUES (1), (2); + +statement ok INSERT INTO child VALUES (10, 1), (11, 1), (20, 2), (21, 2); + +statement ok INSERT INTO grandchild VALUES (100, 10), (101, 10), (110, 11), (200, 20) statement ok @@ -103,18 +115,26 @@ SELECT * FROM grandchild statement ok DROP TABLE grandchild; + +statement ok DROP TABLE child; + +statement ok DROP TABLE parent # Delete cascade with multiple columns and multiple child tables. statement ok CREATE TABLE parent_multi (pa INT, pb INT, pc INT, UNIQUE INDEX (pa,pb,pc)); + +statement ok CREATE TABLE child_multi_1 ( c INT, a INT, b INT, FOREIGN KEY (a,b,c) REFERENCES parent_multi(pa,pb,pc) ON DELETE CASCADE ); + +statement ok CREATE TABLE child_multi_2 ( b INT, c INT, diff --git a/pkg/sql/logictest/testdata/logic_test/cast b/pkg/sql/logictest/testdata/logic_test/cast index 402d361afe6c..82f2b6521dd4 100644 --- a/pkg/sql/logictest/testdata/logic_test/cast +++ b/pkg/sql/logictest/testdata/logic_test/cast @@ -951,6 +951,8 @@ INSERT INTO assn_cast_p VALUES (1.0, 10.0); # Test ON UPDATE CASCADE. 
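The ON UPDATE CASCADE cases that follow hinge on assignment casts: the child column's type constrains what a cascaded write may store. A sketch of the mechanism, with hypothetical names:

statement ok
CREATE TABLE cas_parent (p DECIMAL(10, 0) UNIQUE)

statement ok
CREATE TABLE cas_child (c INT PRIMARY KEY, p DECIMAL(10, 0) REFERENCES cas_parent (p) ON UPDATE CASCADE)

statement ok
INSERT INTO cas_parent VALUES (1)

statement ok
INSERT INTO cas_child VALUES (1, 1)

# The cascaded value is coerced through an assignment cast to
# DECIMAL(10, 0) before landing in the child row.
statement ok
UPDATE cas_parent SET p = 2 WHERE p = 1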
statement ok CREATE TABLE assn_cast_c (c INT PRIMARY KEY, p DECIMAL(10, 0) REFERENCES assn_cast_p(p) ON UPDATE CASCADE); + +statement ok INSERT INTO assn_cast_c VALUES (1, 1.0); statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_p_fkey" @@ -966,7 +968,11 @@ SELECT * FROM assn_cast_c statement ok DROP TABLE assn_cast_c; + +statement ok CREATE TABLE assn_cast_c (c INT PRIMARY KEY, d DECIMAL(10, 0) REFERENCES assn_cast_p(d) ON UPDATE CASCADE); + +statement ok UPSERT INTO assn_cast_c VALUES (2, 10) statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_d_fkey" @@ -984,7 +990,11 @@ INSERT INTO assn_cast_p VALUES (2.0, 11.00) ON CONFLICT (d) DO UPDATE SET d = 12 # Test ON UPDATE SET DEFAULT. statement ok DROP TABLE assn_cast_c; + +statement ok CREATE TABLE assn_cast_c (c INT PRIMARY KEY, p DECIMAL(10, 0) DEFAULT 3.1 REFERENCES assn_cast_p(p) ON UPDATE SET DEFAULT); + +statement ok INSERT INTO assn_cast_c VALUES (2, 2.0); statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_p_fkey" @@ -1000,7 +1010,11 @@ SELECT * FROM assn_cast_c statement ok DROP TABLE assn_cast_c; + +statement ok CREATE TABLE assn_cast_c (c INT PRIMARY KEY, d DECIMAL(10, 0) DEFAULT 3.1 REFERENCES assn_cast_p(d) ON UPDATE SET DEFAULT); + +statement ok INSERT INTO assn_cast_c VALUES (2, 12) statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_d_fkey" @@ -1176,6 +1190,8 @@ SELECT t1.c0 FROM t1 WHERE t1.c0 BETWEEN t1.c0 AND INTERVAL '-1'::DECIMAL # Regression test for incorrectly casting integers out of range (#64429). statement ok CREATE TABLE t64429 (_int8 INT8, _int4 INT4); + +statement ok INSERT INTO t64429 VALUES (3000000000, 300000); statement error integer out of range for type int2 @@ -1209,10 +1225,14 @@ CREATE TABLE t66067_a ( c CHAR(26), CONSTRAINT c UNIQUE (c) ); + +statement ok CREATE TABLE t66067_b ( a INT, v VARCHAR(40) ); + +statement ok INSERT INTO t66067_a VALUES (1, 'foo'); INSERT INTO t66067_b VALUES (1, 'bar'); @@ -1296,6 +1316,8 @@ f statement ok CREATE TABLE t73450 (c CHAR); + +statement ok INSERT INTO t73450 VALUES ('f') query T diff --git a/pkg/sql/logictest/testdata/logic_test/column_families b/pkg/sql/logictest/testdata/logic_test/column_families index 00458c80854b..7cbb94cce73b 100644 --- a/pkg/sql/logictest/testdata/logic_test/column_families +++ b/pkg/sql/logictest/testdata/logic_test/column_families @@ -37,7 +37,11 @@ SELECT y, z FROM t WHERE x = 2 statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x DECIMAL PRIMARY KEY, y INT, FAMILY (y), FAMILY (x)); + +statement ok INSERT INTO t VALUES (5.607, 1), (5.6007, 2) query TI rowsort @@ -51,16 +55,22 @@ SELECT * FROM t statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x DECIMAL, y DECIMAL, z INT, FAMILY (z), FAMILY (y), FAMILY (x), PRIMARY KEY (x, y)); + +statement ok INSERT INTO t VALUES (1.00, 2.00, 1) query TTI SET tracing=on,kv,results; SELECT * FROM t; -SET tracing=off ---- 1.00 2.00 1 +statement ok +SET tracing=off + query T SELECT message FROM [SHOW KV TRACE FOR SESSION] WHERE message LIKE 'fetched: /t/t_pkey/%' diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal b/pkg/sql/logictest/testdata/logic_test/crdb_internal index 8712bb00b449..ab152a39681d 100644 --- a/pkg/sql/logictest/testdata/logic_test/crdb_internal +++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal @@ -708,8 +708,12 @@ SET application_name = 'test_max_retry' # become different from 0. 
statement OK CREATE SEQUENCE s; - SELECT IF(nextval('s')<3, crdb_internal.force_retry('1h'::INTERVAL), 0); - DROP SEQUENCE s + +statement OK +SELECT IF(nextval('s')<3, crdb_internal.force_retry('1h'::INTERVAL), 0); + +statement OK +DROP SEQUENCE s statement OK RESET application_name diff --git a/pkg/sql/logictest/testdata/logic_test/create_as b/pkg/sql/logictest/testdata/logic_test/create_as index 9ace5116b5c8..c8c51ae8fe6c 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_as +++ b/pkg/sql/logictest/testdata/logic_test/create_as @@ -112,15 +112,21 @@ statement error pq: value type unknown cannot be used for table columns CREATE TABLE foo2 (x) AS (VALUES(NULL)) # Check nulls are handled properly (#13921) +statement ok +CREATE TABLE foo3 (x) AS VALUES (1), (NULL); + query I -CREATE TABLE foo3 (x) AS VALUES (1), (NULL); SELECT * FROM foo3 ORDER BY x +SELECT * FROM foo3 ORDER BY x ---- NULL 1 # Check that CREATE TABLE AS can use subqueries (#23002) +statement ok +CREATE TABLE foo4 (x) AS SELECT EXISTS(SELECT * FROM foo3 WHERE x IS NULL); + query B -CREATE TABLE foo4 (x) AS SELECT EXISTS(SELECT * FROM foo3 WHERE x IS NULL); SELECT * FROM foo4 +SELECT * FROM foo4 ---- true @@ -368,6 +374,8 @@ ROLLBACK # Test CREATE TABLE AS referring to a sequence. statement ok CREATE SEQUENCE seq; + +statement OK CREATE TABLE tab_from_seq AS (SELECT nextval('seq')) query I diff --git a/pkg/sql/logictest/testdata/logic_test/create_statements b/pkg/sql/logictest/testdata/logic_test/create_statements index d28ee17abb54..24c3bb772c88 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_statements +++ b/pkg/sql/logictest/testdata/logic_test/create_statements @@ -6610,14 +6610,23 @@ CREATE INDEX a_idx ON a(b) WITH (fillfactor=50) ---- NOTICE: storage parameter "fillfactor" is ignored +statement ok +DROP TABLE a CASCADE; + query T noticetrace -DROP TABLE a CASCADE; CREATE TABLE a (b INT) WITH (autovacuum_enabled=off) +CREATE TABLE a (b INT) WITH (autovacuum_enabled=off) ---- NOTICE: storage parameter "autovacuum_enabled = 'off'" is ignored +statement ok +DROP TABLE a CASCADE; + query T noticetrace -DROP TABLE a CASCADE; CREATE TABLE a (b INT) WITH (autovacuum_enabled=on) +CREATE TABLE a (b INT) WITH (autovacuum_enabled=on) ---- +statement ok +DROP TABLE a CASCADE; + statement error parameter "autovacuum_enabled" requires a Boolean value -DROP TABLE a CASCADE; CREATE TABLE a (b INT) WITH (autovacuum_enabled='11') +CREATE TABLE a (b INT) WITH (autovacuum_enabled='11') diff --git a/pkg/sql/logictest/testdata/logic_test/create_table b/pkg/sql/logictest/testdata/logic_test/create_table index 9e499e3d2d40..039042219dd2 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_table +++ b/pkg/sql/logictest/testdata/logic_test/create_table @@ -347,7 +347,10 @@ like_hash CREATE TABLE public.like_hash ( ) statement ok -DROP TABLE like_hash; CREATE TABLE like_hash (LIKE like_hash_base INCLUDING ALL) +DROP TABLE like_hash; + +statement ok +CREATE TABLE like_hash (LIKE like_hash_base INCLUDING ALL) query TT SHOW CREATE TABLE like_hash @@ -362,6 +365,8 @@ like_hash CREATE TABLE public.like_hash ( statement ok CREATE TABLE regression_67196 (pk INT PRIMARY KEY, hidden INT NOT VISIBLE); + +statement ok CREATE TABLE regression_67196_like (LIKE regression_67196) query TT diff --git a/pkg/sql/logictest/testdata/logic_test/database b/pkg/sql/logictest/testdata/logic_test/database index 52c46f7f1939..e90bc18c86cd 100644 --- a/pkg/sql/logictest/testdata/logic_test/database +++ b/pkg/sql/logictest/testdata/logic_test/database @@ 
-149,11 +149,21 @@ DROP DATABASE IF EXISTS b statement ok DROP DATABASE b2 CASCADE; - DROP DATABASE b3 CASCADE; - DROP DATABASE b4 CASCADE; - DROP DATABASE b5 CASCADE; - DROP DATABASE b6 CASCADE; - DROP DATABASE b7 CASCADE + +statement ok +DROP DATABASE b3 CASCADE; + +statement ok +DROP DATABASE b4 CASCADE; + +statement ok +DROP DATABASE b5 CASCADE; + +statement ok +DROP DATABASE b6 CASCADE; + +statement ok +DROP DATABASE b7 CASCADE statement error pgcode 42601 empty database name DROP DATABASE "" diff --git a/pkg/sql/logictest/testdata/logic_test/datetime b/pkg/sql/logictest/testdata/logic_test/datetime index 0adccbf095a0..1987bcb122ce 100644 --- a/pkg/sql/logictest/testdata/logic_test/datetime +++ b/pkg/sql/logictest/testdata/logic_test/datetime @@ -1893,6 +1893,8 @@ SELECT statement ok CREATE TABLE timestamp_datestyle_parse(pk SERIAL PRIMARY KEY, s string); + +statement ok INSERT INTO timestamp_datestyle_parse VALUES (1, '07-09-12 11:30:45.123'), (2, '07-09-12') @@ -1942,6 +1944,8 @@ SELECT statement ok CREATE TABLE time_datestyle_parse(pk SERIAL PRIMARY KEY, s string); + +statement ok INSERT INTO time_datestyle_parse VALUES (1, '2007-09-12 11:30:45.123+06'), (2, '2007-09-12 11:30:45.123+03') diff --git a/pkg/sql/logictest/testdata/logic_test/discard b/pkg/sql/logictest/testdata/logic_test/discard index 776bb036fca4..7ce33dd80e40 100644 --- a/pkg/sql/logictest/testdata/logic_test/discard +++ b/pkg/sql/logictest/testdata/logic_test/discard @@ -19,8 +19,11 @@ SET timezone = 'Europe/Amsterdam'; SHOW TIMEZONE ---- Europe/Amsterdam +statement ok +DISCARD ALL; + query T -DISCARD ALL; SHOW TIMEZONE +SHOW TIMEZONE ---- UTC @@ -29,8 +32,11 @@ SET TIME ZONE 'Europe/Amsterdam'; SHOW TIME ZONE ---- Europe/Amsterdam +statement ok +DISCARD ALL + query T -DISCARD ALL; SHOW TIME ZONE +SHOW TIME ZONE ---- UTC diff --git a/pkg/sql/logictest/testdata/logic_test/distinct b/pkg/sql/logictest/testdata/logic_test/distinct index 0739b06cf9fc..f218db9fb07b 100644 --- a/pkg/sql/logictest/testdata/logic_test/distinct +++ b/pkg/sql/logictest/testdata/logic_test/distinct @@ -200,7 +200,11 @@ SELECT DISTINCT (x) FROM t statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x DECIMAL); + +statement ok INSERT INTO t VALUES (1.0), (1.00), (1.000) # We want to ensure that this only returns 1 element. We don't @@ -214,7 +218,11 @@ SELECT COUNT (*) FROM (SELECT DISTINCT (array[x]) FROM t) # Regression for #46709. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (i INT, x INT, y INT, z STRING); + +statement ok INSERT INTO t VALUES (1, 1, 2, 'hello'), (2, 1, 2, 'hello'), diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_stats b/pkg/sql/logictest/testdata/logic_test/distsql_stats index 3ba067e61088..903dac6f5a2f 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_stats +++ b/pkg/sql/logictest/testdata/logic_test/distsql_stats @@ -635,8 +635,14 @@ arr_stats_x {x} 4 3 1 false # Test that enum columns always have histograms collected for them. 
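A bare-bones version of what these statistics tests exercise, with hypothetical names; the expectation, per the comment above, is that a histogram is collected even when the enum column is not part of any index:

statement ok
CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')

statement ok
CREATE TABLE moods (k INT PRIMARY KEY, m mood)

statement ok
INSERT INTO moods VALUES (1, 'sad'), (2, 'happy')

statement ok
CREATE STATISTICS mood_stats FROM moods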
statement ok CREATE TYPE e AS ENUM ('hello', 'howdy', 'hi'); + +statement ok CREATE TABLE et (x e, y e, PRIMARY KEY (x)); + +statement ok INSERT INTO et VALUES ('hello', 'hello'), ('howdy', 'howdy'), ('hi', 'hi'); + +statement ok CREATE STATISTICS s FROM et query TTIIB colnames,rowsort @@ -732,7 +738,11 @@ SET CLUSTER SETTING sql.stats.multi_column_collection.enabled = false statement ok CREATE TABLE prim (a INT, b INT, c INT, PRIMARY KEY (a, b, c)); + +statement ok INSERT INTO prim VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3); + +statement ok CREATE STATISTICS s FROM prim query TTIIB colnames,rowsort @@ -768,7 +778,11 @@ upper_bound range_rows distinct_range_rows equal_rows # collected for them, with up to 200 buckets. statement ok CREATE TABLE sec (a INT, b INT, c INT, INDEX (a, b, c)); + +statement ok INSERT INTO sec VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3); + +statement ok CREATE STATISTICS s FROM sec query TTIIB colnames,rowsort @@ -813,7 +827,11 @@ CREATE TABLE partial ( INDEX (a) WHERE b > 0 OR c > 0, INVERTED INDEX (j) WHERE d = 10 ); + +statement ok INSERT INTO partial VALUES (1, 1, 1, 1, '{"a": "b"}'), (2, 2, 2, 10, '{"c": "d"}'), (3, 3, 3, 1, '{"e": "f"}'); + +statement ok CREATE STATISTICS s FROM partial query TTIIB colnames,rowsort @@ -899,6 +917,8 @@ CREATE TABLE expression ( INVERTED INDEX j_a ((j->'a')), INVERTED INDEX a_j_a (a, (j->'a')) ); + +statement ok INSERT INTO expression VALUES (1, 1, '{"a": "b"}'), (2, 10, '{"c": "d"}'), (3, 1, '{"e": "f"}'); statement ok @@ -926,7 +946,11 @@ s {rowid} 3 0 true # up to 2 buckets. statement ok CREATE TABLE noind (a INT, b INT, c INT); + +statement ok INSERT INTO noind VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3); + +statement ok CREATE STATISTICS s FROM noind query TTIIB colnames,rowsort @@ -965,7 +989,11 @@ CREATE TABLE geo_table ( geog GEOGRAPHY(GEOMETRY,4326) NULL, geom GEOMETRY(GEOMETRY,3857) NULL ); + +statement ok INSERT INTO geo_table VALUES (1, 'LINESTRING(0 0, 100 100)', ST_GeomFromText('LINESTRING(0 0, 100 100)', 3857)); + +statement ok CREATE STATISTICS s FROM geo_table; query TB colnames @@ -984,7 +1012,11 @@ column_names has_histogram statement ok CREATE INDEX geom_idx_1 ON geo_table USING GIST(geom) WITH (geometry_min_x=0, s2_max_level=15); + +statement ok CREATE INDEX geog_idx_1 ON geo_table USING GIST(geog) WITH (s2_level_mod=3); + +statement ok CREATE STATISTICS s FROM geo_table; query TB colnames @@ -1003,7 +1035,11 @@ column_names has_histogram statement ok CREATE INDEX geom_idx_2 ON geo_table USING GIST(geom) WITH (geometry_min_x=5); + +statement ok CREATE INDEX geog_idx_2 ON geo_table USING GIST(geog); + +statement ok CREATE STATISTICS s FROM geo_table; query TB colnames @@ -1034,6 +1070,8 @@ upper_bound statement ok DROP INDEX geo_table@geog_idx_1; + +statement ok CREATE STATISTICS s FROM geo_table; # Demonstrate that buckets change when the first chosen index is dropped. @@ -1058,7 +1096,11 @@ CREATE TABLE multi_col ( j JSON, INVERTED INDEX (s, j) ); + +statement ok INSERT INTO multi_col VALUES (1, 'foo', '{"a": "b"}'); + +statement ok CREATE STATISTICS s FROM multi_col; query TB colnames @@ -1105,7 +1147,11 @@ SET CLUSTER SETTING sql.stats.multi_column_collection.enabled = false # an error. statement ok CREATE TABLE all_null (k INT PRIMARY KEY, c INT); + +statement ok INSERT INTO all_null VALUES (1, NULL); + +statement ok CREATE STATISTICS s FROM all_null query T @@ -1155,8 +1201,14 @@ SELECT * FROM all_null WHERE c IS NOT NULL # Regression for 58220. 
statement ok CREATE TYPE greeting AS ENUM ('hello', 'howdy', 'hi'); + +statement ok CREATE TABLE greeting_stats (x greeting PRIMARY KEY); + +statement ok INSERT INTO greeting_stats VALUES ('hi'); + +statement ok CREATE STATISTICS s FROM greeting_stats query T @@ -1219,7 +1271,11 @@ CREATE TABLE t63387 ( j JSONB, INDEX (i) WHERE j->>'a' = 'b' ); + +statement ok INSERT INTO t63387 VALUES (1, '{}'); + +statement ok CREATE STATISTICS s FROM t63387; # Regression test for #71080. Stats collection should succeed on tables with NOT diff --git a/pkg/sql/logictest/testdata/logic_test/drop_database b/pkg/sql/logictest/testdata/logic_test/drop_database index 7ed71a4bdd0d..f2df10bcd1f1 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_database +++ b/pkg/sql/logictest/testdata/logic_test/drop_database @@ -262,10 +262,16 @@ SET sql_safe_updates = FALSE; DROP DATABASE foo # Check that the default databases can be dropped and re-created like any other. statement OK -DROP DATABASE defaultdb; DROP DATABASE postgres +DROP DATABASE defaultdb; + +statement OK +DROP DATABASE postgres statement ok -CREATE DATABASE defaultdb; CREATE DATABASE postgres +CREATE DATABASE defaultdb; + +statement OK +CREATE DATABASE postgres # Test that an empty database doesn't get a GC job. statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/drop_index b/pkg/sql/logictest/testdata/logic_test/drop_index index f77d4fd6b43d..468450c378bd 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_index +++ b/pkg/sql/logictest/testdata/logic_test/drop_index @@ -287,10 +287,20 @@ subtest fk_drop # there is another index that can satisfy the foreign key constraint. statement ok CREATE TABLE fk1 (x int); + +statement ok CREATE TABLE fk2 (x int PRIMARY KEY); + +statement ok CREATE INDEX i ON fk1 (x); + +statement ok CREATE INDEX i2 ON fk1 (x); + +statement ok ALTER TABLE fk1 ADD CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES fk2 (x); + +statement ok DROP INDEX fk1@i CASCADE query TT diff --git a/pkg/sql/logictest/testdata/logic_test/drop_type b/pkg/sql/logictest/testdata/logic_test/drop_type index ff04a27481fb..2de2088194b8 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_type +++ b/pkg/sql/logictest/testdata/logic_test/drop_type @@ -5,6 +5,8 @@ SET enable_experimental_alter_column_type_general = true; # Basic test -- create and drop a type. statement ok CREATE TYPE t AS ENUM ('hello'); + +statement ok DROP TYPE t statement error type \"t\" does not exist @@ -17,6 +19,8 @@ SELECT ARRAY['hello']::_t # Try dropping a type within a transaction. statement ok CREATE TYPE t AS ENUM ('hello'); + +statement ok BEGIN; DROP TYPE t; @@ -73,6 +77,8 @@ DROP TYPE t # Ensure that references to the array type are tracked. statement ok CREATE TYPE t AS ENUM ('hello'); + +statement ok ALTER TABLE t1 ADD COLUMN x t[] statement error pq: cannot drop type "t" because other objects \(\[test.public.t1\]\) still depend on it @@ -80,12 +86,18 @@ DROP TYPE t statement ok ALTER TABLE t1 DROP COLUMN x; + +statement ok DROP TYPE t # Altering a column's type to a UDT should pick up the reference. statement ok CREATE TYPE t AS ENUM ('hello'); + +statement ok ALTER TABLE t1 ADD COLUMN x STRING; + +statement ok ALTER TABLE t1 ALTER COLUMN x SET DATA TYPE t statement error pq: cannot drop type "t" because other objects \(\[test.public.t1\]\) still depend on it @@ -101,6 +113,8 @@ DROP TYPE t # Ensure that views track their type dependencies. 
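A view that mentions a user-defined type pins that type, which is what the next tests verify; a minimal sketch with hypothetical names:

statement ok
CREATE TYPE note AS ENUM ('hello')

statement ok
CREATE VIEW note_v AS SELECT 'hello'::note

statement error cannot drop type "note" because other objects .* still depend on it
DROP TYPE note

statement ok
DROP VIEW note_v

statement ok
DROP TYPE note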
statement ok CREATE TYPE t AS ENUM ('hello'); + +statement ok CREATE VIEW v AS SELECT 'hello':::t statement error pq: cannot drop type "t" because other objects \(\[test.public.v\]\) still depend on it @@ -116,8 +130,14 @@ DROP TYPE t # default, computed and partial index predicates. statement ok CREATE TYPE t1 AS ENUM ('hello'); + +statement ok CREATE TYPE t2 AS ENUM ('howdy'); + +statement ok CREATE TYPE t3 AS ENUM ('hi'); + +statement ok CREATE TYPE t4 AS ENUM ('cheers') # First, add all of those expressions in the CREATE statement. @@ -144,18 +164,26 @@ DROP TYPE t4 # Now remove the schema elements and drop the types. statement ok DROP INDEX expr@i; + +statement ok DROP TYPE t4 statement ok ALTER TABLE expr DROP CONSTRAINT "check"; + +statement ok DROP TYPE t3 statement ok ALTER TABLE expr DROP COLUMN y; + +statement ok DROP TYPE t2 statement ok ALTER TABLE expr DROP COLUMN x; + +statement ok DROP TYPE t1 statement ok @@ -164,9 +192,17 @@ DROP TABLE expr # Now add all of these schema elements via ALTER commands. statement ok CREATE TABLE expr (); + +statement ok CREATE TYPE t1 AS ENUM ('hello'); + +statement ok CREATE TYPE t2 AS ENUM ('howdy'); + +statement ok CREATE TYPE t3 AS ENUM ('hi'); + +statement ok CREATE TYPE t4 AS ENUM ('cheers') # First try adding all of the schema elements in transactions and ensure that @@ -215,8 +251,14 @@ ROLLBACK # Now add all of the schema elements. statement ok ALTER TABLE expr ADD COLUMN x BOOL DEFAULT ('hello'::t1 = 'hello'::t1); + +statement ok ALTER TABLE expr ADD COLUMN y STRING AS ('howdy'::t2::STRING) STORED; + +statement ok ALTER TABLE expr ADD CONSTRAINT "check" CHECK ('hi'::t3::string = 'hi'); + +statement ok CREATE INDEX i ON expr (y) WHERE ('cheers'::t4 = 'cheers'::t4) statement error pq: cannot drop type "t1" because other objects \(\[test.public.expr\]\) still depend on it @@ -234,25 +276,39 @@ DROP TYPE t4 # Now remove the schema elements and drop the types. statement ok DROP INDEX expr@i; + +statement ok DROP TYPE t4 statement ok ALTER TABLE expr DROP CONSTRAINT "check"; + +statement ok DROP TYPE t3 statement ok ALTER TABLE expr DROP COLUMN y; + +statement ok DROP TYPE t2 statement ok ALTER TABLE expr DROP COLUMN x; + +statement ok DROP TYPE t1 # Check that truncated tables maintain their backreference. statement ok CREATE TYPE ty AS ENUM ('hello'); + +statement ok CREATE TABLE tab (x ty); + +statement ok INSERT INTO tab VALUES ('hello'); + +statement ok TRUNCATE TABLE tab statement error pq: cannot drop type "ty" because other objects \(\[test.public.tab\]\) still depend on it @@ -261,6 +317,8 @@ DROP TYPE ty # Ensure that we can drop a table then a type in the same txn. statement ok CREATE TYPE t AS ENUM ('hello'); + +statement ok CREATE TABLE tt (x t) statement ok @@ -272,6 +330,8 @@ COMMIT # Tests for dropping a database that contains types. statement ok CREATE DATABASE d; + +statement ok CREATE TYPE d.d_t AS ENUM () statement error pq: database \"d\" is not empty and RESTRICT was specified @@ -295,8 +355,14 @@ SELECT * FROM system.descriptor WHERE id = $t_id OR id = $t_id + 1 # Test when some objects in the database use the types. statement ok CREATE DATABASE d; + +statement ok CREATE TYPE d.d_t AS ENUM ('hello'); + +statement ok CREATE TABLE d.t1 (x d.d_t); + +statement ok CREATE TABLE d.t2 (y d.d_t[]) let $t_id @@ -316,10 +382,20 @@ SELECT * FROM system.descriptor WHERE id = $t_id OR id = $t_id + 1 # Create a database with a large number of types. 
statement ok CREATE DATABASE d; + +statement ok CREATE TYPE d.d_type_1 AS ENUM ('hello'); + +statement ok CREATE TYPE d.d_type_2 AS ENUM ('hello'); + +statement ok CREATE TYPE d.d_type_3 AS ENUM ('hello'); + +statement ok CREATE TYPE d.d_type_4 AS ENUM ('hello'); + +statement ok CREATE TYPE d.d_type_5 AS ENUM ('hello') statement ok @@ -332,7 +408,11 @@ subtest regression_57187 statement ok CREATE DATABASE d; + +statement ok CREATE TYPE d."a 0 # Regression test for #49630. statement ok DROP TABLE empty; + +statement ok CREATE TABLE xy (x INT PRIMARY KEY, y INT); CREATE TABLE fk_ref (r INT NOT NULL REFERENCES xy (x)); CREATE TABLE empty (v INT); + +statement ok INSERT INTO xy (VALUES (1, 1)); INSERT INTO fk_ref (VALUES (1)); diff --git a/pkg/sql/logictest/testdata/logic_test/manual_retry b/pkg/sql/logictest/testdata/logic_test/manual_retry index 20c2c94ac1e2..6b7a086e3996 100644 --- a/pkg/sql/logictest/testdata/logic_test/manual_retry +++ b/pkg/sql/logictest/testdata/logic_test/manual_retry @@ -23,7 +23,9 @@ subtest automatic_retry statement ok CREATE SEQUENCE s; - BEGIN TRANSACTION; + +statement ok +BEGIN TRANSACTION; SAVEPOINT cockroach_restart # The SELECT 1 is necessary to take the session out of the AutoRetry state, diff --git a/pkg/sql/logictest/testdata/logic_test/materialized_view b/pkg/sql/logictest/testdata/logic_test/materialized_view index 6f27a5ffd23e..8ea9435b7393 100644 --- a/pkg/sql/logictest/testdata/logic_test/materialized_view +++ b/pkg/sql/logictest/testdata/logic_test/materialized_view @@ -103,8 +103,14 @@ TRUNCATE v # runs into a uniqueness constraint violation. statement ok CREATE TABLE dup (x INT); + +statement ok CREATE MATERIALIZED VIEW v_dup AS SELECT x FROM dup; + +statement ok CREATE UNIQUE INDEX i ON v_dup (x); + +statement ok INSERT INTO dup VALUES (1), (1); statement error pq: duplicate key value violates unique constraint "i"\nDETAIL: Key \(x\)=\(1\) already exists\. @@ -137,6 +143,8 @@ DROP MATERIALIZED VIEW normal_view # an empty view. statement ok CREATE MATERIALIZED VIEW with_options AS SELECT 1; + +statement ok REFRESH MATERIALIZED VIEW with_options WITH DATA query I diff --git a/pkg/sql/logictest/testdata/logic_test/multi_statement b/pkg/sql/logictest/testdata/logic_test/multi_statement index 08a4a289f13d..f3fe3513d2b5 100644 --- a/pkg/sql/logictest/testdata/logic_test/multi_statement +++ b/pkg/sql/logictest/testdata/logic_test/multi_statement @@ -28,7 +28,7 @@ SELECT * FROM kv a b c d -# second statement returns an error +# second statement returns an error, and causes the whole batch to roll back statement error duplicate key value violates unique constraint "kv_pkey"\nDETAIL: Key \(k\)=\('a'\) already exists\.
INSERT INTO kv (k,v) VALUES ('g', 'h'); INSERT INTO kv (k,v) VALUES ('a', 'b') @@ -37,7 +37,6 @@ SELECT * FROM kv ---- a b c d -g h # parse error runs nothing statement error at or near "k": syntax error @@ -48,7 +47,6 @@ SELECT * FROM kv ---- a b c d -g h statement error pq: relation "x.y" does not exist BEGIN; INSERT INTO x.y(a) VALUES (1); END diff --git a/pkg/sql/logictest/testdata/logic_test/on_update b/pkg/sql/logictest/testdata/logic_test/on_update index e26549538357..46f7b2d78aa8 100644 --- a/pkg/sql/logictest/testdata/logic_test/on_update +++ b/pkg/sql/logictest/testdata/logic_test/on_update @@ -303,6 +303,8 @@ DROP SEQUENCE seq_72116 statement ok DROP TABLE table_72116; + +statement ok CREATE TABLE table_72116 (a INT DEFAULT nextval('seq_72116') ON UPDATE NULL) statement error pq: cannot drop sequence seq_72116 because other objects depend on it @@ -310,7 +312,11 @@ DROP SEQUENCE seq_72116 statement ok DROP TABLE table_72116; + +statement ok CREATE TABLE table_72116 (a INT); + +statement ok ALTER TABLE table_72116 ADD COLUMN b INT DEFAULT (1) ON UPDATE nextval('seq_72116') statement error pq: cannot drop sequence seq_72116 because other objects depend on it diff --git a/pkg/sql/logictest/testdata/logic_test/partial_index b/pkg/sql/logictest/testdata/logic_test/partial_index index 368dbe01d70b..2208770966a1 100644 --- a/pkg/sql/logictest/testdata/logic_test/partial_index +++ b/pkg/sql/logictest/testdata/logic_test/partial_index @@ -863,15 +863,25 @@ INSERT INTO u VALUES (1, 1) ON CONFLICT (a) WHERE b < -1 DO NOTHING # an arbiter. statement ok CREATE UNIQUE INDEX i2 ON u (b) WHERE 1 = 1; + +statement ok INSERT INTO u VALUES (1, 1) ON CONFLICT (b) DO NOTHING; + +statement ok DELETE FROM u; + +statement ok DROP INDEX i2; # An ON CONFLICT with any WHERE clause can use a unique non-partial index as an # arbiter. statement ok CREATE UNIQUE INDEX i2 ON u (b); + +statement ok INSERT INTO u VALUES (1, 1) ON CONFLICT (b) WHERE b > 0 DO NOTHING; + +statement ok DROP INDEX i2; # An ON CONFLICT with a WHERE clause can use a unique partial index if the @@ -889,6 +899,8 @@ SELECT * FROM u # arbiters. statement ok CREATE UNIQUE INDEX i2 ON u (a) WHERE b < 0; + +statement ok INSERT INTO u VALUES (-1, -1); statement error pgcode 23505 duplicate key value violates unique constraint \"i2\"\nDETAIL: Key \(a\)=\(-1\) already exists\. @@ -1425,7 +1437,11 @@ SELECT * FROM virt@idx WHERE c = 10 # computed columns.
statement ok DELETE FROM virt; + +statement ok DROP INDEX virt@idx; + +statement ok CREATE UNIQUE INDEX idx ON virt (b) WHERE c > 10; statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog index fa572dd9eef5..545360c6d31a 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -4086,6 +4086,7 @@ distsql_workmem 64 MiB NULL enable_experimental_alter_column_type_general off NULL NULL NULL string enable_experimental_stream_replication off NULL NULL NULL string enable_implicit_select_for_update on NULL NULL NULL string +enable_implicit_transaction_for_batch_statements on NULL NULL NULL string enable_insert_fast_path on NULL NULL NULL string enable_multiregion_placement_policy off NULL NULL NULL string enable_seqscan on NULL NULL NULL string @@ -4204,6 +4205,7 @@ distsql_workmem 64 MiB NULL enable_experimental_alter_column_type_general off NULL user NULL off off enable_experimental_stream_replication off NULL user NULL off off enable_implicit_select_for_update on NULL user NULL on on +enable_implicit_transaction_for_batch_statements on NULL user NULL on on enable_insert_fast_path on NULL user NULL on on enable_multiregion_placement_policy off NULL user NULL off off enable_seqscan on NULL user NULL on on @@ -4317,6 +4319,7 @@ distsql_workmem NULL NULL NULL enable_experimental_alter_column_type_general NULL NULL NULL NULL NULL enable_experimental_stream_replication NULL NULL NULL NULL NULL enable_implicit_select_for_update NULL NULL NULL NULL NULL +enable_implicit_transaction_for_batch_statements NULL NULL NULL NULL NULL enable_insert_fast_path NULL NULL NULL NULL NULL enable_multiregion_placement_policy NULL NULL NULL NULL NULL enable_seqscan NULL NULL NULL NULL NULL @@ -4707,6 +4710,8 @@ conkey confkey # Test an index of 3 referencing an index of 2. statement ok DROP TABLE b; + +statement ok CREATE TABLE b ( id_b_1 INT, id_b_2 INT, @@ -4724,6 +4729,8 @@ conkey confkey # Test an index of 3 referencing an index of 1. statement ok DROP TABLE b; + +statement ok CREATE TABLE b ( id_b_1 INT, id_b_2 INT, @@ -4942,6 +4949,8 @@ SELECT adnum FROM pg_attrdef WHERE adrelid = 't46799'::REGCLASS statement ok ALTER TABLE t46799 DROP COLUMN y; + +statement ok ALTER TABLE t46799 ADD COLUMN y INT DEFAULT 1; # Make sure after adding and dropping the same column, the adnum for the re-added column increases. 
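The claim above can be checked from SQL; a sketch with a hypothetical table, assuming pg_attrdef exposes the re-added column under a larger attribute number:

statement ok
CREATE TABLE attr_demo (x INT DEFAULT 0, y INT DEFAULT 1)

statement ok
ALTER TABLE attr_demo DROP COLUMN y

statement ok
ALTER TABLE attr_demo ADD COLUMN y INT DEFAULT 1

# The re-added y does not reuse the dropped attribute number.
query B
SELECT max(adnum) > 2 FROM pg_attrdef WHERE adrelid = 'attr_demo'::REGCLASS
----
true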
diff --git a/pkg/sql/logictest/testdata/logic_test/prepare b/pkg/sql/logictest/testdata/logic_test/prepare index b440aba76608..86473fcd2622 100644 --- a/pkg/sql/logictest/testdata/logic_test/prepare +++ b/pkg/sql/logictest/testdata/logic_test/prepare @@ -885,7 +885,10 @@ query error pq: column "b" does not exist EXECUTE change_drop statement ok -ALTER TABLE othertable ADD COLUMN b INT; UPDATE othertable SET b=10 +ALTER TABLE othertable ADD COLUMN b INT; + +statement ok +UPDATE othertable SET b=10 query II EXECUTE change_drop diff --git a/pkg/sql/logictest/testdata/logic_test/ranges b/pkg/sql/logictest/testdata/logic_test/ranges index f35b36022fc866c53a1ec585216d8bce369fe329..f67616fea731a0f8ce03bbd8517b9da876b24f06 100644 GIT binary patch delta 136 zcmX^2i*es?#s!6wFBiy6<|`EBoEJ@8x%_~vJ&z>w-D6WJe!sQa=AE4mr=i(Zn zP-0?aVru4)X*Bs_f!yZE!X<2|x;OtRKg$R-Sah-iAKT<>mGY?4lfP7IPB!9`+N@cn G$_xOebTTgh delta 61 zcmdn@oAKN)#s!6w`3mJlU4r}r6g>T0Tq6`pOpHuS%^WffttZD7if?W%+{Cuox#A4t RWZ5d6$>CKRn|rGanE}3W7C8U_ diff --git a/pkg/sql/logictest/testdata/logic_test/reassign_owned_by b/pkg/sql/logictest/testdata/logic_test/reassign_owned_by index 0f5689cd5a86..bea0112cef7e 100644 --- a/pkg/sql/logictest/testdata/logic_test/reassign_owned_by +++ b/pkg/sql/logictest/testdata/logic_test/reassign_owned_by @@ -7,7 +7,11 @@ REASSIGN OWNED BY fake_old_role TO new_role statement ok CREATE ROLE old_role; + +statement ok GRANT CREATE ON DATABASE test TO old_role; + +statement ok ALTER TABLE t OWNER TO old_role user testuser @@ -35,6 +39,8 @@ user root statement ok CREATE ROLE new_role; + +statement ok GRANT CREATE ON DATABASE test TO new_role user testuser @@ -69,6 +75,8 @@ CREATE ROLE testuser2 WITH LOGIN; # Create database for old role statement ok CREATE DATABASE d; + +statement ok ALTER DATABASE d OWNER TO testuser # Check ownership - testuser should own all objects just created @@ -106,11 +114,15 @@ DROP ROLE testuser # Can reassign from more than one old role to new role. statement ok use test; + +statement ok CREATE ROLE testuser; # Create schema for testuser and one for root. statement ok CREATE SCHEMA s1; + +statement ok ALTER SCHEMA s1 OWNER TO testuser statement ok @@ -141,6 +153,8 @@ s2 3957504279 # Ensure testuser2 is new owner by dropping. 
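Ownership transfer is easiest to verify by exercising an owner-only action afterwards, which is the tactic used below; a condensed sketch with hypothetical role and schema names:

statement ok
CREATE ROLE alice

statement ok
CREATE ROLE bob

statement ok
GRANT alice TO root

statement ok
GRANT bob TO root

statement ok
CREATE SCHEMA handoff AUTHORIZATION alice

statement ok
REASSIGN OWNED BY alice TO bob

# alice no longer owns anything, so dropping the role succeeds.
statement ok
DROP ROLE alice

statement ok
DROP SCHEMA handoff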
statement ok DROP SCHEMA s1; + +statement ok DROP SCHEMA s2 user root diff --git a/pkg/sql/logictest/testdata/logic_test/rename_database b/pkg/sql/logictest/testdata/logic_test/rename_database index e46a4a69e58d..b626c0cf4d1d 100644 --- a/pkg/sql/logictest/testdata/logic_test/rename_database +++ b/pkg/sql/logictest/testdata/logic_test/rename_database @@ -223,22 +223,53 @@ statement ok ALTER DATABASE db1 RENAME TO db3 statement ok -DROP DATABASE db2 CASCADE; DROP DATABASE db3 CASCADE +DROP DATABASE db2 CASCADE; statement ok -CREATE DATABASE db1; CREATE SEQUENCE db1.a_seq; CREATE SEQUENCE db1.b_seq; USE db1; +DROP DATABASE db3 CASCADE + +statement ok +CREATE DATABASE db1; + +statement ok +CREATE SEQUENCE db1.a_seq; +CREATE SEQUENCE db1.b_seq; + +statement ok +USE db1; statement ok CREATE TABLE db1.a (a int default nextval('a_seq') + nextval('b_seq') + 1); ALTER DATABASE db1 RENAME TO db2; USE db2; statement ok -DROP TABLE db2.a; CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('db2.b_seq') + 1); ALTER DATABASE db2 RENAME TO db1; ALTER DATABASE db1 RENAME TO db2 +DROP TABLE db2.a; + +statement ok +CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('db2.b_seq') + 1); + +statement ok +ALTER DATABASE db2 RENAME TO db1; + +statement ok +ALTER DATABASE db1 RENAME TO db2 + +statement ok +DROP TABLE db2.a; + +statement ok +CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('db2.public.b_seq') + 1); + +statement ok +ALTER DATABASE db2 RENAME TO db1; ALTER DATABASE db1 RENAME TO db2 + +statement ok +DROP TABLE db2.a; statement ok -DROP TABLE db2.a; CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('db2.public.b_seq') + 1); ALTER DATABASE db2 RENAME TO db1; ALTER DATABASE db1 RENAME TO db2 +CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('public.b_seq') + 1); statement ok -DROP TABLE db2.a; CREATE TABLE db2.a (a int default nextval('a_seq') + nextval('public.b_seq') + 1); ALTER DATABASE db2 RENAME TO db1 +ALTER DATABASE db2 RENAME TO db1 statement ok USE defaultdb; DROP DATABASE db1 CASCADE diff --git a/pkg/sql/logictest/testdata/logic_test/row_level_ttl b/pkg/sql/logictest/testdata/logic_test/row_level_ttl index 785f64a007f9..ad42e95a6d32 100644 --- a/pkg/sql/logictest/testdata/logic_test/row_level_ttl +++ b/pkg/sql/logictest/testdata/logic_test/row_level_ttl @@ -153,6 +153,8 @@ WHERE label LIKE 'row-level-ttl-%' # Ensure schedules are removed on DROP TABLE. statement ok DROP TABLE tbl; + +statement ok CREATE TABLE tbl ( id INT PRIMARY KEY, text TEXT, @@ -177,6 +179,8 @@ WHERE label LIKE 'row-level-ttl-%' # Create TTL on a different schema and ensure schedules are removed when dropped. statement ok CREATE SCHEMA drop_me; + +statement ok CREATE TABLE drop_me.tbl () WITH (ttl_expire_after = '10 minutes'::interval); CREATE TABLE drop_me.tbl2 () WITH (ttl_expire_after = '10 minutes'::interval) @@ -198,7 +202,11 @@ WHERE label LIKE 'row-level-ttl-%' # Create TTL on a different database and ensure schedules are removed when dropped. statement ok CREATE DATABASE drop_me; + +statement ok USE drop_me; + +statement ok CREATE TABLE tbl () WITH (ttl_expire_after = '10 minutes'::interval); CREATE TABLE tbl2 () WITH (ttl_expire_after = '10 minutes'::interval) @@ -374,6 +382,8 @@ ALTER TABLE no_ttl_table SET (ttl_label_metrics = true) statement ok DROP TABLE tbl; + +statement ok CREATE TABLE tbl ( id INT PRIMARY KEY, text TEXT, @@ -442,6 +452,8 @@ CREATE TABLE public.tbl ( # Test adding to TTL table with crdb_internal_expiration already defined. 
statement ok DROP TABLE tbl; + +statement ok CREATE TABLE tbl ( id INT PRIMARY KEY, text TEXT, @@ -493,6 +505,8 @@ ALTER TABLE tbl SET (ttl_expire_after = '10 minutes') statement ok DROP TABLE tbl; + +statement ok CREATE TABLE tbl (id INT, text TEXT, PRIMARY KEY (id, text)) WITH (ttl_expire_after = '10 minutes') statement error non-ascending ordering on PRIMARY KEYs are not supported @@ -502,6 +516,8 @@ ALTER TABLE tbl ALTER PRIMARY KEY USING COLUMNS (id, text DESC) # the schedule and TTL is setup correctly. statement ok DROP TABLE tbl; + +statement ok CREATE TABLE tbl ( id INT PRIMARY KEY, text TEXT, diff --git a/pkg/sql/logictest/testdata/logic_test/secondary_index_column_families b/pkg/sql/logictest/testdata/logic_test/secondary_index_column_families index 9488926a22d6..2360e2706e0c 100644 --- a/pkg/sql/logictest/testdata/logic_test/secondary_index_column_families +++ b/pkg/sql/logictest/testdata/logic_test/secondary_index_column_families @@ -19,6 +19,8 @@ SELECT y, z, w FROM t@i WHERE y = 2 # Test some cases around insert on conflict. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT, @@ -47,6 +49,8 @@ SELECT y, z, v FROM t@i # Test some cases around upsert. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y STRING, @@ -55,6 +59,8 @@ CREATE TABLE t ( FAMILY (y), FAMILY (z), FAMILY (x, w), INDEX i (y) STORING (z, w) ); + +statement ok INSERT INTO t VALUES (1, '2', 3.0, 4), (5, '6', 7.00, 8); UPSERT INTO t VALUES (9, '10', 11.000, 12), (1, '3', 5.0, 16) @@ -68,6 +74,8 @@ SELECT y, z, w FROM t@i # Test some cases around schema changes. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y DECIMAL, @@ -75,6 +83,8 @@ CREATE TABLE t ( w INT, v INT ); + +statement ok INSERT INTO t VALUES (1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13, 14, 15); CREATE INDEX i ON t (y) STORING (z, w, v) @@ -97,6 +107,8 @@ SELECT * FROM t statement ok ALTER TABLE t ADD COLUMN u INT DEFAULT (20) CREATE FAMILY new_fam; + +statement ok CREATE INDEX i ON t (y) STORING (z, w, v, u) query TIIII rowsort diff --git a/pkg/sql/logictest/testdata/logic_test/set_role b/pkg/sql/logictest/testdata/logic_test/set_role index e1dd05385943..9efa32fb2848 100644 --- a/pkg/sql/logictest/testdata/logic_test/set_role +++ b/pkg/sql/logictest/testdata/logic_test/set_role @@ -332,6 +332,8 @@ user root statement ok GRANT ADMIN TO testuser; + +statement ok BEGIN; SET LOCAL ROLE testuser diff --git a/pkg/sql/logictest/testdata/logic_test/show_create_all_tables b/pkg/sql/logictest/testdata/logic_test/show_create_all_tables index 24e1af7f1e1e..435ab630d1a1 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_create_all_tables +++ b/pkg/sql/logictest/testdata/logic_test/show_create_all_tables @@ -227,18 +227,26 @@ ALTER TABLE public.c VALIDATE CONSTRAINT c_i_fkey; # Test that a cycle between two tables is handled correctly. statement ok CREATE DATABASE test_cycle; + +statement ok USE test_cycle; + +statement ok CREATE TABLE loop_a ( id INT PRIMARY KEY, b_id INT, INDEX(b_id), FAMILY f1 (id, b_id) ); + +statement ok CREATE TABLE loop_b ( id INT PRIMARY KEY, a_id INT REFERENCES loop_a ON DELETE CASCADE, FAMILY f1 (id, a_id) ); + +statement ok ALTER TABLE loop_a ADD CONSTRAINT b_id_delete_constraint FOREIGN KEY (b_id) REFERENCES loop_b (id) ON DELETE CASCADE; @@ -268,7 +276,11 @@ ALTER TABLE public.loop_a VALIDATE CONSTRAINT b_id_delete_constraint; # Test that a primary key with a non-default name works. 
statement ok CREATE DATABASE test_primary_key; + +statement ok USE test_primary_key; + +statement ok CREATE TABLE test_primary_key.t ( i int, CONSTRAINT pk_name PRIMARY KEY (i) diff --git a/pkg/sql/logictest/testdata/logic_test/show_create_all_tables_builtin b/pkg/sql/logictest/testdata/logic_test/show_create_all_tables_builtin index f9978c2ce40f..8e4008670695 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_create_all_tables_builtin +++ b/pkg/sql/logictest/testdata/logic_test/show_create_all_tables_builtin @@ -222,18 +222,26 @@ ALTER TABLE public.c VALIDATE CONSTRAINT c_i_fkey; # Test that a cycle between two tables is handled correctly. statement ok CREATE DATABASE test_cycle; + +statement ok USE test_cycle; + +statement ok CREATE TABLE loop_a ( id INT PRIMARY KEY, b_id INT, INDEX(b_id), FAMILY f1 (id, b_id) ); + +statement ok CREATE TABLE loop_b ( id INT PRIMARY KEY, a_id INT REFERENCES loop_a ON DELETE CASCADE, FAMILY f1 (id, a_id) ); + +statement ok ALTER TABLE loop_a ADD CONSTRAINT b_id_delete_constraint FOREIGN KEY (b_id) REFERENCES loop_b (id) ON DELETE CASCADE; @@ -262,6 +270,8 @@ ALTER TABLE public.loop_a VALIDATE CONSTRAINT b_id_delete_constraint; # Test that a primary key with a non-default name works. statement ok CREATE DATABASE test_primary_key; + +statement ok CREATE TABLE test_primary_key.t ( i int, CONSTRAINT pk_name PRIMARY KEY (i) diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source index 06bc90cdd2dc..b0add7554fdf 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_source +++ b/pkg/sql/logictest/testdata/logic_test/show_source @@ -55,6 +55,7 @@ distsql_workmem 64 MiB enable_experimental_alter_column_type_general off enable_experimental_stream_replication off enable_implicit_select_for_update on +enable_implicit_transaction_for_batch_statements on enable_insert_fast_path on enable_multiregion_placement_policy off enable_seqscan on @@ -424,8 +425,11 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE s start_key end_key replicas lease_holder NULL NULL {1} 1 +statement ok +CREATE INDEX ix ON foo(x) + query TTTI colnames -CREATE INDEX ix ON foo(x); SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX foo@ix] +SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX foo@ix] ---- start_key end_key replicas lease_holder NULL NULL {1} 1 diff --git a/pkg/sql/logictest/testdata/logic_test/statement_statistics b/pkg/sql/logictest/testdata/logic_test/statement_statistics index 9a047f9b1dc7..e313f0b1131e 100644 --- a/pkg/sql/logictest/testdata/logic_test/statement_statistics +++ b/pkg/sql/logictest/testdata/logic_test/statement_statistics @@ -23,7 +23,16 @@ true # Check that node_statement_statistics report per statement statement ok -SET application_name = hello; SELECT 1; SELECT 1,2; SELECT 1 +SET application_name = hello; + +statement ok +SELECT 1 + +statement ok +SELECT 1,2 + +statement ok +SELECT 1 # reset for other tests. 
statement ok @@ -32,8 +41,8 @@ SET application_name = '' query TB SELECT key, count >= 1 FROM crdb_internal.node_statement_statistics WHERE application_name = 'hello' AND key LIKE 'SELECT%' ORDER BY key ---- -SELECT _ true -SELECT _, _ true +SELECT _ true +SELECT _, _ true statement ok CREATE TABLE test(x INT, y INT, z INT); INSERT INTO test(x, y, z) VALUES (0,0,0); diff --git a/pkg/sql/logictest/testdata/logic_test/subquery_correlated b/pkg/sql/logictest/testdata/logic_test/subquery_correlated index 71db1eb47665..68d8184462d3 100644 --- a/pkg/sql/logictest/testdata/logic_test/subquery_correlated +++ b/pkg/sql/logictest/testdata/logic_test/subquery_correlated @@ -1050,6 +1050,8 @@ CREATE TABLE groups( id SERIAL PRIMARY KEY, data JSONB ); + +statement ok INSERT INTO groups(data) VALUES('{"name": "Group 1", "members": [{"name": "admin", "type": "USER"}, {"name": "user", "type": "USER"}]}'); INSERT INTO groups(data) VALUES('{"name": "Group 2", "members": [{"name": "admin2", "type": "USER"}]}'); @@ -1139,6 +1141,8 @@ CREATE TABLE users ( name VARCHAR(50), PRIMARY KEY (id) ); + +statement ok INSERT INTO users(id, name) VALUES (1, 'user1'); INSERT INTO users(id, name) VALUES (2, 'user2'); INSERT INTO users(id, name) VALUES (3, 'user3'); @@ -1151,6 +1155,8 @@ CREATE TABLE stuff ( PRIMARY KEY (id), FOREIGN KEY (user_id) REFERENCES users (id) ); + +statement ok INSERT INTO stuff(id, date, user_id) VALUES (1, '2007-10-15'::DATE, 1); INSERT INTO stuff(id, date, user_id) VALUES (2, '2007-12-15'::DATE, 1); INSERT INTO stuff(id, date, user_id) VALUES (3, '2007-11-15'::DATE, 1); diff --git a/pkg/sql/logictest/testdata/logic_test/temp_table b/pkg/sql/logictest/testdata/logic_test/temp_table index 52cd8af5afee..6a384af02c8e 100644 --- a/pkg/sql/logictest/testdata/logic_test/temp_table +++ b/pkg/sql/logictest/testdata/logic_test/temp_table @@ -234,7 +234,10 @@ SELECT * FROM regression_47030 1 statement ok -TRUNCATE regression_47030; INSERT INTO regression_47030 VALUES (2) +TRUNCATE regression_47030 + +statement ok +INSERT INTO regression_47030 VALUES (2) query I SELECT * FROM regression_47030 diff --git a/pkg/sql/logictest/testdata/logic_test/truncate b/pkg/sql/logictest/testdata/logic_test/truncate index 2b99034a5a8f..3cfe8343cf35 100644 --- a/pkg/sql/logictest/testdata/logic_test/truncate +++ b/pkg/sql/logictest/testdata/logic_test/truncate @@ -153,16 +153,38 @@ i3 NULL # don't all start from 1. statement ok DROP TABLE t; + +statement ok CREATE TABLE t (x INT, y INT, z INT); + +statement ok ALTER TABLE t DROP COLUMN y; + +statement ok ALTER TABLE t ADD COLUMN y INT; + +statement ok ALTER TABLE t DROP COLUMN y; + +statement ok ALTER TABLE t ADD COLUMN y INT; + +statement ok CREATE INDEX i ON t (x); + +statement ok DROP INDEX t@i; + +statement ok CREATE INDEX i ON t (x); + +statement ok DROP INDEX t@i; + +statement ok CREATE INDEX i ON t (x); + +statement ok COMMENT ON COLUMN t.y IS 'hello1'; COMMENT ON INDEX t@i IS 'hello2' diff --git a/pkg/sql/logictest/testdata/logic_test/txn b/pkg/sql/logictest/testdata/logic_test/txn index cdae56210e4d..ec660f10fde2 100644 --- a/pkg/sql/logictest/testdata/logic_test/txn +++ b/pkg/sql/logictest/testdata/logic_test/txn @@ -728,8 +728,10 @@ COMMIT # We use a sequence to avoid busy-looping the test. 
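The sequence trick works because nextval is non-transactional: every retry of the batch bumps it, so the injected retry error fires on the first two attempts and the third succeeds. A standalone sketch, with a hypothetical sequence name:

statement ok
CREATE SEQUENCE retry_probe

# Attempts 1 and 2 hit the forced retry error; attempt 3 returns 0.
statement ok
SELECT IF(nextval('retry_probe') < 3, crdb_internal.force_retry('1h'::INTERVAL), 0)

query I
SELECT currval('retry_probe')
----
3

statement ok
DROP SEQUENCE retry_probe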
statement ok CREATE SEQUENCE s; - BEGIN TRANSACTION; - SELECT IF(nextval('s')<3, crdb_internal.force_retry('1h':::INTERVAL), 0) + +statement ok +BEGIN TRANSACTION; +SELECT IF(nextval('s')<3, crdb_internal.force_retry('1h':::INTERVAL), 0) # Demonstrate that the txn was indeed retried. query I @@ -739,13 +741,17 @@ SELECT currval('s') statement ok ROLLBACK; - DROP SEQUENCE s + +statement ok +DROP SEQUENCE s # Automatic retries for the first batch even when that first batch comes after # the BEGIN. statement ok CREATE SEQUENCE s; - BEGIN TRANSACTION; + +statement ok +BEGIN TRANSACTION; statement ok SELECT 1; @@ -759,14 +765,18 @@ SELECT currval('s') statement ok ROLLBACK; - DROP SEQUENCE s + +statement ok +DROP SEQUENCE s # Automatic retries for the first batch even when that first batch comes after # the BEGIN and the BEGIN also has special statements that don't move the txn # state out of the "AutoRetry" state. statement ok CREATE SEQUENCE s; - BEGIN TRANSACTION; + +statement ok +BEGIN TRANSACTION; SAVEPOINT cockroach_restart; SET TRANSACTION PRIORITY HIGH; SET TRANSACTION ISOLATION LEVEL SNAPSHOT; @@ -792,12 +802,16 @@ high statement ok ROLLBACK; - DROP SEQUENCE s + +statement ok +DROP SEQUENCE s # Like above, but the SAVEPOINT is its own batch. statement ok CREATE SEQUENCE s; - BEGIN TRANSACTION + +statement ok +BEGIN TRANSACTION statement ok SAVEPOINT cockroach_restart; @@ -813,13 +827,17 @@ SELECT currval('s') statement ok ROLLBACK; - DROP SEQUENCE s + +statement ok +DROP SEQUENCE s # Automatic retries for the first batch after an explicit restart. statement ok CREATE SEQUENCE s; - BEGIN TRANSACTION; + +statement ok +BEGIN TRANSACTION; SAVEPOINT cockroach_restart; SELECT 1; @@ -841,7 +859,9 @@ SELECT currval('s') statement ok ROLLBACK; - DROP SEQUENCE s + +statement ok +DROP SEQUENCE s # Test READ ONLY/WRITE syntax. @@ -1124,7 +1144,11 @@ iso_8601 statement ok TRUNCATE rewind_session_test; + +statement ok SET intervalstyle = 'postgres'; + +statement ok BEGIN; INSERT INTO rewind_session_test VALUES ('1 day 01:02:03'::interval::string); SET LOCAL intervalstyle = 'iso_8601'; diff --git a/pkg/sql/logictest/testdata/logic_test/union b/pkg/sql/logictest/testdata/logic_test/union index 405beb7fd34f..5829173daeb9 100644 --- a/pkg/sql/logictest/testdata/logic_test/union +++ b/pkg/sql/logictest/testdata/logic_test/union @@ -331,6 +331,8 @@ NULL statement ok DROP TABLE IF EXISTS t1, t2; + +statement ok CREATE TABLE t1 (a INT[]); CREATE TABLE t2 (b INT[]); INSERT INTO t1 VALUES (ARRAY[1]), (ARRAY[2]), (NULL); @@ -347,12 +349,18 @@ NULL # Allow UNION of hidden and non-hidden columns. statement ok CREATE TABLE ab (a INT, b INT); + +statement ok SELECT a, b, rowid FROM ab UNION VALUES (1, 2, 3); + +statement ok DROP TABLE ab; # Regression test for #59148. statement ok CREATE TABLE ab (a INT4, b INT8); + +statement ok INSERT INTO ab VALUES (1, 1), (1, 2), (2, 1), (2, 2); query I rowsort @@ -374,6 +382,8 @@ DROP TABLE ab; # between NULL and a tuple (#59611). 
statement ok CREATE TABLE t59611 (a INT); + +statement ok INSERT INTO t59611 VALUES (1) query T @@ -557,6 +567,8 @@ SELECT a FROM ab UNION ALL SELECT x AS a FROM xy statement ok TRUNCATE ab; TRUNCATE xy; + +statement ok INSERT INTO ab VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5); INSERT INTO xy VALUES (1, 1), (3, 3), (5, 5), (7, 7); diff --git a/pkg/sql/logictest/testdata/logic_test/update b/pkg/sql/logictest/testdata/logic_test/update index 1e2fcebebc63..4becf67c160c 100644 --- a/pkg/sql/logictest/testdata/logic_test/update +++ b/pkg/sql/logictest/testdata/logic_test/update @@ -387,8 +387,11 @@ UPDATE kv SET v = v + 1 ORDER BY v DESC LIMIT 3 RETURNING k,v # Check that UPDATE properly supports LIMIT (MySQL extension) +statement ok +TRUNCATE kv; + statement count 3 -TRUNCATE kv; INSERT INTO kv VALUES (1, 2), (2, 3), (3, 4) +INSERT INTO kv VALUES (1, 2), (2, 3), (3, 4) query II UPDATE kv SET v = v - 1 WHERE k < 10 ORDER BY k LIMIT 1 RETURNING k, v diff --git a/pkg/sql/logictest/testdata/logic_test/upsert b/pkg/sql/logictest/testdata/logic_test/upsert index 8de929c5cdee..21a616ddbb45 100644 --- a/pkg/sql/logictest/testdata/logic_test/upsert +++ b/pkg/sql/logictest/testdata/logic_test/upsert @@ -691,13 +691,19 @@ UPSERT INTO tc VALUES (1,2) subtest regression_29497 statement ok -CREATE TABLE t29497(x INT PRIMARY KEY); BEGIN; ALTER TABLE t29497 ADD COLUMN y INT NOT NULL DEFAULT 123 +CREATE TABLE t29497(x INT PRIMARY KEY); + +statement ok +BEGIN; ALTER TABLE t29497 ADD COLUMN y INT NOT NULL DEFAULT 123 statement error UPSERT has more expressions than target columns UPSERT INTO t29497 VALUES (1, 2) statement ok -ROLLBACK; BEGIN; ALTER TABLE t29497 ADD COLUMN y INT NOT NULL DEFAULT 123 +ROLLBACK; + +statement ok +BEGIN; ALTER TABLE t29497 ADD COLUMN y INT NOT NULL DEFAULT 123 statement error column "y" does not exist INSERT INTO t29497(x) VALUES (1) ON CONFLICT (x) DO UPDATE SET y = 456 diff --git a/pkg/sql/logictest/testdata/logic_test/vectorize b/pkg/sql/logictest/testdata/logic_test/vectorize index 70dc51b17036..696b05ca8f31 100644 --- a/pkg/sql/logictest/testdata/logic_test/vectorize +++ b/pkg/sql/logictest/testdata/logic_test/vectorize @@ -836,6 +836,8 @@ CREATE TABLE t40732 AS SELECT g::INT8 AS _int8, g::STRING AS _string, g::STRING::BYTES AS _bytes FROM generate_series(1, 5) AS g; + +statement ok SET vectorize = experimental_always; INSERT INTO t40732 DEFAULT VALUES; @@ -1192,6 +1194,8 @@ CREATE TABLE t64793 AS g % 2 = 1 AS _bool, g::STRING AS _string FROM ROWS FROM (generate_series(1, 5)) AS g; + +statement ok SET vectorize=experimental_always; INSERT INTO t64793 DEFAULT VALUES; diff --git a/pkg/sql/logictest/testdata/logic_test/views b/pkg/sql/logictest/testdata/logic_test/views index 12d034124ff8..16e8b680cd06 100644 --- a/pkg/sql/logictest/testdata/logic_test/views +++ b/pkg/sql/logictest/testdata/logic_test/views @@ -683,6 +683,8 @@ DROP TABLE t2 statement ok CREATE INDEX i ON t2 (x); CREATE INDEX i2 ON t2 (x); + +statement ok CREATE OR REPLACE VIEW tview AS SELECT x AS x, x+1 AS x1, x+2 AS x2, x+3 AS x3 FROM t2@i statement error pq: cannot drop index \"i\" because view \"tview\" depends on it @@ -727,6 +729,8 @@ user root # Only column a should be depended on in this case. 
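# (The view created below joins ab against itself but selects only ab.a, so
# column b never becomes a dependency.)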
statement ok DROP TABLE ab CASCADE; + +statement ok CREATE TABLE ab (a INT, b INT); CREATE VIEW vab (x) AS SELECT ab.a FROM ab, ab AS ab2 @@ -760,6 +764,8 @@ DROP TABLE toreg; statement ok DROP VIEW vregclass; + +statement ok CREATE VIEW vregclass AS SELECT x FROM (SELECT CAST('toreg' AS regclass) AS x) statement error pq: cannot drop relation "toreg" because view "vregclass" depends on it @@ -767,6 +773,8 @@ DROP TABLE toreg; statement ok DROP VIEW vregclass; + +statement ok CREATE SEQUENCE s_reg; CREATE VIEW vregclass AS SELECT x FROM [SELECT 's_reg'::REGCLASS AS x] @@ -777,11 +785,15 @@ DROP SEQUENCE s_reg # a variable. statement ok DROP VIEW vregclass; + +statement ok CREATE VIEW vregclass AS SELECT x::regclass FROM (SELECT 's_reg' AS x); DROP SEQUENCE s_reg; statement ok DROP VIEW vregclass; + +statement ok CREATE VIEW vregclass AS SELECT x::regclass FROM (SELECT 'does_not_exist' AS x); statement error pq: relation "does_not_exist" does not exist @@ -789,7 +801,11 @@ SELECT * FROM vregclass statement ok DROP VIEW vregclass; + +statement ok CREATE table tregclass(); + +statement ok CREATE VIEW vregclass AS SELECT 1 FROM (SELECT 1) AS foo WHERE 'tregclass'::regclass = 'tregclass'::regclass; statement error pq: cannot drop relation "tregclass" because view "vregclass" depends on it diff --git a/pkg/sql/logictest/testdata/logic_test/window b/pkg/sql/logictest/testdata/logic_test/window index 929b65baa112..09bc5f8b1916 100644 --- a/pkg/sql/logictest/testdata/logic_test/window +++ b/pkg/sql/logictest/testdata/logic_test/window @@ -4251,6 +4251,8 @@ NULL # OFFSET PRECEDING or OFFSET FOLLOWING (#67975). statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x DATE); INSERT INTO t VALUES ('5874897-01-01'::DATE), ('1999-01-08'::DATE); SET vectorize=off; @@ -4265,6 +4267,8 @@ RESET vectorize; # shrinks. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (a INT); INSERT INTO t VALUES (1), (-1), (NULL); SET vectorize=off; @@ -4285,6 +4289,8 @@ RESET vectorize; # values. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (x INT, y STRING); INSERT INTO t VALUES (1, 'NotNull'), (1, NULL), (1, NULL), (2, 'NotNull'), (2, 'NotNull'), (2, 'NotNull'), (2, 'NotNull'); diff --git a/pkg/sql/logictest/testdata/logic_test/zigzag_join b/pkg/sql/logictest/testdata/logic_test/zigzag_join index 77d7a29daaf0..85bfe28b8bb9 100644 --- a/pkg/sql/logictest/testdata/logic_test/zigzag_join +++ b/pkg/sql/logictest/testdata/logic_test/zigzag_join @@ -58,9 +58,13 @@ SET enable_zigzag_join = true statement ok DROP INDEX a@a_idx; DROP INDEX a@b_idx; + +statement ok CREATE INDEX c_idx ON a(c); CREATE INDEX a_idx ON a(a); CREATE INDEX b_idx ON a(b); + +statement ok SELECT n,a,b FROM a WHERE a = 4 AND b = 1; # Regression test for 48003 ("non-values node passed as fixed value to zigzag diff --git a/pkg/sql/materialized_view_test.go b/pkg/sql/materialized_view_test.go index c500c0c7a0d7..020ee9b43339 100644 --- a/pkg/sql/materialized_view_test.go +++ b/pkg/sql/materialized_view_test.go @@ -106,17 +106,14 @@ func TestMaterializedViewRefreshVisibility(t *testing.T) { s, sqlDB, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) + runner := sqlutils.MakeSQLRunner(sqlDB) // Make a materialized view and update the data behind it. 
- if _, err := sqlDB.Exec(` -CREATE DATABASE t; -CREATE TABLE t.t (x INT); -INSERT INTO t.t VALUES (1), (2); -CREATE MATERIALIZED VIEW t.v AS SELECT x FROM t.t; -INSERT INTO t.t VALUES (3); -`); err != nil { - t.Fatal(err) - } + runner.Exec(t, `CREATE DATABASE t;`) + runner.Exec(t, `CREATE TABLE t.t (x INT);`) + runner.Exec(t, `INSERT INTO t.t VALUES (1), (2);`) + runner.Exec(t, `CREATE MATERIALIZED VIEW t.v AS SELECT x FROM t.t;`) + runner.Exec(t, `INSERT INTO t.t VALUES (3);`) // Start a refresh. go func() { @@ -129,7 +126,6 @@ INSERT INTO t.t VALUES (3); <-waitForCommit // Before the refresh commits, we shouldn't see any updated data. - runner := sqlutils.MakeSQLRunner(sqlDB) runner.CheckQueryResults(t, "SELECT * FROM t.v ORDER BY x", [][]string{{"1"}, {"2"}}) // Let the refresh commit. diff --git a/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic index 07bd096941a0..048e6748a7e7 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic @@ -33,6 +33,8 @@ true statement ok SET TRACING=ON; INSERT INTO ab VALUES (1, 1); + +statement ok SET TRACING=OFF query TT @@ -54,6 +56,8 @@ true statement ok SET TRACING=ON; INSERT INTO ab VALUES (2, 2), (3, 3); + +statement ok SET TRACING=OFF query TT @@ -102,6 +106,8 @@ true statement ok SET TRACING=ON; INSERT INTO ab VALUES (6, 6), (7, 7) RETURNING a, b; + +statement ok SET TRACING=OFF query TT @@ -122,6 +128,8 @@ true statement ok SET TRACING=ON; INSERT INTO ab VALUES (8, 8), (9, 9) RETURNING a < b; + +statement ok SET TRACING=OFF query TT @@ -145,6 +153,8 @@ false statement ok SET TRACING=ON; INSERT INTO ab VALUES (10, 10), (11, 11) RETURNING a / b; + +statement ok SET TRACING=OFF query TT @@ -182,6 +192,8 @@ true statement ok SET TRACING=ON; UPSERT INTO ab VALUES (1, 1); + +statement ok SET TRACING=OFF query TT @@ -203,6 +215,8 @@ true statement ok SET TRACING=ON; UPSERT INTO ab VALUES (2, 2), (3, 3); + +statement ok SET TRACING=OFF query TT @@ -251,6 +265,8 @@ true statement ok SET TRACING=ON; UPSERT INTO ab VALUES (6, 6), (7, 7) RETURNING a, b; + +statement ok SET TRACING=OFF query TT @@ -272,6 +288,8 @@ false statement ok SET TRACING=ON; UPSERT INTO ab VALUES (8, 8), (9, 9) RETURNING a + b; + +statement ok SET TRACING=OFF query TT @@ -296,6 +314,8 @@ false statement ok SET TRACING=ON; UPSERT INTO ab VALUES (10, 10), (11, 11) RETURNING a / b; + +statement ok SET TRACING=OFF query TT @@ -333,6 +353,8 @@ true statement ok SET TRACING=ON; UPDATE ab SET b=b+1 WHERE a < 3; + +statement ok SET TRACING=OFF query TT @@ -383,6 +405,8 @@ true statement ok SET TRACING=ON; UPDATE ab SET b=b+1 WHERE a < 3 RETURNING a, b; + +statement ok SET TRACING=OFF query TT @@ -405,6 +429,8 @@ false statement ok SET TRACING=ON; UPDATE ab SET b=b+1 WHERE a < 3 RETURNING a + b; + +statement ok SET TRACING=OFF query TT @@ -430,6 +456,8 @@ false statement ok SET TRACING=ON; UPDATE ab SET b=b+1 WHERE a < 3 RETURNING a / b; + +statement ok SET TRACING=OFF query TT @@ -468,6 +496,8 @@ true statement ok SET TRACING=ON; DELETE FROM ab WHERE a = 1; + +statement ok SET TRACING=OFF query TT @@ -489,6 +519,8 @@ true statement ok SET TRACING=ON; DELETE FROM ab WHERE a IN (2, 3); + +statement ok SET TRACING=OFF query TT @@ -537,6 +569,8 @@ true statement ok SET TRACING=ON; DELETE FROM ab WHERE a IN (6, 7) RETURNING a, b; + +statement ok SET TRACING=OFF query TT @@ -559,6 +593,8 @@ false statement ok SET TRACING=ON; 
DELETE FROM ab WHERE a IN (8, 9) RETURNING a + b; + +statement ok SET TRACING=OFF query TT @@ -584,6 +620,8 @@ false statement ok SET TRACING=ON; DELETE FROM ab WHERE a IN (10, 11) RETURNING a / b; + +statement ok SET TRACING=OFF query TT @@ -633,6 +671,8 @@ false statement ok SET TRACING=ON; INSERT INTO fk_child VALUES (1, 1), (2, 2); + +statement ok SET TRACING=OFF query TT @@ -656,6 +696,8 @@ false statement ok SET TRACING=ON; UPDATE fk_child SET b=b+1 WHERE a < 2; + +statement ok SET TRACING=OFF query TT @@ -681,6 +723,8 @@ false statement ok SET TRACING=ON; DELETE FROM fk_parent WHERE p = 3; + +statement ok SET TRACING=OFF query TT @@ -698,7 +742,11 @@ dist sender send r44: sending batch 1 EndTxn to (n1,s1):1 # Test with a single cascade, which should use autocommit. statement ok DROP TABLE fk_child; + +statement ok CREATE TABLE fk_child (a INT, b INT REFERENCES fk_parent(p) ON DELETE CASCADE, FAMILY f1 (a, b)); + +statement ok INSERT INTO fk_child VALUES (1, 1), (2, 2) # Populate table descriptor cache. @@ -708,6 +756,8 @@ SELECT * FROM fk_parent JOIN fk_child ON p = b statement ok SET TRACING=ON; DELETE FROM fk_parent WHERE p = 2; + +statement ok SET TRACING=OFF query TT @@ -738,6 +788,8 @@ SET TRACING=ON; INSERT INTO ab ( SELECT a*10, b*10 FROM [ INSERT INTO ab VALUES (1, 1), (2, 2) RETURNING a, b ] ); + +statement ok SET TRACING=OFF query TT @@ -763,6 +815,8 @@ statement ok SET TRACING=ON; WITH cte AS (INSERT INTO ab VALUES (3, 3), (4, 4) RETURNING a, b) INSERT INTO ab (SELECT a*10, b*10 FROM cte); + +statement ok SET TRACING=OFF query TT @@ -797,6 +851,8 @@ true statement ok SET tracing = on; INSERT INTO guardrails VALUES (1); + +statement ok SET tracing = off; query TT diff --git a/pkg/sql/opt/exec/execbuilder/testdata/delete b/pkg/sql/opt/exec/execbuilder/testdata/delete index 136ceafc048c..a7f65a4a5ae7 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/delete +++ b/pkg/sql/opt/exec/execbuilder/testdata/delete @@ -242,7 +242,10 @@ statement ok INSERT INTO a VALUES(5) statement ok -SET tracing = on,kv; DELETE FROM a WHERE a = 5; SET tracing = off +SET tracing = on,kv; DELETE FROM a WHERE a = 5; + +statement ok +SET tracing = off query TT SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] diff --git a/pkg/sql/opt/exec/execbuilder/testdata/secondary_index_column_families_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/secondary_index_column_families_nonmetamorphic index d0bddc697d1a..ed8e1f082157 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/secondary_index_column_families_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/secondary_index_column_families_nonmetamorphic @@ -232,6 +232,8 @@ CPut /Table/107/2/1/1/1 -> /TUPLE/2:2:Int/5 (replacing [10 35 4], if exists) # Test composite datatypes. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y DECIMAL, @@ -269,6 +271,8 @@ vectorized: true # Ensure that we always have a k/v in family 0. statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT, @@ -292,6 +296,8 @@ Scan /Table/109/2/2/0 # Ensure that when backfilling an index we only insert the needed k/vs. 
statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT, z INT, w INT, FAMILY (y), FAMILY (x), FAMILY (z), FAMILY (w) @@ -327,6 +333,8 @@ fetched: /t/i/9/8 -> statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT, z INT, w INT, FAMILY (y), FAMILY (x), FAMILY (z), FAMILY (w) @@ -362,6 +370,8 @@ SELECT * FROM t@i statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT, a INT, b INT, c INT, d INT, e INT, f INT, FAMILY (x), FAMILY (y), FAMILY (a, b), FAMILY (c, d), FAMILY (e), FAMILY (f), @@ -458,12 +468,16 @@ SELECT * FROM t@i2 # Ensure that updating a row in the single family case still works. statement ok -DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t + +statement ok CREATE TABLE t ( x INT PRIMARY KEY, y INT, z INT, w INT, INDEX i (y) STORING (z, w), FAMILY (x, y, z, w) -); +) + +statement ok INSERT INTO t VALUES (1, 2, 3, 4) # When the key is changed, we always delete and cput. diff --git a/pkg/sql/opt/exec/execbuilder/testdata/select b/pkg/sql/opt/exec/execbuilder/testdata/select index 3c38c32bf768..d29e1534c90a 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/select +++ b/pkg/sql/opt/exec/execbuilder/testdata/select @@ -5,7 +5,13 @@ # TODO(yuzefovich): clean up the tracing in the vectorized engine and remove # adjustment of vectorize mode (#55821). statement ok -SET vectorize=off; SET tracing = on; BEGIN; SELECT 1; COMMIT; SELECT 2; SET tracing = off; RESET vectorize +SET vectorize=off; SET tracing = on + +statement ok +BEGIN; SELECT 1; COMMIT; SELECT 2 + +statement ok +SET tracing = off; RESET vectorize # Inspect the trace: we exclude messages containing newlines as these # may contain non-deterministic txn object descriptions. @@ -20,22 +26,26 @@ FROM [SHOW TRACE FOR SESSION] WHERE message LIKE '%SPAN START%' OR message LIKE '%pos%executing%'; ---- 0 === SPAN START: session recording === session recording +1 === SPAN START: session tracing === session tracing +1 [Open pos:?] executing Sync session tracing +2 === SPAN START: commit sql txn === commit sql txn 0 [NoTxn pos:?] executing ExecStmt: BEGIN TRANSACTION session recording -1 === SPAN START: sql txn === sql txn -1 [Open pos:?] executing ExecStmt: SELECT 1 sql txn -2 === SPAN START: sql query === sql query -3 === SPAN START: consuming rows === consuming rows -4 === SPAN START: flow === flow -1 [Open pos:?] executing ExecStmt: COMMIT TRANSACTION sql txn -5 === SPAN START: sql query === sql query -6 === SPAN START: commit sql txn === commit sql txn +3 === SPAN START: sql txn === sql txn +3 [Open pos:?] executing ExecStmt: SELECT 1 sql txn +4 === SPAN START: sql query === sql query +5 === SPAN START: consuming rows === consuming rows +6 === SPAN START: flow === flow +3 [Open pos:?] executing ExecStmt: COMMIT TRANSACTION sql txn +7 === SPAN START: sql query === sql query +8 === SPAN START: commit sql txn === commit sql txn 0 [NoTxn pos:?] executing ExecStmt: SELECT 2 session recording -7 === SPAN START: sql txn === sql txn -7 [Open pos:?] executing ExecStmt: SELECT 2 sql txn -8 === SPAN START: sql query === sql query -9 === SPAN START: consuming rows === consuming rows -10 === SPAN START: flow === flow -11 === SPAN START: commit sql txn === commit sql txn +9 === SPAN START: sql txn === sql txn +9 [Open pos:?] 
executing ExecStmt: SELECT 2 sql txn +10 === SPAN START: sql query === sql query +11 === SPAN START: consuming rows === consuming rows +12 === SPAN START: flow === flow +13 === SPAN START: commit sql txn === commit sql txn +0 [NoTxn pos:?] executing Sync session recording 0 [NoTxn pos:?] executing ExecStmt: SET TRACING = off session recording statement ok diff --git a/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic index 887d1020dfb3..3e15752248e7 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic @@ -149,7 +149,10 @@ batch flow coordinator fast path completed sql query rows affected: 1 statement ok -SET tracing = on,kv,results; DROP TABLE t.kv2; SET tracing = off +SET tracing = on,kv,results; DROP TABLE t.kv2 + +statement ok +SET tracing = off query TT $trace_query @@ -159,7 +162,10 @@ commit sql txn Put /Table/3/1/109/2/1 -> table: table: 1)); INSERT INTO foo.a SELECT unnest(ARRAY[1,2,3,4,5]); SET CLUSTER SETTING cluster.organization = 'ACME'; @@ -19,7 +22,6 @@ SELECT (1, 20, 30, 40) = (SELECT a, 1, 2, 3 FROM foo.a LIMIT 1); error: pq: failed to satisfy CHECK constraint (a > 1:::INT8) sql-stats └── $ some app - ├── [nodist] CREATE DATABASE _ ├── [nodist] CREATE TABLE _ (_ INT8, CONSTRAINT _ CHECK (_ > _)) └── [failed,nodist] INSERT INTO _ SELECT unnest(ARRAY[_, _, __more3__]) diff --git a/pkg/sql/tests/data.go b/pkg/sql/tests/data.go index 35f5c6854470..5d4634e3120d 100644 --- a/pkg/sql/tests/data.go +++ b/pkg/sql/tests/data.go @@ -48,13 +48,16 @@ func CheckKeyCountE(t *testing.T, kvDB *kv.DB, span roachpb.Span, numKeys int) e func CreateKVTable(sqlDB *gosql.DB, name string, numRows int) error { // Fix the column families so the key counts don't change if the family // heuristics are updated. - schema := fmt.Sprintf(` - CREATE DATABASE IF NOT EXISTS t; - CREATE TABLE t.%s (k INT PRIMARY KEY, v INT, FAMILY (k), FAMILY (v)); - CREATE INDEX foo on t.%s (v);`, name, name) + schemaStmts := []string{ + `CREATE DATABASE IF NOT EXISTS t;`, + fmt.Sprintf(`CREATE TABLE t.%s (k INT PRIMARY KEY, v INT, FAMILY (k), FAMILY (v));`, name), + fmt.Sprintf(`CREATE INDEX foo on t.%s (v);`, name), + } - if _, err := sqlDB.Exec(schema); err != nil { - return err + for _, stmt := range schemaStmts { + if _, err := sqlDB.Exec(stmt); err != nil { + return err + } } // Bulk insert. diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index f4edd908ed54..b7574bedeb25 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -473,13 +473,10 @@ func TestTxnAutoRetry(t *testing.T) { // lib/pq connection directly. As of Feb 2016, there's code in cli/sql_util.go to // do that. sqlDB.SetMaxOpenConns(1) + sqlRunner := sqlutils.MakeSQLRunner(sqlDB) - if _, err := sqlDB.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT, t DECIMAL); -`); err != nil { - t.Fatal(err) - } + sqlRunner.Exec(t, `CREATE DATABASE t;`) + sqlRunner.Exec(t, `CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT, t DECIMAL);`) // Set up error injection that causes retries. magicVals := createFilterVals(nil, nil) @@ -539,12 +536,16 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT, t DECIMAL); // current allocation count in monitor and checking that it has the // same value at the beginning of each retry. 
rows, err := sqlDB.Query(` +BEGIN; INSERT INTO t.public.test(k, v, t) VALUES (1, 'boulanger', cluster_logical_timestamp()) RETURNING 1; +END; BEGIN; INSERT INTO t.public.test(k, v, t) VALUES (2, 'dromedary', cluster_logical_timestamp()) RETURNING 1; INSERT INTO t.public.test(k, v, t) VALUES (3, 'fajita', cluster_logical_timestamp()) RETURNING 1; END; +BEGIN; INSERT INTO t.public.test(k, v, t) VALUES (4, 'hooly', cluster_logical_timestamp()) RETURNING 1; +END; BEGIN; INSERT INTO t.public.test(k, v, t) VALUES (5, 'josephine', cluster_logical_timestamp()) RETURNING 1; INSERT INTO t.public.test(k, v, t) VALUES (6, 'laureal', cluster_logical_timestamp()) RETURNING 1; diff --git a/pkg/testutils/jobutils/jobs_verification.go b/pkg/testutils/jobutils/jobs_verification.go index 823088faabba..d144302d137a 100644 --- a/pkg/testutils/jobutils/jobs_verification.go +++ b/pkg/testutils/jobutils/jobs_verification.go @@ -167,6 +167,8 @@ func verifySystemJob( sort.Sort(expected.DescriptorIDs) expected.Details = nil if e, a := expected, actual; !reflect.DeepEqual(e, a) { + fmt.Printf("%+v\n", expected) + fmt.Printf("%+v\n", actual) return errors.Errorf("job %d did not match:\n%s", offset, strings.Join(pretty.Diff(e, a), "\n")) } From d8cf539ff807a2e0d36da44173310589891e923e Mon Sep 17 00:00:00 2001 From: Rafi Shamim Date: Thu, 10 Mar 2022 20:44:42 -0500 Subject: [PATCH 2/3] sql: execute batch statements in an implicit transaction Release justification: high value bug fix to existing functionality. Release note (bug fix): Previously statements that arrived in a batch during the simple query protocol would all execute in their own implicit transactions. Now, we match the Postgres wire protocol, so all these statements share the same implicit transaction. If a BEGIN is included in a statement batch, then the existing implicit transaction is upgraded to an explicit transaction. --- pkg/sql/conn_executor.go | 6 +- pkg/sql/conn_executor_exec.go | 5 +- pkg/sql/conn_io.go | 4 + pkg/sql/pgwire/conn.go | 1 + pkg/sql/pgwire/testdata/pgtest/batch_stmt | 151 ++++++++++++++++++++++ 5 files changed, 163 insertions(+), 4 deletions(-) create mode 100644 pkg/sql/pgwire/testdata/pgtest/batch_stmt diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index d715ff0cc428..23ccd6c1bf3d 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -1842,7 +1842,11 @@ func (ex *connExecutor) execCmd() error { ) res = stmtRes - canAutoCommit := ex.implicitTxn() + // In the simple protocol, autocommit only when this is the last statement + // in the batch. This matches the Postgres behavior. See + // "Multiple Statements in a Single Query" at + // https://www.postgresql.org/docs/14/protocol-flow.html. + canAutoCommit := ex.implicitTxn() && tcmd.LastInBatch ev, payload, err = ex.execStmt( ctx, tcmd.Statement, nil /* prepared */, nil /* pinfo */, stmtRes, canAutoCommit, ) diff --git a/pkg/sql/conn_executor_exec.go b/pkg/sql/conn_executor_exec.go index 063d6111dcee..ef0f77c22dc1 100644 --- a/pkg/sql/conn_executor_exec.go +++ b/pkg/sql/conn_executor_exec.go @@ -523,9 +523,8 @@ func (ex *connExecutor) execStmtInOpenState( switch s := ast.(type) { case *tree.BeginTransaction: - // BEGIN is only allowed if we are in an implicit txn that was started - // in the extended protocol. - if isExtendedProtocol && os.ImplicitTxn.Get() { + // BEGIN is only allowed if we are in an implicit txn. 
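+ // When it is, the implicit txn is upgraded to an explicit txn, and any
+ // statements already executed in the batch become part of it.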
+ if os.ImplicitTxn.Get() { ex.sessionDataStack.PushTopClone() return eventTxnUpgradeToExplicit{}, nil, nil } diff --git a/pkg/sql/conn_io.go b/pkg/sql/conn_io.go index 216c6c3104fc..04b4eea645e3 100644 --- a/pkg/sql/conn_io.go +++ b/pkg/sql/conn_io.go @@ -136,6 +136,10 @@ type ExecStmt struct { // stats reporting. ParseStart time.Time ParseEnd time.Time + + // LastInBatch indicates if this command contains the last query in a + // simple protocol Query message that contains a batch of 1 or more queries. + LastInBatch bool } // command implements the Command interface. diff --git a/pkg/sql/pgwire/conn.go b/pkg/sql/pgwire/conn.go index a2df43fb0ac1..d241653395d6 100644 --- a/pkg/sql/pgwire/conn.go +++ b/pkg/sql/pgwire/conn.go @@ -869,6 +869,7 @@ func (c *conn) handleSimpleQuery( TimeReceived: timeReceived, ParseStart: startParse, ParseEnd: endParse, + LastInBatch: i == len(stmts)-1, }); err != nil { return err } diff --git a/pkg/sql/pgwire/testdata/pgtest/batch_stmt b/pkg/sql/pgwire/testdata/pgtest/batch_stmt new file mode 100644 index 000000000000..6df5ee3cec3d --- /dev/null +++ b/pkg/sql/pgwire/testdata/pgtest/batch_stmt @@ -0,0 +1,151 @@ +send +Query {"String": "DROP TABLE IF EXISTS mytable"} +---- + +until ignore=NoticeResponse +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"DROP TABLE"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +send +Query {"String": "CREATE TABLE mytable(a INT8)"} +---- + +until +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"CREATE TABLE"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +# All the statements in the batch should be executed with the same implicit +# transaction. So no data should be inserted. +send +Query {"String": "INSERT INTO mytable VALUES(1); SELECT 1/0; INSERT INTO mytable VALUES(2);"} +---- + +until ignore=RowDescription +ErrorResponse +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"INSERT 0 1"} +{"Type":"ErrorResponse","Code":"22012"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +send +Query {"String": "SELECT * FROM mytable"} +---- + +until ignore_table_oids +ReadyForQuery +---- +{"Type":"RowDescription","Fields":[{"Name":"a","TableOID":0,"TableAttributeNumber":1,"DataTypeOID":20,"DataTypeSize":8,"TypeModifier":-1,"Format":0}]} +{"Type":"CommandComplete","CommandTag":"SELECT 0"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +# A batch can also start and end an explicit transaction, but the statements +# after the COMMIT are in a separate implicit transaction. +send +Query {"String": "BEGIN; INSERT INTO mytable VALUES(1); COMMIT; SELECT 1/0; INSERT INTO mytable VALUES(2);"} +---- + +until ignore=RowDescription +ErrorResponse +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"BEGIN"} +{"Type":"CommandComplete","CommandTag":"INSERT 0 1"} +{"Type":"CommandComplete","CommandTag":"COMMIT"} +{"Type":"ErrorResponse","Code":"22012"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +send +Query {"String": "SELECT * FROM mytable"} +---- + +until ignore_table_oids +ReadyForQuery +---- +{"Type":"RowDescription","Fields":[{"Name":"a","TableOID":0,"TableAttributeNumber":1,"DataTypeOID":20,"DataTypeSize":8,"TypeModifier":-1,"Format":0}]} +{"Type":"DataRow","Values":[{"text":"1"}]} +{"Type":"CommandComplete","CommandTag":"SELECT 1"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +# A BEGIN in the middle of a batch upgrades the implicit txn to an explicit one. 
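+# In the batch below, the leading INSERT starts the implicit txn, BEGIN
+# upgrades it to an explicit txn that commits at COMMIT, and the trailing
+# INSERT and failing division run in a fresh implicit txn that rolls back,
+# so of this batch only the values 2 and 3 are persisted.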
+send +Query {"String": "INSERT INTO mytable VALUES(2); BEGIN; INSERT INTO mytable VALUES(3); COMMIT; INSERT INTO mytable VALUES(4); SELECT 1/0; "} +---- + +until ignore=RowDescription +ErrorResponse +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"INSERT 0 1"} +{"Type":"CommandComplete","CommandTag":"BEGIN"} +{"Type":"CommandComplete","CommandTag":"INSERT 0 1"} +{"Type":"CommandComplete","CommandTag":"COMMIT"} +{"Type":"CommandComplete","CommandTag":"INSERT 0 1"} +{"Type":"ErrorResponse","Code":"22012"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +send +Query {"String": "SELECT * FROM mytable"} +---- + +until ignore_table_oids +ReadyForQuery +---- +{"Type":"RowDescription","Fields":[{"Name":"a","TableOID":0,"TableAttributeNumber":1,"DataTypeOID":20,"DataTypeSize":8,"TypeModifier":-1,"Format":0}]} +{"Type":"DataRow","Values":[{"text":"1"}]} +{"Type":"DataRow","Values":[{"text":"2"}]} +{"Type":"DataRow","Values":[{"text":"3"}]} +{"Type":"CommandComplete","CommandTag":"SELECT 3"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +# If an explicit txn fails during a batch, the txn state should end up as E +# (for error). +send +Query {"String": "BEGIN; SELECT 1/0; COMMIT;"} +---- + +until ignore=RowDescription +ErrorResponse +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"BEGIN"} +{"Type":"ErrorResponse","Code":"22012"} +{"Type":"ReadyForQuery","TxStatus":"E"} + +send +Query {"String": "ROLLBACK"} +---- + +until ignore_table_oids +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"ROLLBACK"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +# Execution of the Query message stops at the first error, so even the last +# ROLLBACK below does not get executed. +send +Query {"String": "BEGIN; SELECT 1/0; ROLLBACK;"} +---- + +until ignore=RowDescription +ErrorResponse +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"BEGIN"} +{"Type":"ErrorResponse","Code":"22012"} +{"Type":"ReadyForQuery","TxStatus":"E"} + +send +Query {"String": "ROLLBACK"} +---- + +until ignore_table_oids +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"ROLLBACK"} +{"Type":"ReadyForQuery","TxStatus":"I"} From 4fc43c804963952cc474cb324cea3182ed42b0c7 Mon Sep 17 00:00:00 2001 From: Rafi Shamim Date: Thu, 10 Mar 2022 21:27:59 -0500 Subject: [PATCH 3/3] sql: add session var for old implicit txn behavior Release justification: low risk new setting. Release note (sql change): The enable_implicit_transaction_for_batch_statements session variable was added. It defaults to true. When it is true, multiple statements in a single query (a.k.a. a "batch statement") will all be run in the same implicit transaction, which matches the Postgres wire protocol. This setting is provided for users who want to preserve the behavior of CockroachDB versions v21.2 and earlier. --- pkg/sql/conn_executor.go | 5 +- pkg/sql/exec_util.go | 4 ++ pkg/sql/pgwire/testdata/pgtest/batch_stmt | 49 +++++++++++++++++++ .../local_only_session_data.proto | 6 +++ pkg/sql/trace_test.go | 17 ++++--- pkg/sql/vars.go | 17 +++++++ 6 files changed, 91 insertions(+), 7 deletions(-) diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index 23ccd6c1bf3d..ae665e2dee47 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -1846,7 +1846,10 @@ func (ex *connExecutor) execCmd() error { // in the batch. This matches the Postgres behavior. See // "Multiple Statements in a Single Query" at // https://www.postgresql.org/docs/14/protocol-flow.html. 
- canAutoCommit := ex.implicitTxn() && tcmd.LastInBatch + // The behavior is configurable, in case users want to preserve the + // behavior from v21.2 and earlier. + implicitTxnForBatch := ex.sessionData().EnableImplicitTransactionForBatchStatements + canAutoCommit := ex.implicitTxn() && (tcmd.LastInBatch || !implicitTxnForBatch) ev, payload, err = ex.execStmt( ctx, tcmd.Statement, nil /* prepared */, nil /* pinfo */, stmtRes, canAutoCommit, ) diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 18f012cd2fea..6b676d2c1363 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -3174,6 +3174,10 @@ func (m *sessionDataMutator) SetCostScansWithDefaultColSize(val bool) { m.data.CostScansWithDefaultColSize = val } +func (m *sessionDataMutator) SetEnableImplicitTransactionForBatchStatements(val bool) { + m.data.EnableImplicitTransactionForBatchStatements = val +} + // Utility functions related to scrubbing sensitive information on SQL Stats. // quantizeCounts ensures that the Count field in the diff --git a/pkg/sql/pgwire/testdata/pgtest/batch_stmt b/pkg/sql/pgwire/testdata/pgtest/batch_stmt index 6df5ee3cec3d..25f45ed2fbd6 100644 --- a/pkg/sql/pgwire/testdata/pgtest/batch_stmt +++ b/pkg/sql/pgwire/testdata/pgtest/batch_stmt @@ -1,3 +1,13 @@ +send crdb_only +Query {"String": "SET enable_implicit_transaction_for_batch_statements = 'true'"} +---- + +until crdb_only +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"SET"} +{"Type":"ReadyForQuery","TxStatus":"I"} + send Query {"String": "DROP TABLE IF EXISTS mytable"} ---- @@ -149,3 +159,42 @@ ReadyForQuery ---- {"Type":"CommandComplete","CommandTag":"ROLLBACK"} {"Type":"ReadyForQuery","TxStatus":"I"} + +send crdb_only +Query {"String": "SET enable_implicit_transaction_for_batch_statements = 'false'"} +---- + +until crdb_only +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"SET"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +# With the enable_implicit_transaction_for_batch_statements setting off, the +# following batch statement should now result in one INSERT succeeding. +send crdb_only +Query {"String": "INSERT INTO mytable VALUES(4); SELECT 1/0; INSERT INTO mytable VALUES(5);"} +---- + +until crdb_only ignore=RowDescription +ErrorResponse +ReadyForQuery +---- +{"Type":"CommandComplete","CommandTag":"INSERT 0 1"} +{"Type":"ErrorResponse","Code":"22012"} +{"Type":"ReadyForQuery","TxStatus":"I"} + +send crdb_only +Query {"String": "SELECT * FROM mytable"} +---- + +until crdb_only ignore_table_oids +ReadyForQuery +---- +{"Type":"RowDescription","Fields":[{"Name":"a","TableOID":0,"TableAttributeNumber":1,"DataTypeOID":20,"DataTypeSize":8,"TypeModifier":-1,"Format":0}]} +{"Type":"DataRow","Values":[{"text":"1"}]} +{"Type":"DataRow","Values":[{"text":"2"}]} +{"Type":"DataRow","Values":[{"text":"3"}]} +{"Type":"DataRow","Values":[{"text":"4"}]} +{"Type":"CommandComplete","CommandTag":"SELECT 4"} +{"Type":"ReadyForQuery","TxStatus":"I"} diff --git a/pkg/sql/sessiondatapb/local_only_session_data.proto b/pkg/sql/sessiondatapb/local_only_session_data.proto index ec7140ec5366..8c0738b6e72d 100644 --- a/pkg/sql/sessiondatapb/local_only_session_data.proto +++ b/pkg/sql/sessiondatapb/local_only_session_data.proto @@ -236,6 +236,12 @@ message LocalOnlySessionData { // OverrideAlterPrimaryRegionInSuperRegion is true when the user is allowed // to modify a primary region that is part of a super region. 
bool override_alter_primary_region_in_super_region = 65; + // EnableImplicitTransactionForBatchStatements configures the handling of + // multiple statements in a single query. If enabled, then an implicit + // transaction is used when multiple statements are sent in a single query. + // Setting this to false is a divergence from the pgwire protocol, but + // matches the behavior of CockroachDB v21.2 and earlier. + bool enable_implicit_transaction_for_batch_statements = 66; /////////////////////////////////////////////////////////////////////////// // WARNING: consider whether a session parameter you're adding needs to // diff --git a/pkg/sql/trace_test.go b/pkg/sql/trace_test.go index e9136b5d22e5..988567386d72 100644 --- a/pkg/sql/trace_test.go +++ b/pkg/sql/trace_test.go @@ -264,16 +264,17 @@ func TestTrace(t *testing.T) { defer cluster.Stopper().Stop(context.Background()) clusterDB := cluster.ServerConn(0) + if _, err := clusterDB.Exec(`CREATE DATABASE test;`); err != nil { + t.Fatal(err) + } if _, err := clusterDB.Exec(` - CREATE DATABASE test; - --- test.foo is a single range table. CREATE TABLE test.foo (id INT PRIMARY KEY); - --- test.bar is a multi-range table. - CREATE TABLE test.bar (id INT PRIMARY KEY); - ALTER TABLE test.bar SPLIT AT VALUES (5); - `); err != nil { + CREATE TABLE test.bar (id INT PRIMARY KEY);`); err != nil { + t.Fatal(err) + } + if _, err := clusterDB.Exec(`ALTER TABLE test.bar SPLIT AT VALUES (5);`); err != nil { t.Fatal(err) } @@ -306,6 +307,10 @@ func TestTrace(t *testing.T) { pgURL, cleanup := sqlutils.PGUrl( t, cluster.Server(i).ServingSQLAddr(), "TestTrace", url.User(security.RootUser)) defer cleanup() + q := pgURL.Query() + // This makes it easier to test with the `tracing` session var. + q.Add("enable_implicit_transaction_for_batch_statements", "false") + pgURL.RawQuery = q.Encode() sqlDB, err := gosql.Open("postgres", pgURL.String()) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go index a498bef1f5cc..eacf40c39e54 100644 --- a/pkg/sql/vars.go +++ b/pkg/sql/vars.go @@ -1973,6 +1973,23 @@ var varGen = map[string]sessionVar{ GlobalDefault: func(sv *settings.Values) string { return formatBoolAsPostgresSetting(overrideAlterPrimaryRegionInSuperRegion.Get(sv)) }}, + + // CockroachDB extension. + `enable_implicit_transaction_for_batch_statements`: { + GetStringVal: makePostgresBoolGetStringValFn(`enable_implicit_transaction_for_batch_statements`), + Set: func(_ context.Context, m sessionDataMutator, s string) error { + b, err := paramparse.ParseBoolVar("enable_implicit_transaction_for_batch_statements", s) + if err != nil { + return err + } + m.SetEnableImplicitTransactionForBatchStatements(b) + return nil + }, + Get: func(evalCtx *extendedEvalContext) (string, error) { + return formatBoolAsPostgresSetting(evalCtx.SessionData().EnableImplicitTransactionForBatchStatements), nil + }, + GlobalDefault: globalTrue, + }, } const compatErrMsg = "this parameter is currently recognized only for compatibility and has no effect in CockroachDB."
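For illustration, a minimal client-side sketch of the behavior these commits introduce. This sketch is not part of the patch: the connection URL and table name are placeholder assumptions for a local insecure single-node cluster, and it relies on lib/pq sending a parameter-free Exec as a single simple-protocol Query message, which is exactly the batching path changed above.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Placeholder URL: a local insecure CockroachDB node on the default port.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS demo (a INT8)`); err != nil {
		log.Fatal(err)
	}

	// With no placeholder arguments, lib/pq sends all three statements in one
	// simple-protocol Query message. They now share one implicit transaction,
	// so the division-by-zero error also rolls back the first INSERT.
	if _, err := db.Exec(
		`INSERT INTO demo VALUES (1); SELECT 1/0; INSERT INTO demo VALUES (2)`,
	); err != nil {
		fmt.Println("batch failed:", err) // pq: division by zero
	}

	var n int
	if err := db.QueryRow(`SELECT count(*) FROM demo`).Scan(&n); err != nil {
		log.Fatal(err)
	}
	fmt.Println("rows in demo:", n) // 0 under the new default behavior
}

With enable_implicit_transaction_for_batch_statements = false (the v21.2-and-earlier behavior), the same program would report one row, since the first INSERT would commit in its own implicit transaction before the error; as in the trace_test.go change above, the variable can also be set per connection through a URL parameter.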