diff --git a/docs/generated/sql/bnf/delete_stmt.bnf b/docs/generated/sql/bnf/delete_stmt.bnf index e82054b0f2c0..3ddf01c6c7fc 100644 --- a/docs/generated/sql/bnf/delete_stmt.bnf +++ b/docs/generated/sql/bnf/delete_stmt.bnf @@ -1,2 +1,2 @@ delete_stmt ::= - ( ( 'WITH' ( ( common_table_expr ) ( ( ',' common_table_expr ) )* ) | 'WITH' 'RECURSIVE' ( ( common_table_expr ) ( ( ',' common_table_expr ) )* ) ) | ) 'DELETE' 'FROM' ( ( ( 'ONLY' | ) table_name opt_index_flags ( '*' | ) ) | ( ( 'ONLY' | ) table_name opt_index_flags ( '*' | ) ) table_alias_name | ( ( 'ONLY' | ) table_name opt_index_flags ( '*' | ) ) 'AS' table_alias_name ) ( 'USING' ( ( table_ref ) ( ( ',' table_ref ) )* ) | ) ( ( 'WHERE' a_expr ) | ) ( sort_clause | ) ( limit_clause | ) ( 'RETURNING' target_list | 'RETURNING' 'NOTHING' | ) + ( ( 'WITH' ( ( common_table_expr ) ( ( ',' common_table_expr ) )* ) | 'WITH' 'RECURSIVE' ( ( common_table_expr ) ( ( ',' common_table_expr ) )* ) ) | ) 'DELETE' opt_batch_clause 'FROM' ( ( ( 'ONLY' | ) table_name opt_index_flags ( '*' | ) ) | ( ( 'ONLY' | ) table_name opt_index_flags ( '*' | ) ) table_alias_name | ( ( 'ONLY' | ) table_name opt_index_flags ( '*' | ) ) 'AS' table_alias_name ) ( 'USING' ( ( table_ref ) ( ( ',' table_ref ) )* ) | ) ( ( 'WHERE' a_expr ) | ) ( sort_clause | ) ( limit_clause | ) ( 'RETURNING' target_list | 'RETURNING' 'NOTHING' | ) diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf index 8afa0cda18e7..14ceea13c785 100644 --- a/docs/generated/sql/bnf/stmt_block.bnf +++ b/docs/generated/sql/bnf/stmt_block.bnf @@ -198,7 +198,7 @@ create_stmt ::= | create_schedule_stmt delete_stmt ::= - opt_with_clause 'DELETE' 'FROM' table_expr_opt_alias_idx opt_using_clause opt_where_clause opt_sort_clause opt_limit_clause returning_clause + opt_with_clause 'DELETE' opt_batch_clause 'FROM' table_expr_opt_alias_idx opt_using_clause opt_where_clause opt_sort_clause opt_limit_clause returning_clause drop_stmt ::= drop_ddl_stmt @@ -603,6 +603,11 @@ opt_with_clause ::= with_clause | +opt_batch_clause ::= + 'BATCH' + | 'BATCH' '(' batch_param_list ')' + | + table_expr_opt_alias_idx ::= table_name_opt_idx | table_name_opt_idx table_alias_name @@ -1037,6 +1042,7 @@ unreserved_keyword ::= | 'BACKUP' | 'BACKUPS' | 'BACKWARD' + | 'BATCH' | 'BEFORE' | 'BEGIN' | 'BINARY' @@ -1378,6 +1384,7 @@ unreserved_keyword ::= | 'SHARED' | 'SHOW' | 'SIMPLE' + | 'SIZE' | 'SKIP' | 'SKIP_LOCALITIES_CHECK' | 'SKIP_MISSING_FOREIGN_KEYS' @@ -1794,6 +1801,9 @@ with_clause ::= 'WITH' cte_list | 'WITH' 'RECURSIVE' cte_list +batch_param_list ::= + ( batch_param ) ( ( ',' batch_param ) )* + table_name_opt_idx ::= opt_only table_name opt_index_flags opt_descendant @@ -2563,6 +2573,9 @@ opt_full_backup_clause ::= cte_list ::= ( common_table_expr ) ( ( ',' common_table_expr ) )* +batch_param ::= + 'SIZE' a_expr + opt_only ::= 'ONLY' | @@ -3510,6 +3523,7 @@ bare_label_keywords ::= | 'BACKUP' | 'BACKUPS' | 'BACKWARD' + | 'BATCH' | 'BEFORE' | 'BEGIN' | 'BETWEEN' @@ -3936,6 +3950,7 @@ bare_label_keywords ::= | 'SHOW' | 'SIMILAR' | 'SIMPLE' + | 'SIZE' | 'SKIP' | 'SKIP_LOCALITIES_CHECK' | 'SKIP_MISSING_FOREIGN_KEYS' diff --git a/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go b/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go index 816d5b0df5f9..9fc4c086cb31 100644 --- a/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go +++ b/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go @@ -570,6 +570,13 @@ func TestTenantLogic_delete( runLogicTest(t, "delete") } +func 
TestTenantLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestTenantLogic_dependencies( t *testing.T, ) { diff --git a/pkg/cmd/roachtest/tests/failover.go b/pkg/cmd/roachtest/tests/failover.go index 3b9f41a251dd..e173e842d59d 100644 --- a/pkg/cmd/roachtest/tests/failover.go +++ b/pkg/cmd/roachtest/tests/failover.go @@ -194,9 +194,11 @@ func runFailoverChaos(ctx context.Context, t test.Test, c cluster.Cluster, readO settings.Env = append(settings.Env, "COCKROACH_ENABLE_UNSAFE_TEST_BUILTINS=true") settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication + m := c.NewMonitor(ctx, c.Range(1, 9)) + failers := []Failer{} for _, failureMode := range allFailureModes { - failer := makeFailerWithoutLocalNoop(t, c, failureMode, opts, settings, rng) + failer := makeFailerWithoutLocalNoop(t, c, m, failureMode, opts, settings, rng) if c.IsLocal() && !failer.CanUseLocal() { t.L().Printf("skipping failure mode %q on local cluster", failureMode) continue @@ -209,7 +211,6 @@ func runFailoverChaos(ctx context.Context, t test.Test, c cluster.Cluster, readO c.Put(ctx, t.Cockroach(), "./cockroach") c.Start(ctx, t.L(), opts, settings, c.Range(1, 9)) - m := c.NewMonitor(ctx, c.Range(1, 9)) conn := c.Conn(ctx, t.L(), 1) // Place 5 replicas of all ranges on n3-n9, keeping n1-n2 as SQL gateways. @@ -285,7 +286,7 @@ func runFailoverChaos(ctx context.Context, t test.Test, c cluster.Cluster, readO d.numReplicas = 1 + rng.Intn(5) d.onlyLeaseholders = rng.Float64() < 0.5 } - failer.Ready(ctx, m) + failer.Ready(ctx) nodeFailers[node] = failer } @@ -369,14 +370,15 @@ func runFailoverPartialLeaseGateway(ctx context.Context, t test.Test, c cluster. settings := install.MakeClusterSettings() settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication - failer := makeFailer(t, c, failureModeBlackhole, opts, settings, rng).(PartialFailer) + m := c.NewMonitor(ctx, c.Range(1, 7)) + + failer := makeFailer(t, c, m, failureModeBlackhole, opts, settings, rng).(PartialFailer) failer.Setup(ctx) defer failer.Cleanup(ctx) c.Put(ctx, t.Cockroach(), "./cockroach") c.Start(ctx, t.L(), opts, settings, c.Range(1, 7)) - m := c.NewMonitor(ctx, c.Range(1, 7)) conn := c.Conn(ctx, t.L(), 1) // Place all ranges on n1-n3 to start with. @@ -441,7 +443,7 @@ func runFailoverPartialLeaseGateway(ctx context.Context, t test.Test, c cluster. for _, tc := range testcases { sleepFor(ctx, t, time.Minute) - failer.Ready(ctx, m) + failer.Ready(ctx) // Ranges and leases may occasionally escape their constraints. Move // them to where they should be. @@ -503,7 +505,9 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C settings.Env = append(settings.Env, "COCKROACH_DISABLE_LEADER_FOLLOWS_LEASEHOLDER=true") settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication - failer := makeFailer(t, c, failureModeBlackhole, opts, settings, rng).(PartialFailer) + m := c.NewMonitor(ctx, c.Range(1, 6)) + + failer := makeFailer(t, c, m, failureModeBlackhole, opts, settings, rng).(PartialFailer) failer.Setup(ctx) defer failer.Cleanup(ctx) @@ -524,7 +528,6 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C // Now that system ranges are properly placed on n1-n3, start n4-n6. c.Start(ctx, t.L(), opts, settings, c.Range(4, 6)) - m := c.NewMonitor(ctx, c.Range(1, 6)) // Create the kv database on n4-n6. 
t.L().Printf("creating workload database") @@ -577,7 +580,7 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C for _, node := range []int{4, 5, 6} { sleepFor(ctx, t, time.Minute) - failer.Ready(ctx, m) + failer.Ready(ctx) // Ranges may occasionally escape their constraints. Move them to where // they should be. @@ -637,14 +640,15 @@ func runFailoverPartialLeaseLiveness(ctx context.Context, t test.Test, c cluster settings := install.MakeClusterSettings() settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication - failer := makeFailer(t, c, failureModeBlackhole, opts, settings, rng).(PartialFailer) + m := c.NewMonitor(ctx, c.Range(1, 7)) + + failer := makeFailer(t, c, m, failureModeBlackhole, opts, settings, rng).(PartialFailer) failer.Setup(ctx) defer failer.Cleanup(ctx) c.Put(ctx, t.Cockroach(), "./cockroach") c.Start(ctx, t.L(), opts, settings, c.Range(1, 7)) - m := c.NewMonitor(ctx, c.Range(1, 7)) conn := c.Conn(ctx, t.L(), 1) // Place all ranges on n1-n3, and an extra liveness leaseholder replica on n4. @@ -694,7 +698,7 @@ func runFailoverPartialLeaseLiveness(ctx context.Context, t test.Test, c cluster for _, node := range []int{5, 6, 7} { sleepFor(ctx, t, time.Minute) - failer.Ready(ctx, m) + failer.Ready(ctx) // Ranges and leases may occasionally escape their constraints. Move // them to where they should be. @@ -755,14 +759,15 @@ func runFailoverNonSystem( settings.Env = append(settings.Env, "COCKROACH_ENABLE_UNSAFE_TEST_BUILTINS=true") settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication - failer := makeFailer(t, c, failureMode, opts, settings, rng) + m := c.NewMonitor(ctx, c.Range(1, 6)) + + failer := makeFailer(t, c, m, failureMode, opts, settings, rng) failer.Setup(ctx) defer failer.Cleanup(ctx) c.Put(ctx, t.Cockroach(), "./cockroach") c.Start(ctx, t.L(), opts, settings, c.Range(1, 6)) - m := c.NewMonitor(ctx, c.Range(1, 6)) conn := c.Conn(ctx, t.L(), 1) // Constrain all existing zone configs to n1-n3. @@ -805,7 +810,7 @@ func runFailoverNonSystem( for _, node := range []int{4, 5, 6} { sleepFor(ctx, t, time.Minute) - failer.Ready(ctx, m) + failer.Ready(ctx) // Ranges may occasionally escape their constraints. Move them // to where they should be. @@ -864,14 +869,15 @@ func runFailoverLiveness( settings.Env = append(settings.Env, "COCKROACH_ENABLE_UNSAFE_TEST_BUILTINS=true") settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication - failer := makeFailer(t, c, failureMode, opts, settings, rng) + m := c.NewMonitor(ctx, c.Range(1, 4)) + + failer := makeFailer(t, c, m, failureMode, opts, settings, rng) failer.Setup(ctx) defer failer.Cleanup(ctx) c.Put(ctx, t.Cockroach(), "./cockroach") c.Start(ctx, t.L(), opts, settings, c.Range(1, 4)) - m := c.NewMonitor(ctx, c.Range(1, 4)) conn := c.Conn(ctx, t.L(), 1) // Constrain all existing zone configs to n1-n3. @@ -919,7 +925,7 @@ func runFailoverLiveness( for i := 0; i < 9; i++ { sleepFor(ctx, t, time.Minute) - failer.Ready(ctx, m) + failer.Ready(ctx) // Ranges and leases may occasionally escape their constraints. Move them // to where they should be. 
@@ -979,14 +985,15 @@ func runFailoverSystemNonLiveness( settings.Env = append(settings.Env, "COCKROACH_ENABLE_UNSAFE_TEST_BUILTINS=true") settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication - failer := makeFailer(t, c, failureMode, opts, settings, rng) + m := c.NewMonitor(ctx, c.Range(1, 6)) + + failer := makeFailer(t, c, m, failureMode, opts, settings, rng) failer.Setup(ctx) defer failer.Cleanup(ctx) c.Put(ctx, t.Cockroach(), "./cockroach") c.Start(ctx, t.L(), opts, settings, c.Range(1, 6)) - m := c.NewMonitor(ctx, c.Range(1, 6)) conn := c.Conn(ctx, t.L(), 1) // Constrain all existing zone configs to n4-n6, except liveness which is @@ -1034,7 +1041,7 @@ func runFailoverSystemNonLiveness( for _, node := range []int{4, 5, 6} { sleepFor(ctx, t, time.Minute) - failer.Ready(ctx, m) + failer.Ready(ctx) // Ranges may occasionally escape their constraints. Move them // to where they should be. @@ -1093,12 +1100,13 @@ var allFailureModes = []failureMode{ func makeFailer( t test.Test, c cluster.Cluster, + m cluster.Monitor, failureMode failureMode, opts option.StartOpts, settings install.ClusterSettings, rng *rand.Rand, ) Failer { - f := makeFailerWithoutLocalNoop(t, c, failureMode, opts, settings, rng) + f := makeFailerWithoutLocalNoop(t, c, m, failureMode, opts, settings, rng) if c.IsLocal() && !f.CanUseLocal() { t.L().Printf( `failure mode %q not supported on local clusters, using "noop" failure mode instead`, @@ -1111,6 +1119,7 @@ func makeFailer( func makeFailerWithoutLocalNoop( t test.Test, c cluster.Cluster, + m cluster.Monitor, failureMode failureMode, opts option.StartOpts, settings install.ClusterSettings, @@ -1140,6 +1149,7 @@ func makeFailerWithoutLocalNoop( return &crashFailer{ t: t, c: c, + m: m, startOpts: opts, startSettings: settings, } @@ -1147,6 +1157,7 @@ func makeFailerWithoutLocalNoop( return &deadlockFailer{ t: t, c: c, + m: m, rng: rng, startOpts: opts, startSettings: settings, @@ -1157,6 +1168,7 @@ func makeFailerWithoutLocalNoop( return &diskStallFailer{ t: t, c: c, + m: m, startOpts: opts, startSettings: settings, staller: &dmsetupDiskStaller{t: t, c: c}, @@ -1194,7 +1206,7 @@ type Failer interface { // Ready is called some time before failing each node, when the cluster and // workload is running and after recovering the previous node failure if any. - Ready(ctx context.Context, m cluster.Monitor) + Ready(ctx context.Context) // Cleanup cleans up when the test exits. This is needed e.g. when the cluster // is reused by a different test. @@ -1223,7 +1235,7 @@ func (f *noopFailer) String() string { return string(f. 
func (f *noopFailer) CanUseLocal() bool { return true } func (f *noopFailer) CanRunWith(failureMode) bool { return true } func (f *noopFailer) Setup(context.Context) {} -func (f *noopFailer) Ready(context.Context, cluster.Monitor) {} +func (f *noopFailer) Ready(context.Context) {} func (f *noopFailer) Cleanup(context.Context) {} func (f *noopFailer) Fail(context.Context, int) {} func (f *noopFailer) FailPartial(context.Context, int, []int) {} @@ -1251,11 +1263,11 @@ func (f *blackholeFailer) Mode() failureMode { return failureModeBlackhole } -func (f *blackholeFailer) String() string { return string(f.Mode()) } -func (f *blackholeFailer) CanUseLocal() bool { return false } // needs iptables -func (f *blackholeFailer) CanRunWith(failureMode) bool { return true } -func (f *blackholeFailer) Setup(context.Context) {} -func (f *blackholeFailer) Ready(context.Context, cluster.Monitor) {} +func (f *blackholeFailer) String() string { return string(f.Mode()) } +func (f *blackholeFailer) CanUseLocal() bool { return false } // needs iptables +func (f *blackholeFailer) CanRunWith(failureMode) bool { return true } +func (f *blackholeFailer) Setup(context.Context) {} +func (f *blackholeFailer) Ready(context.Context) {} func (f *blackholeFailer) Cleanup(ctx context.Context) { f.c.Run(ctx, f.c.All(), `sudo iptables -F`) @@ -1336,13 +1348,13 @@ type crashFailer struct { startSettings install.ClusterSettings } -func (f *crashFailer) Mode() failureMode { return failureModeCrash } -func (f *crashFailer) String() string { return string(f.Mode()) } -func (f *crashFailer) CanUseLocal() bool { return true } -func (f *crashFailer) CanRunWith(failureMode) bool { return true } -func (f *crashFailer) Setup(_ context.Context) {} -func (f *crashFailer) Ready(_ context.Context, m cluster.Monitor) { f.m = m } -func (f *crashFailer) Cleanup(_ context.Context) {} +func (f *crashFailer) Mode() failureMode { return failureModeCrash } +func (f *crashFailer) String() string { return string(f.Mode()) } +func (f *crashFailer) CanUseLocal() bool { return true } +func (f *crashFailer) CanRunWith(failureMode) bool { return true } +func (f *crashFailer) Setup(context.Context) {} +func (f *crashFailer) Ready(context.Context) {} +func (f *crashFailer) Cleanup(context.Context) {} func (f *crashFailer) Fail(ctx context.Context, nodeID int) { f.m.ExpectDeath() @@ -1378,9 +1390,7 @@ func (f *deadlockFailer) CanRunWith(m failureMode) bool { return true } func (f *deadlockFailer) Setup(context.Context) {} func (f *deadlockFailer) Cleanup(context.Context) {} -func (f *deadlockFailer) Ready(ctx context.Context, m cluster.Monitor) { - f.m = m - +func (f *deadlockFailer) Ready(ctx context.Context) { // In chaos tests, other nodes will be failing concurrently. We therefore // can't run SHOW CLUSTER RANGES WITH DETAILS in Fail(), since it needs to // read from all ranges. 
Instead, we fetch a snapshot of replicas and leases @@ -1495,15 +1505,12 @@ func (f *diskStallFailer) Mode() failureMode { return failureModeDiskS func (f *diskStallFailer) String() string { return string(f.Mode()) } func (f *diskStallFailer) CanUseLocal() bool { return false } // needs dmsetup func (f *diskStallFailer) CanRunWith(failureMode) bool { return true } +func (f *diskStallFailer) Ready(context.Context) {} func (f *diskStallFailer) Setup(ctx context.Context) { f.staller.Setup(ctx) } -func (f *diskStallFailer) Ready(_ context.Context, m cluster.Monitor) { - f.m = m -} - func (f *diskStallFailer) Cleanup(ctx context.Context) { f.staller.Unstall(ctx, f.c.All()) // We have to stop the cluster before cleaning up the staller. @@ -1545,7 +1552,7 @@ func (f *pauseFailer) CanRunWith(other failureMode) bool { return other != failureModeDiskStall } -func (f *pauseFailer) Ready(ctx context.Context, _ cluster.Monitor) { +func (f *pauseFailer) Ready(ctx context.Context) { // The process pause can trip the disk stall detector, so we disable it. We // could let it fire, but we'd like to see if the node can recover from the // pause and keep working. diff --git a/pkg/security/tls_ciphersuites.go b/pkg/security/tls_ciphersuites.go index fd62efd316f1..0127f6d12619 100644 --- a/pkg/security/tls_ciphersuites.go +++ b/pkg/security/tls_ciphersuites.go @@ -52,7 +52,7 @@ func RecommendedCipherSuites() []uint16 { } // OldCipherSuites returns a list of "old" cipher suites for TLS v1.2, -// from the list created by Mozilla[1]. These are enabled with the +// which adds back all ciphers from v22.1. These are enabled with the // use of the COCKROACH_TLS_ENABLE_OLD_CIPHER_SUITES environment // variable, and should strictly be used when the software // CockroachDB is being used with cannot be upgraded. @@ -62,11 +62,15 @@ func RecommendedCipherSuites() []uint16 { // backwards compatibility should be added here, so organizations // can opt into using deprecated cipher suites rather than opting // every CRDB cluster into a worse security stance. 
-// -// [1]: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility func OldCipherSuites() []uint16 { return []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, } } diff --git a/pkg/sql/logictest/testdata/logic_test/delete_batch b/pkg/sql/logictest/testdata/logic_test/delete_batch new file mode 100644 index 000000000000..9f077129e86b --- /dev/null +++ b/pkg/sql/logictest/testdata/logic_test/delete_batch @@ -0,0 +1,27 @@ +subtest default_size + +statement error DELETE BATCH not implemented +DELETE BATCH FROM tbl; + +subtest end + +subtest constant_size + +statement error DELETE BATCH \(SIZE \) not implemented +DELETE BATCH (SIZE 1) FROM tbl; + +subtest end + +subtest subquery_size + +statement error DELETE BATCH \(SIZE \) not implemented +DELETE BATCH (SIZE (SELECT 1)) FROM tbl; + +subtest end + +subtest multiple_sizes + +statement error invalid parameter at index 1, SIZE already specified +DELETE BATCH (SIZE 1, SIZE 1) FROM tbl; + +subtest end diff --git a/pkg/sql/logictest/tests/fakedist-disk/generated_test.go b/pkg/sql/logictest/tests/fakedist-disk/generated_test.go index 4efc374c8465..5addf5d3d164 100644 --- a/pkg/sql/logictest/tests/fakedist-disk/generated_test.go +++ b/pkg/sql/logictest/tests/fakedist-disk/generated_test.go @@ -548,6 +548,13 @@ func TestLogic_delete( runLogicTest(t, "delete") } +func TestLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestLogic_dependencies( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go b/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go index 5c71acf2f337..ea40211c8829 100644 --- a/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go +++ b/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go @@ -548,6 +548,13 @@ func TestLogic_delete( runLogicTest(t, "delete") } +func TestLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestLogic_dependencies( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/fakedist/generated_test.go b/pkg/sql/logictest/tests/fakedist/generated_test.go index a80d691fff98..4c51b334b384 100644 --- a/pkg/sql/logictest/tests/fakedist/generated_test.go +++ b/pkg/sql/logictest/tests/fakedist/generated_test.go @@ -548,6 +548,13 @@ func TestLogic_delete( runLogicTest(t, "delete") } +func TestLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestLogic_dependencies( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go b/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go index 32dadfab5f99..6abad146f6d7 100644 --- a/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go +++ b/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go @@ -541,6 +541,13 @@ func TestLogic_delete( runLogicTest(t, "delete") } +func TestLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestLogic_dependencies( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local-mixed-22.2-23.1/generated_test.go 
b/pkg/sql/logictest/tests/local-mixed-22.2-23.1/generated_test.go index 68c67f8d9e7b..ca2b8731b9d2 100644 --- a/pkg/sql/logictest/tests/local-mixed-22.2-23.1/generated_test.go +++ b/pkg/sql/logictest/tests/local-mixed-22.2-23.1/generated_test.go @@ -541,6 +541,13 @@ func TestLogic_delete( runLogicTest(t, "delete") } +func TestLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestLogic_dependencies( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local-vec-off/generated_test.go b/pkg/sql/logictest/tests/local-vec-off/generated_test.go index 3ecd2dfa1c2f..adbb1c4a925f 100644 --- a/pkg/sql/logictest/tests/local-vec-off/generated_test.go +++ b/pkg/sql/logictest/tests/local-vec-off/generated_test.go @@ -548,6 +548,13 @@ func TestLogic_delete( runLogicTest(t, "delete") } +func TestLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestLogic_dependencies( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local/generated_test.go b/pkg/sql/logictest/tests/local/generated_test.go index 7cfb9aca8fc3..9a5cb42a797a 100644 --- a/pkg/sql/logictest/tests/local/generated_test.go +++ b/pkg/sql/logictest/tests/local/generated_test.go @@ -569,6 +569,13 @@ func TestLogic_delete( runLogicTest(t, "delete") } +func TestLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + func TestLogic_dependencies( t *testing.T, ) { diff --git a/pkg/sql/opt/optbuilder/delete.go b/pkg/sql/opt/optbuilder/delete.go index bcac82ceb73d..12d15db1a896 100644 --- a/pkg/sql/opt/optbuilder/delete.go +++ b/pkg/sql/opt/optbuilder/delete.go @@ -38,6 +38,28 @@ func (b *Builder) buildDelete(del *tree.Delete, inScope *scope) (outScope *scope "DELETE statement requires LIMIT when ORDER BY is used")) } + batch := del.Batch + if batch != nil { + var hasSize bool + for i, param := range batch.Params { + switch param.(type) { + case *tree.SizeBatchParam: + if hasSize { + panic(pgerror.Newf(pgcode.Syntax, "invalid parameter at index %d, SIZE already specified", i)) + } + hasSize = true + } + } + if hasSize { + // TODO(ecwall): remove when DELETE BATCH is supported + panic(pgerror.Newf(pgcode.Syntax, + "DELETE BATCH (SIZE ) not implemented")) + } + // TODO(ecwall): remove when DELETE BATCH is supported + panic(pgerror.Newf(pgcode.Syntax, + "DELETE BATCH not implemented")) + } + // Find which table we're working on, check the permissions. 
tab, depName, alias, refColumns := b.resolveTableForMutation(del.Table, privilege.DELETE) diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y index 9efbbe30ba52..e771e74f2ea0 100644 --- a/pkg/sql/parser/sql.y +++ b/pkg/sql/parser/sql.y @@ -482,6 +482,18 @@ func (u *sqlSymUnion) tblExprs() tree.TableExprs { func (u *sqlSymUnion) from() tree.From { return u.val.(tree.From) } +func (u *sqlSymUnion) batch() *tree.Batch { + if batch, ok := u.val.(*tree.Batch); ok { + return batch + } + return nil +} +func (u *sqlSymUnion) batchParam() tree.BatchParam { + return u.val.(tree.BatchParam) +} +func (u *sqlSymUnion) batchParams() []tree.BatchParam { + return u.val.([]tree.BatchParam) +} func (u *sqlSymUnion) superRegion() tree.SuperRegion { return u.val.(tree.SuperRegion) } @@ -893,7 +905,7 @@ func (u *sqlSymUnion) showCreateFormatOption() tree.ShowCreateFormatOption { %token ALL ALTER ALWAYS ANALYSE ANALYZE AND AND_AND ANY ANNOTATE_TYPE ARRAY AS ASC AS_JSON AT_AT %token ASENSITIVE ASYMMETRIC AT ATOMIC ATTRIBUTE AUTHORIZATION AUTOMATIC AVAILABILITY -%token BACKUP BACKUPS BACKWARD BEFORE BEGIN BETWEEN BIGINT BIGSERIAL BINARY BIT +%token BACKUP BACKUPS BACKWARD BATCH BEFORE BEGIN BETWEEN BIGINT BIGSERIAL BINARY BIT %token BUCKET_COUNT %token BOOLEAN BOTH BOX2D BUNDLE BY @@ -978,7 +990,7 @@ func (u *sqlSymUnion) showCreateFormatOption() tree.ShowCreateFormatOption { %token SAVEPOINT SCANS SCATTER SCHEDULE SCHEDULES SCROLL SCHEMA SCHEMA_ONLY SCHEMAS SCRUB %token SEARCH SECOND SECONDARY SECURITY SELECT SEQUENCE SEQUENCES %token SERIALIZABLE SERVER SERVICE SESSION SESSIONS SESSION_USER SET SETOF SETS SETTING SETTINGS -%token SHARE SHARED SHOW SIMILAR SIMPLE SKIP SKIP_LOCALITIES_CHECK SKIP_MISSING_FOREIGN_KEYS +%token SHARE SHARED SHOW SIMILAR SIMPLE SIZE SKIP SKIP_LOCALITIES_CHECK SKIP_MISSING_FOREIGN_KEYS %token SKIP_MISSING_SEQUENCES SKIP_MISSING_SEQUENCE_OWNERS SKIP_MISSING_VIEWS SKIP_MISSING_UDFS SMALLINT SMALLSERIAL SNAPSHOT SOME SPLIT SQL %token SQLLOGIN %token STABLE START STATE STATISTICS STATUS STDIN STDOUT STOP STREAM STRICT STRING STORAGE STORE STORED STORING SUBSTRING SUPER @@ -1436,6 +1448,7 @@ func (u *sqlSymUnion) showCreateFormatOption() tree.ShowCreateFormatOption { %type index_params create_as_params %type name_list privilege_list %type <[]int32> opt_array_bounds +%type <*tree.Batch> opt_batch_clause %type from_clause %type from_list rowsfrom_list opt_from_list %type table_pattern_list @@ -1457,6 +1470,9 @@ func (u *sqlSymUnion) showCreateFormatOption() tree.ShowCreateFormatOption { %type opt_using_clause %type opt_clear_data +%type batch_param +%type <[]tree.BatchParam> batch_param_list + %type <[]tree.SequenceOption> sequence_option_list opt_sequence_option_list %type sequence_option_elem @@ -5261,27 +5277,62 @@ changefeed_sink: } // %Help: DELETE - delete rows from a table // %Category: DML -// %Text: DELETE FROM [WHERE ] -// [ORDER BY ] -// [USING ] -// [LIMIT ] -// [RETURNING ] +// %Text: +// DELETE +// [BATCH [SIZE ]] +// FROM +// [WHERE ] +// [ORDER BY ] +// [USING ] +// [LIMIT ] +// [RETURNING ] // %SeeAlso: WEBDOCS/delete.html delete_stmt: - opt_with_clause DELETE FROM table_expr_opt_alias_idx opt_using_clause opt_where_clause opt_sort_clause opt_limit_clause returning_clause + opt_with_clause DELETE opt_batch_clause FROM table_expr_opt_alias_idx opt_using_clause opt_where_clause opt_sort_clause opt_limit_clause returning_clause { $$.val = &tree.Delete{ With: $1.with(), - Table: $4.tblExpr(), - Using: $5.tblExprs(), - Where: tree.NewWhere(tree.AstWhere, $6.expr()), - 
OrderBy: $7.orderBy(), - Limit: $8.limit(), - Returning: $9.retClause(), + Batch: $3.batch(), + Table: $5.tblExpr(), + Using: $6.tblExprs(), + Where: tree.NewWhere(tree.AstWhere, $7.expr()), + OrderBy: $8.orderBy(), + Limit: $9.limit(), + Returning: $10.retClause(), } } | opt_with_clause DELETE error // SHOW HELP: DELETE +opt_batch_clause: + BATCH + { + $$.val = &tree.Batch{} + } +| BATCH '(' batch_param_list ')' + { + $$.val = &tree.Batch{Params: $3.batchParams()} + } +| /* EMPTY */ + { + $$.val = (*tree.Batch)(nil) + } + +batch_param_list: + batch_param + { + $$.val = []tree.BatchParam{$1.batchParam()} + } +| batch_param_list ',' batch_param + { + $$.val = append($1.batchParams(), $3.batchParam()) + } + +batch_param: + SIZE a_expr + { + $$.val = &tree.SizeBatchParam{Size: $2.expr()} + } + opt_using_clause: USING from_list { @@ -16353,6 +16404,7 @@ unreserved_keyword: | BACKUP | BACKUPS | BACKWARD +| BATCH | BEFORE | BEGIN | BINARY @@ -16694,6 +16746,7 @@ unreserved_keyword: | SHARED | SHOW | SIMPLE +| SIZE | SKIP | SKIP_LOCALITIES_CHECK | SKIP_MISSING_FOREIGN_KEYS @@ -16817,6 +16870,7 @@ bare_label_keywords: | BACKUP | BACKUPS | BACKWARD +| BATCH | BEFORE | BEGIN | BETWEEN @@ -17243,6 +17297,7 @@ bare_label_keywords: | SHOW | SIMILAR | SIMPLE +| SIZE | SKIP | SKIP_LOCALITIES_CHECK | SKIP_MISSING_FOREIGN_KEYS diff --git a/pkg/sql/parser/testdata/delete b/pkg/sql/parser/testdata/delete index 5dc6bcda6325..1b9095ce9906 100644 --- a/pkg/sql/parser/testdata/delete +++ b/pkg/sql/parser/testdata/delete @@ -173,3 +173,27 @@ DELETE FROM a USING b AS one, c AS two, d AS three, e AS four WHERE (f != g) AND DELETE FROM a USING b AS one, c AS two, d AS three, e AS four WHERE ((((f) != (g))) AND (((g) = (h)))) RETURNING (e) -- fully parenthesized DELETE FROM a USING b AS one, c AS two, d AS three, e AS four WHERE (f != g) AND (g = h) RETURNING e -- literals removed DELETE FROM _ USING _ AS _, _ AS _, _ AS _, _ AS _ WHERE (_ != _) AND (_ = _) RETURNING _ -- identifiers removed + +parse +DELETE BATCH FROM a +---- +DELETE BATCH FROM a +DELETE BATCH FROM a -- fully parenthesized +DELETE BATCH FROM a -- literals removed +DELETE BATCH FROM _ -- identifiers removed + +parse +DELETE BATCH (SIZE 1) FROM a +---- +DELETE BATCH (SIZE 1) FROM a +DELETE BATCH (SIZE 1) FROM a -- fully parenthesized +DELETE BATCH (SIZE 1) FROM a -- literals removed +DELETE BATCH (SIZE 1) FROM _ -- identifiers removed + +parse +DELETE BATCH (SIZE (SELECT 1)) FROM a +---- +DELETE BATCH (SIZE (SELECT 1)) FROM a +DELETE BATCH (SIZE (SELECT (1))) FROM a -- fully parenthesized +DELETE BATCH (SIZE (SELECT _)) FROM a -- literals removed +DELETE BATCH (SIZE (SELECT 1)) FROM _ -- identifiers removed diff --git a/pkg/sql/sem/tree/BUILD.bazel b/pkg/sql/sem/tree/BUILD.bazel index c498171e860b..2fcb2eb35900 100644 --- a/pkg/sql/sem/tree/BUILD.bazel +++ b/pkg/sql/sem/tree/BUILD.bazel @@ -21,6 +21,7 @@ go_library( "analyze.go", "annotation.go", "backup.go", + "batch.go", "changefeed.go", "col_name.go", "comment_on_column.go", diff --git a/pkg/sql/sem/tree/batch.go b/pkg/sql/sem/tree/batch.go new file mode 100644 index 000000000000..3bd53b0fcfc3 --- /dev/null +++ b/pkg/sql/sem/tree/batch.go @@ -0,0 +1,57 @@ +// Copyright 2023 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tree + +// Batch represents a BATCH clause. +type Batch struct { + Params []BatchParam +} + +var _ NodeFormatter = &Batch{} + +type BatchParam interface { + NodeFormatter +} + +// SizeBatchParam represents a BATCH (SIZE size) parameter. +type SizeBatchParam struct { + // Size is the expression specified by SIZE . + // It must be positive. + Size Expr +} + +// BatchParam represents a BATCH (param) parameter. +var _ BatchParam = &SizeBatchParam{} + +// Format implements NodeFormatter. +func (p *SizeBatchParam) Format(ctx *FmtCtx) { + ctx.WriteString("SIZE ") + p.Size.Format(ctx) +} + +// Format implements the NodeFormatter interface. +func (b *Batch) Format(ctx *FmtCtx) { + if b == nil { + return + } + ctx.WriteString("BATCH ") + params := b.Params + if len(params) > 0 { + ctx.WriteString("(") + for i, param := range params { + if i > 0 { + ctx.WriteString(",") + } + param.Format(ctx) + } + ctx.WriteString(") ") + } +} diff --git a/pkg/sql/sem/tree/delete.go b/pkg/sql/sem/tree/delete.go index 998dba9d48b8..acefa2e2ade4 100644 --- a/pkg/sql/sem/tree/delete.go +++ b/pkg/sql/sem/tree/delete.go @@ -21,6 +21,7 @@ package tree // Delete represents a DELETE statement. type Delete struct { + Batch *Batch With *With Table TableExpr Where *Where @@ -33,7 +34,9 @@ type Delete struct { // Format implements the NodeFormatter interface. func (node *Delete) Format(ctx *FmtCtx) { ctx.FormatNode(node.With) - ctx.WriteString("DELETE FROM ") + ctx.WriteString("DELETE ") + ctx.FormatNode(node.Batch) + ctx.WriteString("FROM ") ctx.FormatNode(node.Table) if len(node.Using) > 0 { ctx.WriteString(" USING ") diff --git a/pkg/sql/sem/tree/pretty.go b/pkg/sql/sem/tree/pretty.go index d3d2b255a1cb..2be4128e832e 100644 --- a/pkg/sql/sem/tree/pretty.go +++ b/pkg/sql/sem/tree/pretty.go @@ -1173,8 +1173,16 @@ func (node *Update) doc(p *PrettyCfg) pretty.Doc { func (node *Delete) doc(p *PrettyCfg) pretty.Doc { items := make([]pretty.TableRow, 0, 7) items = append(items, - node.With.docRow(p), - p.row("DELETE FROM", p.Doc(node.Table))) + node.With.docRow(p)) + tableLbl := "DELETE FROM" + batch := node.Batch + if batch != nil { + tableLbl = "FROM" + items = append(items, + p.row("DELETE", p.Doc(batch))) + } + items = append(items, + p.row(tableLbl, p.Doc(node.Table))) if len(node.Using) > 0 { items = append(items, p.row("USING", p.Doc(&node.Using))) }
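For illustration, here is a minimal standalone sketch of the formatting behaviour introduced in pkg/sql/sem/tree/batch.go and delete.go: a nil *Batch keeps the old `DELETE FROM ...` output, a bare BATCH clause prints `DELETE BATCH FROM ...`, and parameters are printed inside parentheses. The sketch uses a plain strings.Builder instead of tree.FmtCtx, and the local names (batch, sizeBatchParam, formatDelete) are stand-ins, not the real API.

```go
package main

import (
	"fmt"
	"strings"
)

// batchParam mirrors tree.BatchParam: anything that can print itself
// into the BATCH (...) parameter list.
type batchParam interface {
	format(sb *strings.Builder)
}

// sizeBatchParam mirrors tree.SizeBatchParam, with the SIZE expression
// reduced to a plain string for this sketch.
type sizeBatchParam struct{ size string }

func (p sizeBatchParam) format(sb *strings.Builder) {
	sb.WriteString("SIZE ")
	sb.WriteString(p.size)
}

// batch mirrors tree.Batch; a nil *batch means no BATCH clause.
type batch struct{ params []batchParam }

func (b *batch) format(sb *strings.Builder) {
	if b == nil {
		return // no BATCH clause at all
	}
	sb.WriteString("BATCH ")
	if len(b.params) > 0 {
		sb.WriteString("(")
		for i, p := range b.params {
			if i > 0 {
				sb.WriteString(",")
			}
			p.format(sb)
		}
		sb.WriteString(") ")
	}
}

// formatDelete mirrors the updated tree.Delete.Format: DELETE, the
// optional BATCH clause, then FROM <table>.
func formatDelete(b *batch, table string) string {
	var sb strings.Builder
	sb.WriteString("DELETE ")
	b.format(&sb)
	sb.WriteString("FROM ")
	sb.WriteString(table)
	return sb.String()
}

func main() {
	fmt.Println(formatDelete(nil, "a"))      // DELETE FROM a
	fmt.Println(formatDelete(&batch{}, "a")) // DELETE BATCH FROM a
	fmt.Println(formatDelete(&batch{params: []batchParam{sizeBatchParam{"1"}}}, "a")) // DELETE BATCH (SIZE 1) FROM a
}
```

The three outputs correspond to the round-trip cases added in pkg/sql/parser/testdata/delete: `DELETE BATCH FROM a`, `DELETE BATCH (SIZE 1) FROM a`, and `DELETE BATCH (SIZE (SELECT 1)) FROM a`.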
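The optbuilder change rejects a repeated SIZE parameter before reaching the (not yet implemented) execution path. A standalone sketch of that validation loop follows, with tree.BatchParam and tree.SizeBatchParam replaced by local stand-ins and pgerror replaced by fmt.Errorf; only the duplicate check from the patch is reproduced.

```go
package main

import "fmt"

// batchParam and sizeBatchParam stand in for tree.BatchParam and
// tree.SizeBatchParam from the patch.
type batchParam interface{ isBatchParam() }

type sizeBatchParam struct{ size string }

func (sizeBatchParam) isBatchParam() {}

// validateBatchParams mirrors the loop added in optbuilder/delete.go:
// walk the parameter list and reject a second SIZE parameter, reporting
// the index at which the duplicate appears.
func validateBatchParams(params []batchParam) error {
	var hasSize bool
	for i, p := range params {
		switch p.(type) {
		case sizeBatchParam:
			if hasSize {
				return fmt.Errorf("invalid parameter at index %d, SIZE already specified", i)
			}
			hasSize = true
		}
	}
	return nil
}

func main() {
	fmt.Println(validateBatchParams([]batchParam{sizeBatchParam{"1"}}))
	// <nil>
	fmt.Println(validateBatchParams([]batchParam{sizeBatchParam{"1"}, sizeBatchParam{"1"}}))
	// invalid parameter at index 1, SIZE already specified
}
```

This matches the error exercised by the `multiple_sizes` subtest in the new delete_batch logic test.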
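The failover.go changes are mechanical: the cluster.Monitor is now created before the failers and passed into makeFailer/makeFailerWithoutLocalNoop, so Failer.Ready drops its monitor argument and crashFailer, deadlockFailer, and diskStallFailer receive the monitor as a struct field at construction. A minimal sketch of the resulting shape, with `monitor` as a hypothetical stand-in for cluster.Monitor:

```go
package main

import "fmt"

// monitor stands in for roachtest's cluster.Monitor; only the method the
// failers use in this sketch is shown.
type monitor interface {
	ExpectDeath()
}

type fakeMonitor struct{}

func (fakeMonitor) ExpectDeath() { fmt.Println("monitor: expecting a node death") }

// crashFailer sketches the updated pattern: the monitor is provided at
// construction time, so Ready() no longer needs a parameter.
type crashFailer struct {
	m monitor
}

// Ready previously had the signature Ready(ctx, m) and stashed the monitor.
func (f *crashFailer) Ready() {}

func (f *crashFailer) Fail(nodeID int) {
	f.m.ExpectDeath() // register the expected death before stopping the node
	fmt.Printf("stopping n%d\n", nodeID)
}

func main() {
	// The monitor is created before the failer, mirroring the reordering in
	// runFailoverChaos and the other runFailover* tests.
	m := fakeMonitor{}
	f := &crashFailer{m: m}
	f.Ready()
	f.Fail(4)
}
```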