diff --git a/pkg/sql/data_source.go b/pkg/sql/data_source.go
index 0e8e04148edf..3580435ce9e6 100644
--- a/pkg/sql/data_source.go
+++ b/pkg/sql/data_source.go
@@ -435,7 +435,7 @@ func (p *planner) getSequenceSource(
 	}
 	return planDataSource{
 		plan: node,
-		info: sqlbase.NewSourceInfoForSingleTable(tn, sequenceSelectColumns),
+		info: sqlbase.NewSourceInfoForSingleTable(tn, sqlbase.SequenceSelectColumns),
 	}, nil
 }
diff --git a/pkg/sql/opt/bench/stub_factory.go b/pkg/sql/opt/bench/stub_factory.go
index 612ee6d470d6..8e43a24ae056 100644
--- a/pkg/sql/opt/bench/stub_factory.go
+++ b/pkg/sql/opt/bench/stub_factory.go
@@ -289,3 +289,9 @@ func (f *stubFactory) ConstructErrorIfRows(
 func (f *stubFactory) ConstructOpaque(metadata opt.OpaqueMetadata) (exec.Node, error) {
 	return struct{}{}, nil
 }
+
+func (f *stubFactory) ConstructAlterTableSplit(
+	index cat.Index, input exec.Node, expiration tree.TypedExpr,
+) (exec.Node, error) {
+	return struct{}{}, nil
+}
diff --git a/pkg/sql/opt/cat/index.go b/pkg/sql/opt/cat/index.go
index 7927fed0eb6d..790fc24902d0 100644
--- a/pkg/sql/opt/cat/index.go
+++ b/pkg/sql/opt/cat/index.go
@@ -38,6 +38,10 @@ type Index interface {
 	// Table returns a reference to the table this index is based on.
 	Table() Table
 
+	// Ordinal returns the ordinal of this index within the context of its Table.
+	// Specifically idx = Table().Index(idx.Ordinal).
+	Ordinal() int
+
 	// IsUnique returns true if this index is declared as UNIQUE in the schema.
 	IsUnique() bool
 
diff --git a/pkg/sql/opt/exec/execbuilder/relational.go b/pkg/sql/opt/exec/execbuilder/relational.go
index 43a69ad894ca..3b68c329f842 100644
--- a/pkg/sql/opt/exec/execbuilder/relational.go
+++ b/pkg/sql/opt/exec/execbuilder/relational.go
@@ -244,6 +244,9 @@ func (b *Builder) buildRelational(e memo.RelExpr) (execPlan, error) {
 	case *memo.OpaqueRelExpr:
 		ep, err = b.buildOpaque(t)
 
+	case *memo.AlterTableSplitExpr:
+		ep, err = b.buildAlterTableSplit(t)
+
 	default:
 		if opt.IsSetOp(e) {
 			ep, err = b.buildSetOp(e)
diff --git a/pkg/sql/opt/exec/execbuilder/statement.go b/pkg/sql/opt/exec/execbuilder/statement.go
index 0e30adeb32e1..6a85392f35e3 100644
--- a/pkg/sql/opt/exec/execbuilder/statement.go
+++ b/pkg/sql/opt/exec/execbuilder/statement.go
@@ -95,8 +95,8 @@ func (b *Builder) buildExplain(explain *memo.ExplainExpr) (execPlan, error) {
 	}
 	ep := execPlan{root: node}
-	for i := range explain.ColList {
-		ep.outputCols.Set(int(explain.ColList[i]), i)
+	for i, c := range explain.ColList {
+		ep.outputCols.Set(int(c), i)
 	}
 	// The subqueries are now owned by the explain node; remove them so they don't
 	// also show up in the final plan.
@@ -110,10 +110,34 @@ func (b *Builder) buildShowTrace(show *memo.ShowTraceForSessionExpr) (execPlan,
 		return execPlan{}, err
 	}
 	ep := execPlan{root: node}
-	for i := range show.ColList {
-		ep.outputCols.Set(int(show.ColList[i]), i)
+	for i, c := range show.ColList {
+		ep.outputCols.Set(int(c), i)
+	}
+	return ep, nil
+}
+
+func (b *Builder) buildAlterTableSplit(split *memo.AlterTableSplitExpr) (execPlan, error) {
+	input, err := b.buildRelational(split.Input)
+	if err != nil {
+		return execPlan{}, err
+	}
+	scalarCtx := buildScalarCtx{}
+	expiration, err := b.buildScalar(&scalarCtx, split.Expiration)
+	if err != nil {
+		return execPlan{}, err
+	}
+	table := b.mem.Metadata().Table(split.Table)
+	node, err := b.factory.ConstructAlterTableSplit(
+		table.Index(split.Index),
+		input.root,
+		expiration,
+	)
+	if err != nil {
+		return execPlan{}, err
+	}
+	ep := execPlan{root: node}
+	for i, c := range split.Columns {
+		ep.outputCols.Set(int(c), i)
 	}
-	// The subqueries are now owned by the explain node; remove them so they don't
-	// also show up in the final plan.
 	return ep, nil
 }
diff --git a/pkg/sql/opt/exec/execbuilder/testdata/ddl b/pkg/sql/opt/exec/execbuilder/testdata/ddl
index cca6d6751abb..6f6bbfd91f7e 100644
--- a/pkg/sql/opt/exec/execbuilder/testdata/ddl
+++ b/pkg/sql/opt/exec/execbuilder/testdata/ddl
@@ -91,17 +91,12 @@ CREATE TABLE s (k1 INT, k2 INT, v INT, PRIMARY KEY (k1,k2))
 query TTTTT colnames
 EXPLAIN (VERBOSE) ALTER TABLE s SPLIT AT SELECT k1,k2 FROM s ORDER BY k1 LIMIT 3
 ----
-tree field description columns ordering
-split · · (key, pretty, split_enforced_until) ·
- └── limit · · (k1, k2) k1!=NULL; k2!=NULL; key(k1,k2); +k1
- │ count 3 · ·
- └── render · · (k1, k2) k1!=NULL; k2!=NULL; key(k1,k2); +k1
- │ render 0 test.public.s.k1 · ·
- │ render 1 test.public.s.k2 · ·
- └── scan · · (k1, k2, v[omitted]) k1!=NULL; k2!=NULL; key(k1,k2); +k1
-· table s@primary · ·
-· spans ALL · ·
-· limit 3 · ·
+tree field description columns ordering
+split · · (key, pretty, split_enforced_until) ·
+ └── scan · · (k1, k2) +k1
+· table s@primary · ·
+· spans ALL · ·
+· limit 3 · ·
 
 statement ok
 DROP TABLE t; DROP TABLE other
diff --git a/pkg/sql/opt/exec/execbuilder/testdata/subquery b/pkg/sql/opt/exec/execbuilder/testdata/subquery
index 8de62df0609a..2afdec743da3 100644
--- a/pkg/sql/opt/exec/execbuilder/testdata/subquery
+++ b/pkg/sql/opt/exec/execbuilder/testdata/subquery
@@ -9,19 +9,16 @@ CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT)
 query TTT
 EXPLAIN ALTER TABLE abc SPLIT AT VALUES ((SELECT 42))
 ----
-root · ·
- ├── split · ·
- │ └── values · ·
- │ size 1 column, 1 row
- └── subquery · ·
- │ id @S1
- │ original sql (SELECT 42)
- │ exec mode one row
- └── max1row · ·
- └── limit · ·
- │ count 2
- └── render · ·
- └── emptyrow · ·
+root · ·
+ ├── split · ·
+ │ └── values · ·
+ │ size 1 column, 1 row
+ └── subquery · ·
+ │ id @S1
+ │ original sql (SELECT 42)
+ │ exec mode one row
+ └── values · ·
+· size 1 column, 1 row
 
 statement ok
 ALTER TABLE abc SPLIT AT VALUES ((SELECT 1))
diff --git a/pkg/sql/opt/exec/factory.go b/pkg/sql/opt/exec/factory.go
index df4a3f21c220..252dc6534569 100644
--- a/pkg/sql/opt/exec/factory.go
+++ b/pkg/sql/opt/exec/factory.go
@@ -404,6 +404,10 @@ type Factory interface {
 	// ConstructOpaque creates a node for an opaque operator.
 	ConstructOpaque(metadata opt.OpaqueMetadata) (Node, error)
+
+	// ConstructAlterTableSplit creates a node that implements ALTER TABLE/INDEX
+	// SPLIT AT.
+	ConstructAlterTableSplit(index cat.Index, input Node, expiration tree.TypedExpr) (Node, error)
 }
 
 // OutputOrdering indicates the required output ordering on a Node that is being
diff --git a/pkg/sql/opt/memo/expr_format.go b/pkg/sql/opt/memo/expr_format.go
index de1e292e0e45..48f642d1c007 100644
--- a/pkg/sql/opt/memo/expr_format.go
+++ b/pkg/sql/opt/memo/expr_format.go
@@ -967,6 +967,14 @@ func FormatPrivate(f *ExprFmtCtx, private interface{}, physProps *physical.Requi
 		f.Buffer.WriteByte(' ')
 		f.Buffer.WriteString(t.Metadata.String())
 
+	case *AlterTableSplitPrivate:
+		tab := f.Memo.metadata.Table(t.Table)
+		if t.Index == cat.PrimaryIndex {
+			fmt.Fprintf(f.Buffer, " %s", tableAlias(f, t.Table))
+		} else {
+			fmt.Fprintf(f.Buffer, " %s@%s", tableAlias(f, t.Table), tab.Index(t.Index).Name())
+		}
+
 	case *JoinPrivate:
 		// Nothing to show; flags are shown separately.
 
diff --git a/pkg/sql/opt/memo/logical_props_builder.go b/pkg/sql/opt/memo/logical_props_builder.go
index 47551f83d8a8..fbb40e42bd28 100644
--- a/pkg/sql/opt/memo/logical_props_builder.go
+++ b/pkg/sql/opt/memo/logical_props_builder.go
@@ -684,13 +684,12 @@ func (b *logicalPropsBuilder) buildValuesProps(values *ValuesExpr, rel *props.Re
 	}
 }
 
-func (b *logicalPropsBuilder) buildExplainProps(explain *ExplainExpr, rel *props.Relational) {
-	BuildSharedProps(b.mem, explain, &rel.Shared)
+func (b *logicalPropsBuilder) buildBasicProps(e opt.Expr, cols opt.ColList, rel *props.Relational) {
+	BuildSharedProps(b.mem, e, &rel.Shared)
 
 	// Output Columns
 	// --------------
-	// Output columns are stored in the definition.
-	rel.OutputCols = explain.ColList.ToSet()
+	rel.OutputCols = cols.ToSet()
 
 	// Not Null Columns
 	// ----------------
@@ -698,11 +697,11 @@
 
 	// Outer Columns
 	// -------------
-	// EXPLAIN doesn't allow outer columns.
+	// No outer columns.
 
 	// Functional Dependencies
 	// -----------------------
-	// Explain operator has an empty FD set.
+	// Empty FD set.
 
 	// Cardinality
 	// -----------
@@ -716,39 +715,28 @@
 	}
 }
 
+func (b *logicalPropsBuilder) buildExplainProps(explain *ExplainExpr, rel *props.Relational) {
+	b.buildBasicProps(explain, explain.ColList, rel)
+}
+
 func (b *logicalPropsBuilder) buildShowTraceForSessionProps(
 	showTrace *ShowTraceForSessionExpr, rel *props.Relational,
 ) {
-	BuildSharedProps(b.mem, showTrace, &rel.Shared)
-
-	// Output Columns
-	// --------------
-	// Output columns are stored in the definition.
-	rel.OutputCols = showTrace.ColList.ToSet()
-
-	// Not Null Columns
-	// ----------------
-	// All columns are assumed to be nullable.
-
-	// Outer Columns
-	// -------------
-	// SHOW TRACE doesn't allow outer columns.
-
-	// Functional Dependencies
-	// -----------------------
-	// ShowTrace operator has an empty FD set.
-
-	// Cardinality
-	// -----------
-	// Don't make any assumptions about cardinality of output.
-	rel.Cardinality = props.AnyCardinality
+	b.buildBasicProps(showTrace, showTrace.ColList, rel)
+}
 
-	// Statistics
-	// ----------
-	if !b.disableStats {
-		b.sb.buildUnknown(rel)
-	}
+func (b *logicalPropsBuilder) buildOpaqueRelProps(op *OpaqueRelExpr, rel *props.Relational) {
+	b.buildBasicProps(op, op.Columns, rel)
+	rel.CanHaveSideEffects = true
+	rel.CanMutate = true
+}
+
+func (b *logicalPropsBuilder) buildAlterTableSplitProps(
+	split *AlterTableSplitExpr, rel *props.Relational,
+) {
+	b.buildBasicProps(split, split.Columns, rel)
+	rel.CanHaveSideEffects = true
+	rel.CanMutate = true
 }
 
 func (b *logicalPropsBuilder) buildLimitProps(limit *LimitExpr, rel *props.Relational) {
@@ -1785,39 +1773,6 @@ func (h *joinPropsHelper) cardinality() props.Cardinality {
 	}
 }
 
-func (b *logicalPropsBuilder) buildOpaqueRelProps(op *OpaqueRelExpr, rel *props.Relational) {
-	BuildSharedProps(b.mem, op, &rel.Shared)
-	rel.CanHaveSideEffects = true
-	rel.CanMutate = true
-
-	// Output Columns
-	// --------------
-	rel.OutputCols = op.Columns.ToSet()
-
-	// Not Null Columns
-	// ----------------
-	// All columns are assumed to be nullable.
-
-	// Outer Columns
-	// -------------
-	// No outer columns.
-
-	// Functional Dependencies
-	// -----------------------
-	// None.
-
-	// Cardinality
-	// -----------
-	// Any.
-	rel.Cardinality = props.AnyCardinality
-
-	// Statistics
-	// ----------
-	if !b.disableStats {
-		b.sb.buildUnknown(rel)
-	}
-}
-
 func (b *logicalPropsBuilder) buildFakeRelProps(fake *FakeRelExpr, rel *props.Relational) {
 	*rel = *fake.Props
 }
diff --git a/pkg/sql/opt/ops/statement.opt b/pkg/sql/opt/ops/statement.opt
index 1c3e47e90911..94555133a31d 100644
--- a/pkg/sql/opt/ops/statement.opt
+++ b/pkg/sql/opt/ops/statement.opt
@@ -90,3 +90,36 @@ define OpaqueRelPrivate {
     Columns ColList
     Metadata OpaqueMetadata
 }
+
+# AlterTableSplit represents an `ALTER TABLE/INDEX .. SPLIT AT ..` statement.
+[Relational, DDL]
+define AlterTableSplit {
+    # The input expression provides values for the index columns (or a prefix of
+    # them).
+    Input RelExpr
+
+    # Expiration is a string scalar that indicates a timestamp after which the
+    # ranges are eligible for automatic merging (or Null if there is no
+    # expiration).
+    Expiration ScalarExpr
+
+    _ AlterTableSplitPrivate
+}
+
+[Private]
+define AlterTableSplitPrivate {
+    # Table identifies the table to alter. It is an id that can be passed to
+    # the Metadata.Table method in order to fetch cat.Table metadata.
+    Table TableID
+
+    # Index identifies the index to split (whether primary or secondary). It
+    # can be passed to the cat.Table.Index(i int) method in order to fetch the
+    # cat.Index metadata.
+    Index int
+
+    # Props stores the required physical properties for the enclosed expression.
+    Props PhysProps
+
+    # Columns stores the column IDs for the statement result columns.
+    Columns ColList
+}
diff --git a/pkg/sql/opt/optbuilder/alter_table.go b/pkg/sql/opt/optbuilder/alter_table.go
new file mode 100644
index 000000000000..510b97e6be75
--- /dev/null
+++ b/pkg/sql/opt/optbuilder/alter_table.go
@@ -0,0 +1,97 @@
+// Copyright 2018 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package optbuilder
+
+import (
+	"github.com/cockroachdb/cockroach/pkg/sql/opt"
+	"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
+	"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
+	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
+	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
+	"github.com/cockroachdb/cockroach/pkg/sql/privilege"
+	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
+	"github.com/cockroachdb/cockroach/pkg/sql/types"
+)
+
+// buildAlterTableSplit builds an ALTER TABLE/INDEX .. SPLIT AT .. statement.
+func (b *Builder) buildAlterTableSplit(split *tree.Split, inScope *scope) (outScope *scope) {
+	flags := cat.Flags{
+		AvoidDescriptorCaches: true,
+		NoTableStats:          true,
+	}
+	index, err := cat.ResolveTableIndex(b.ctx, b.catalog, flags, &split.TableOrIndex)
+	if err != nil {
+		panic(builderError{err})
+	}
+	table := index.Table()
+	if err := b.catalog.CheckPrivilege(b.ctx, table, privilege.INSERT); err != nil {
+		panic(builderError{err})
+	}
+
+	b.DisableMemoReuse = true
+
+	// Calculate the desired types for the select statement. It is OK if the
+	// select statement returns fewer columns (the relevant prefix is used).
+	desiredTypes := make([]*types.T, index.LaxKeyColumnCount())
+	for i := range desiredTypes {
+		desiredTypes[i] = index.Column(i).DatumType()
+	}
+
+	// We don't allow the input statement to reference outer columns, so we
+	// pass a "blank" scope rather than inScope.
+	emptyScope := &scope{builder: b}
+	stmtScope := b.buildStmt(split.Rows, desiredTypes, emptyScope)
+	if len(stmtScope.cols) == 0 {
+		panic(pgerror.Newf(pgcode.Syntax, "no columns in SPLIT AT data"))
+	}
+	if len(stmtScope.cols) > len(desiredTypes) {
+		panic(pgerror.Newf(pgcode.Syntax, "too many columns in SPLIT AT data"))
+	}
+	for i := range stmtScope.cols {
+		if !stmtScope.cols[i].typ.Equivalent(desiredTypes[i]) {
+			panic(pgerror.Newf(
+				pgcode.Syntax, "SPLIT AT data column %d (%s) must be of type %s, not type %s",
+				i+1, index.Column(i).ColName(), desiredTypes[i], stmtScope.cols[i].typ,
+			))
+		}
+	}
+
+	// Build the expiration scalar.
+	var expiration opt.ScalarExpr
+	if split.ExpireExpr != nil {
+		emptyScope.context = "ALTER TABLE SPLIT AT"
+		// We need to save and restore the previous value of the field in
+		// semaCtx in case we are recursively called within a subquery
+		// context.
+		defer b.semaCtx.Properties.Restore(b.semaCtx.Properties)
+		b.semaCtx.Properties.Require(emptyScope.context, tree.RejectSpecial)
+
+		texpr := emptyScope.resolveType(split.ExpireExpr, types.String)
+		expiration = b.buildScalar(texpr, emptyScope, nil /* outScope */, nil /* outCol */, nil /* colRefs */)
+	} else {
+		expiration = b.factory.ConstructNull(types.String)
+	}
+
+	outScope = inScope.push()
+	b.synthesizeResultColumns(outScope, sqlbase.AlterTableSplitColumns)
+	outScope.expr = b.factory.ConstructAlterTableSplit(
+		stmtScope.expr.(memo.RelExpr),
+		expiration,
+		&memo.AlterTableSplitPrivate{
+			Table:   b.factory.Metadata().AddTable(table),
+			Index:   index.Ordinal(),
+			Columns: colsToColList(outScope.cols),
+			Props:   stmtScope.makePhysicalProps(),
+		},
+	)
+	return outScope
+}
diff --git a/pkg/sql/opt/optbuilder/builder.go b/pkg/sql/opt/optbuilder/builder.go
index b9b7221b98e1..d4c46851e499 100644
--- a/pkg/sql/opt/optbuilder/builder.go
+++ b/pkg/sql/opt/optbuilder/builder.go
@@ -210,15 +210,9 @@ func (b *Builder) buildStmt(
 ) (outScope *scope) {
 	// NB: The case statements are sorted lexicographically.
 	switch stmt := stmt.(type) {
-	case *tree.CreateTable:
-		return b.buildCreateTable(stmt, inScope)
-
 	case *tree.Delete:
 		return b.buildDelete(stmt, inScope)
 
-	case *tree.Explain:
-		return b.buildExplain(stmt, inScope)
-
 	case *tree.Insert:
 		return b.buildInsert(stmt, inScope)
 
@@ -228,11 +222,20 @@ func (b *Builder) buildStmt(
 	case *tree.Select:
 		return b.buildSelect(stmt, desiredTypes, inScope)
 
+	case *tree.Update:
+		return b.buildUpdate(stmt, inScope)
+
+	case *tree.CreateTable:
+		return b.buildCreateTable(stmt, inScope)
+
+	case *tree.Explain:
+		return b.buildExplain(stmt, inScope)
+
 	case *tree.ShowTraceForSession:
 		return b.buildShowTrace(stmt, inScope)
 
-	case *tree.Update:
-		return b.buildUpdate(stmt, inScope)
+	case *tree.Split:
+		return b.buildAlterTableSplit(stmt, inScope)
 
 	default:
 		// See if this statement can be rewritten to another statement using the
diff --git a/pkg/sql/opt/optbuilder/opaque.go b/pkg/sql/opt/optbuilder/opaque.go
index 406639ee5bc6..65b86a650ae4 100644
--- a/pkg/sql/opt/optbuilder/opaque.go
+++ b/pkg/sql/opt/optbuilder/opaque.go
@@ -42,10 +42,7 @@ func (b *Builder) tryBuildOpaque(stmt tree.Statement, inScope *scope) (outScope
 		panic(builderError{err})
 	}
 	outScope = inScope.push()
-	for i := range cols {
-		col := b.synthesizeColumn(outScope, cols[i].Name, cols[i].Typ, nil /* expr */, nil /* scalar */)
-		col.hidden = cols[i].Hidden
-	}
+	b.synthesizeResultColumns(outScope, cols)
 	outScope.expr = b.factory.ConstructOpaqueRel(&memo.OpaqueRelPrivate{
 		Columns:  colsToColList(outScope.cols),
 		Metadata: obj,
diff --git a/pkg/sql/opt/optbuilder/select.go b/pkg/sql/opt/optbuilder/select.go
index 916450c4ceeb..8f2030b5ec18 100644
--- a/pkg/sql/opt/optbuilder/select.go
+++ b/pkg/sql/opt/optbuilder/select.go
@@ -22,6 +22,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
 	"github.com/cockroachdb/cockroach/pkg/sql/privilege"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
 	"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
 	"github.com/cockroachdb/cockroach/pkg/sql/types"
 	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
@@ -429,10 +430,10 @@ func (b *Builder) buildSequenceSelect(seq cat.Sequence, inScope *scope) (outScop
 	md := b.factory.Metadata()
 	outScope = inScope.push()
 
-	cols := opt.ColList{
-		md.AddColumn("last_value", types.Int),
-		md.AddColumn("log_cnt", types.Int),
-		md.AddColumn("is_called", types.Bool),
+	cols := make(opt.ColList, len(sqlbase.SequenceSelectColumns))
+
+	for i, c := range sqlbase.SequenceSelectColumns {
+		cols[i] = md.AddColumn(c.Name, c.Typ)
 	}
 
 	outScope.cols = make([]scopeColumn, 3)
diff --git a/pkg/sql/opt/optbuilder/testdata/alter_table b/pkg/sql/opt/optbuilder/testdata/alter_table
new file mode 100644
index 000000000000..06b25b9d987b
--- /dev/null
+++ b/pkg/sql/opt/optbuilder/testdata/alter_table
@@ -0,0 +1,80 @@
+exec-ddl
+CREATE TABLE abc (a INT PRIMARY KEY, b INT, c STRING, INDEX b (b), UNIQUE INDEX bc (b,c))
+----
+
+build
+ALTER TABLE abc SPLIT AT VALUES (1), (2)
+----
+alter-table-split
+ ├── columns: key:2(bytes) pretty:3(string) split_enforced_until:4(timestamp)
+ ├── values
+ │ ├── columns: column1:1(int!null)
+ │ ├── tuple [type=tuple{int}]
+ │ │ └── const: 1 [type=int]
+ │ └── tuple [type=tuple{int}]
+ │ └── const: 2 [type=int]
+ └── null [type=string]
+
+build
+ALTER TABLE abc SPLIT AT VALUES (1), (2) WITH EXPIRATION '2200-01-01 00:00:00.0'
+----
+alter-table-split
+ ├── columns: key:2(bytes) pretty:3(string) split_enforced_until:4(timestamp)
+ ├── values
+ │ ├── columns: column1:1(int!null)
+ │ ├── tuple [type=tuple{int}]
+ │ │ └── const: 1 [type=int]
+ │ └── tuple [type=tuple{int}]
+ │ └── const: 2 [type=int]
+ └── const: '2200-01-01 00:00:00.0' [type=string]
+
+build
+ALTER TABLE abc SPLIT AT VALUES (1, 2), (3, 4)
+----
+error (42601): too many columns in SPLIT AT data
+
+build
+ALTER INDEX abc@bc SPLIT AT VALUES (1), (2) WITH EXPIRATION '2200-01-01 00:00:00.0'
+----
+alter-table-split
+ ├── columns: key:2(bytes) pretty:3(string) split_enforced_until:4(timestamp)
+ ├── values
+ │ ├── columns: column1:1(int!null)
+ │ ├── tuple [type=tuple{int}]
+ │ │ └── const: 1 [type=int]
+ │ └── tuple [type=tuple{int}]
+ │ └── const: 2 [type=int]
+ └── const: '2200-01-01 00:00:00.0' [type=string]
+
+build
+ALTER INDEX abc@bc SPLIT AT VALUES (1, 'foo'), (2, 'bar')
+----
+alter-table-split
+ ├── columns: key:3(bytes) pretty:4(string) split_enforced_until:5(timestamp)
+ ├── values
+ │ ├── columns: column1:1(int!null) column2:2(string!null)
+ │ ├── tuple [type=tuple{int, string}]
+ │ │ ├── const: 1 [type=int]
+ │ │ └── const: 'foo' [type=string]
+ │ └── tuple [type=tuple{int, string}]
+ │ ├── const: 2 [type=int]
+ │ └── const: 'bar' [type=string]
+ └── null [type=string]
+
+build
+ALTER INDEX abc@bc SPLIT AT VALUES (1, 2), (3, 4)
+----
+error (42601): SPLIT AT data column 2 (c) must be of type string, not type int
+
+build
+ALTER INDEX abc@bc SPLIT AT SELECT b FROM abc ORDER BY a
+----
+alter-table-split
+ ├── columns: key:4(bytes) pretty:5(string) split_enforced_until:6(timestamp)
+ ├── project
+ │ ├── columns: b:2(int) [hidden: abc.a:1(int!null)]
+ │ ├── ordering: +1
+ │ └── scan abc
+ │ ├── columns: abc.a:1(int!null) abc.b:2(int) abc.c:3(string)
+ │ └── ordering: +1
+ └── null [type=string]
diff --git a/pkg/sql/opt/ordering/ordering.go b/pkg/sql/opt/ordering/ordering.go
index fb4111aa14bd..70dc9c1eefb7 100644
--- a/pkg/sql/opt/ordering/ordering.go
+++ b/pkg/sql/opt/ordering/ordering.go
@@ -140,11 +140,6 @@ func init() {
 		buildChildReqOrdering: limitOrOffsetBuildChildReqOrdering,
 		buildProvidedOrdering: limitOrOffsetBuildProvided,
 	}
-	funcMap[opt.ExplainOp] = funcs{
-		canProvideOrdering:    canNeverProvideOrdering,
-		buildChildReqOrdering: explainBuildChildReqOrdering,
-		buildProvidedOrdering: noProvidedOrdering,
-	}
 	funcMap[opt.ScalarGroupByOp] = funcs{
 		// ScalarGroupBy always has exactly one result; any required ordering should
 		// have been simplified to Any (unless normalization rules are disabled).
@@ -187,6 +182,16 @@ func init() {
 		buildChildReqOrdering: mutationBuildChildReqOrdering,
 		buildProvidedOrdering: mutationBuildProvided,
 	}
+	funcMap[opt.ExplainOp] = funcs{
+		canProvideOrdering:    canNeverProvideOrdering,
+		buildChildReqOrdering: explainBuildChildReqOrdering,
+		buildProvidedOrdering: noProvidedOrdering,
+	}
+	funcMap[opt.AlterTableSplitOp] = funcs{
+		canProvideOrdering:    canNeverProvideOrdering,
+		buildChildReqOrdering: alterTableSplitBuildChildReqOrdering,
+		buildProvidedOrdering: noProvidedOrdering,
+	}
 }
 
 func canNeverProvideOrdering(expr memo.RelExpr, required *physical.OrderingChoice) bool {
diff --git a/pkg/sql/opt/ordering/explain.go b/pkg/sql/opt/ordering/statement.go
similarity index 73%
rename from pkg/sql/opt/ordering/explain.go
rename to pkg/sql/opt/ordering/statement.go
index 09bd5627fd15..034ec0022bca 100644
--- a/pkg/sql/opt/ordering/explain.go
+++ b/pkg/sql/opt/ordering/statement.go
@@ -20,3 +20,12 @@ func explainBuildChildReqOrdering(
 ) physical.OrderingChoice {
 	return parent.(*memo.ExplainExpr).Props.Ordering
 }
+
+func alterTableSplitBuildChildReqOrdering(
+	parent memo.RelExpr, required *physical.OrderingChoice, childIdx int,
+) physical.OrderingChoice {
+	if childIdx != 0 {
+		return physical.OrderingChoice{}
+	}
+	return parent.(*memo.AlterTableSplitExpr).Props.Ordering
+}
diff --git a/pkg/sql/opt/testutils/testcat/create_table.go b/pkg/sql/opt/testutils/testcat/create_table.go
index 7393738de75a..20cb0a1c0167 100644
--- a/pkg/sql/opt/testutils/testcat/create_table.go
+++ b/pkg/sql/opt/testutils/testcat/create_table.go
@@ -412,7 +412,7 @@ func (tt *Table) addIndex(def *tree.IndexTableDef, typ indexType) *Index {
 		if len(tt.Indexes) != 0 {
 			panic("primary index should always be 0th index")
 		}
-		idx.Ordinal = len(tt.Indexes)
+		idx.ordinal = len(tt.Indexes)
 		tt.Indexes = append(tt.Indexes, idx)
 		return idx
 	}
@@ -465,7 +465,7 @@ func (tt *Table) addIndex(def *tree.IndexTableDef, typ indexType) *Index {
 		}
 	}
 
-	idx.Ordinal = len(tt.Indexes)
+	idx.ordinal = len(tt.Indexes)
 	tt.Indexes = append(tt.Indexes, idx)
 
 	return idx
diff --git a/pkg/sql/opt/testutils/testcat/test_catalog.go b/pkg/sql/opt/testutils/testcat/test_catalog.go
index a466cc079b8a..0d841cd3b736 100644
--- a/pkg/sql/opt/testutils/testcat/test_catalog.go
+++ b/pkg/sql/opt/testutils/testcat/test_catalog.go
@@ -663,9 +663,6 @@ func (tt *Table) FindOrdinal(name string) int {
 type Index struct {
 	IdxName string
 
-	// Ordinal is the ordinal of this index in the table.
-	Ordinal int
-
 	// KeyCount is the number of columns that make up the unique key for the
 	// index. See the cat.Index.KeyColumnCount for more details.
 	KeyCount int
@@ -687,13 +684,16 @@ type Index struct {
 	// the parent table, database, or even the default zone.
 	IdxZone *config.ZoneConfig
 
+	// ordinal is the ordinal of this index in the table.
+	ordinal int
+
 	// table is a back reference to the table this index is on.
 	table *Table
 }
 
 // ID is part of the cat.Index interface.
 func (ti *Index) ID() cat.StableID {
-	return 1 + cat.StableID(ti.Ordinal)
+	return 1 + cat.StableID(ti.ordinal)
 }
 
 // Name is part of the cat.Index interface.
@@ -706,6 +706,11 @@ func (ti *Index) Table() cat.Table {
 	return ti.table
 }
 
+// Ordinal is part of the cat.Index interface.
+func (ti *Index) Ordinal() int {
+	return ti.ordinal
+}
+
 // IsUnique is part of the cat.Index interface.
 func (ti *Index) IsUnique() bool {
 	return ti.Unique
diff --git a/pkg/sql/opt/xform/physical_props.go b/pkg/sql/opt/xform/physical_props.go
index 9bab44b9ac1e..208e86b38a9f 100644
--- a/pkg/sql/opt/xform/physical_props.go
+++ b/pkg/sql/opt/xform/physical_props.go
@@ -46,9 +46,13 @@ func BuildChildPhysicalProps(
 ) *physical.Required {
 	var childProps physical.Required
 
-	// The only operation that requires a presentation of its input is Explain.
-	if parent.Op() == opt.ExplainOp {
+	// Most operations don't require a presentation of their input; these are the
+	// exceptions.
+	switch parent.Op() {
+	case opt.ExplainOp:
 		childProps.Presentation = parent.(*memo.ExplainExpr).Props.Presentation
+	case opt.AlterTableSplitOp:
+		childProps.Presentation = parent.(*memo.AlterTableSplitExpr).Props.Presentation
 	}
 
 	childProps.Ordering = ordering.BuildChildRequired(parent, &parentProps.Ordering, nth)
diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go
index 39ebaa5eeb07..b0dff17842ef 100644
--- a/pkg/sql/opt_catalog.go
+++ b/pkg/sql/opt_catalog.go
@@ -580,7 +580,7 @@ func newOptTable(
 		}
 	}
 
-	ot.indexes[i].init(ot, idxDesc, idxZone)
+	ot.indexes[i].init(ot, i, idxDesc, idxZone)
 	if fk := &idxDesc.ForeignKey; fk.IsSet() {
 		ot.outboundFKs = append(ot.outboundFKs, optForeignKeyConstraint{
 			name: idxDesc.ForeignKey.Name,
@@ -855,6 +855,7 @@ type optIndex struct {
 	// otherwise it is desc.StoreColumnIDs.
 	storedCols []sqlbase.ColumnID
 
+	indexOrdinal  int
 	numCols       int
 	numKeyCols    int
 	numLaxKeyCols int
@@ -864,10 +865,13 @@ var _ cat.Index = &optIndex{}
 
 // init can be used instead of newOptIndex when we have a pre-allocated instance
 // (e.g. as part of a bigger struct).
-func (oi *optIndex) init(tab *optTable, desc *sqlbase.IndexDescriptor, zone *config.ZoneConfig) {
+func (oi *optIndex) init(
+	tab *optTable, indexOrdinal int, desc *sqlbase.IndexDescriptor, zone *config.ZoneConfig,
+) {
 	oi.tab = tab
 	oi.desc = desc
 	oi.zone = zone
+	oi.indexOrdinal = indexOrdinal
 	if desc == &tab.desc.PrimaryIndex {
 		// Although the primary index contains all columns in the table, the index
 		// descriptor does not contain columns that are not explicitly part of the
@@ -1000,6 +1004,11 @@ func (oi *optIndex) Table() cat.Table {
 	return oi.tab
 }
 
+// Ordinal is part of the cat.Index interface.
+func (oi *optIndex) Ordinal() int {
+	return oi.indexOrdinal
+}
+
 type optTableStat struct {
 	createdAt      time.Time
 	columnOrdinals []int
diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go
index 413c278148f3..cb8dde0fbb2f 100644
--- a/pkg/sql/opt_exec_factory.go
+++ b/pkg/sql/opt_exec_factory.go
@@ -1592,6 +1592,24 @@ func (ef *execFactory) ConstructOpaque(metadata opt.OpaqueMetadata) (exec.Node,
 	return o.plan, nil
 }
 
+// ConstructAlterTableSplit is part of the exec.Factory interface.
+func (ef *execFactory) ConstructAlterTableSplit(
+	index cat.Index, input exec.Node, expiration tree.TypedExpr,
+) (exec.Node, error) {
+	expirationTime, err := parseExpirationTime(ef.planner.EvalContext(), expiration)
+	if err != nil {
+		return nil, err
+	}
+
+	return &splitNode{
+		force:          ef.planner.SessionData().ForceSplitAt,
+		tableDesc:      &index.Table().(*optTable).desc.TableDescriptor,
+		index:          index.(*optIndex).desc,
+		rows:           input.(planNode),
+		expirationTime: expirationTime,
+	}, nil
+}
+
 // renderBuilder encapsulates the code to build a renderNode.
 type renderBuilder struct {
 	r *renderNode
diff --git a/pkg/sql/plan_columns.go b/pkg/sql/plan_columns.go
index ad7ff9e44d11..59d81f1bc33e 100644
--- a/pkg/sql/plan_columns.go
+++ b/pkg/sql/plan_columns.go
@@ -96,25 +96,25 @@ func getPlanColumns(plan planNode, mut bool) sqlbase.ResultColumns {
 
 	// Nodes with a fixed schema.
 	case *scrubNode:
-		return n.getColumns(mut, scrubColumns)
+		return n.getColumns(mut, sqlbase.ScrubColumns)
 	case *explainDistSQLNode:
 		return n.getColumns(mut, sqlbase.ExplainDistSQLColumns)
 	case *relocateNode:
-		return n.getColumns(mut, relocateNodeColumns)
+		return n.getColumns(mut, sqlbase.AlterTableRelocateColumns)
 	case *scatterNode:
-		return n.getColumns(mut, scatterNodeColumns)
+		return n.getColumns(mut, sqlbase.AlterTableScatterColumns)
 	case *showFingerprintsNode:
-		return n.getColumns(mut, showFingerprintsColumns)
+		return n.getColumns(mut, sqlbase.ShowFingerprintsColumns)
 	case *splitNode:
-		return n.getColumns(mut, splitNodeColumns)
+		return n.getColumns(mut, sqlbase.AlterTableSplitColumns)
 	case *unsplitNode:
-		return n.getColumns(mut, unsplitNodeColumns)
+		return n.getColumns(mut, sqlbase.AlterTableUnsplitColumns)
 	case *unsplitAllNode:
-		return n.getColumns(mut, unsplitNodeColumns)
+		return n.getColumns(mut, sqlbase.AlterTableUnsplitColumns)
 	case *showTraceReplicaNode:
 		return n.getColumns(mut, sqlbase.ShowReplicaTraceColumns)
 	case *sequenceSelectNode:
-		return n.getColumns(mut, sequenceSelectColumns)
+		return n.getColumns(mut, sqlbase.SequenceSelectColumns)
 
 	// Nodes that have the same schema as their source or their
 	// valueNode helper.
diff --git a/pkg/sql/relocate.go b/pkg/sql/relocate.go
index f26bdfb7077f..562ca5b98957 100644
--- a/pkg/sql/relocate.go
+++ b/pkg/sql/relocate.go
@@ -108,17 +108,6 @@ func (p *planner) Relocate(ctx context.Context, n *tree.Relocate) (planNode, err
 	}, nil
 }
 
-var relocateNodeColumns = sqlbase.ResultColumns{
-	{
-		Name: "key",
-		Typ:  types.Bytes,
-	},
-	{
-		Name: "pretty",
-		Typ:  types.String,
-	},
-}
-
 // relocateRun contains the run-time state of
 // relocateNode during local execution.
 type relocateRun struct {
diff --git a/pkg/sql/scatter.go b/pkg/sql/scatter.go
index e48ea50f3d2b..1bd5ec76a129 100644
--- a/pkg/sql/scatter.go
+++ b/pkg/sql/scatter.go
@@ -18,7 +18,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/sql/privilege"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
-	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
 	"github.com/cockroachdb/cockroach/pkg/sql/types"
 	"github.com/pkg/errors"
 )
@@ -147,17 +146,6 @@ func (n *scatterNode) Next(params runParams) (bool, error) {
 	return hasNext, nil
 }
 
-var scatterNodeColumns = sqlbase.ResultColumns{
-	{
-		Name: "key",
-		Typ:  types.Bytes,
-	},
-	{
-		Name: "pretty",
-		Typ:  types.String,
-	},
-}
-
 func (n *scatterNode) Values() tree.Datums {
 	r := n.run.ranges[n.run.rangeIdx]
 	return tree.Datums{
diff --git a/pkg/sql/scrub.go b/pkg/sql/scrub.go
index 454f251c8c6c..9d3554c32dbb 100644
--- a/pkg/sql/scrub.go
+++ b/pkg/sql/scrub.go
@@ -78,17 +78,6 @@ func (p *planner) Scrub(ctx context.Context, n *tree.Scrub) (planNode, error) {
 	return &scrubNode{n: n}, nil
 }
 
-var scrubColumns = sqlbase.ResultColumns{
-	{Name: "job_uuid", Typ: types.Uuid},
-	{Name: "error_type", Typ: types.String},
-	{Name: "database", Typ: types.String},
-	{Name: "table", Typ: types.String},
-	{Name: "primary_key", Typ: types.String},
-	{Name: "timestamp", Typ: types.Timestamp},
-	{Name: "repaired", Typ: types.Bool},
-	{Name: "details", Typ: types.Jsonb},
-}
-
 // scrubRun contains the run-time state of scrubNode during local execution.
 type scrubRun struct {
 	checkQueue []checkOperation
diff --git a/pkg/sql/sequence_select.go b/pkg/sql/sequence_select.go
index 9e4b44915855..a364291e6323 100644
--- a/pkg/sql/sequence_select.go
+++ b/pkg/sql/sequence_select.go
@@ -15,7 +15,6 @@ import (
 
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
 	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
-	"github.com/cockroachdb/cockroach/pkg/sql/types"
 	"github.com/pkg/errors"
 )
 
@@ -68,18 +67,3 @@ func (ss *sequenceSelectNode) Values() tree.Datums {
 }
 
 func (ss *sequenceSelectNode) Close(ctx context.Context) {}
-
-var sequenceSelectColumns = sqlbase.ResultColumns{
-	{
-		Name: `last_value`,
-		Typ:  types.Int,
-	},
-	{
-		Name: `log_cnt`,
-		Typ:  types.Int,
-	},
-	{
-		Name: `is_called`,
-		Typ:  types.Bool,
-	},
-}
diff --git a/pkg/sql/show_fingerprints.go b/pkg/sql/show_fingerprints.go
index 509185ec438a..67158370eb85 100644
--- a/pkg/sql/show_fingerprints.go
+++ b/pkg/sql/show_fingerprints.go
@@ -68,11 +68,6 @@ func (p *planner) ShowFingerprints(
 	}, nil
 }
 
-var showFingerprintsColumns = sqlbase.ResultColumns{
-	{Name: "index_name", Typ: types.String},
-	{Name: "fingerprint", Typ: types.String},
-}
-
 // showFingerprintsRun contains the run-time state of
 // showFingerprintsNode during local execution.
 type showFingerprintsRun struct {
diff --git a/pkg/sql/split.go b/pkg/sql/split.go
index a89b30d22f6a..b74a9b8c8a51 100644
--- a/pkg/sql/split.go
+++ b/pkg/sql/split.go
@@ -75,9 +75,16 @@ func (p *planner) Split(ctx context.Context, n *tree.Split) (planNode, error) {
 		}
 	}
 
-	expirationTime, err := parseExpirationTime(&p.semaCtx, p.EvalContext(), n.ExpireExpr)
-	if err != nil {
-		return nil, err
+	expirationTime := hlc.MaxTimestamp
+	if n.ExpireExpr != nil {
+		typedExpireExpr, err := n.ExpireExpr.TypeCheck(&p.semaCtx, types.String)
+		if err != nil {
+			return nil, err
+		}
+		expirationTime, err = parseExpirationTime(p.EvalContext(), typedExpireExpr)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	return &splitNode{
@@ -89,21 +96,6 @@
 	}, nil
 }
 
-var splitNodeColumns = sqlbase.ResultColumns{
-	{
-		Name: "key",
-		Typ:  types.Bytes,
-	},
-	{
-		Name: "pretty",
-		Typ:  types.String,
-	},
-	{
-		Name: "split_enforced_until",
-		Typ:  types.Timestamp,
-	},
-}
-
 // splitRun contains the run-time state of splitNode during local execution.
 type splitRun struct {
 	lastSplitKey []byte
@@ -194,22 +186,18 @@ func getRowKey(
 // parseExpriationTime parses an expression into a hlc.Timestamp representing
 // the expiration time of the split.
 func parseExpirationTime(
-	semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, expireExpr tree.Expr,
+	evalCtx *tree.EvalContext, expireExpr tree.TypedExpr,
 ) (hlc.Timestamp, error) {
-	if expireExpr == nil {
-		return hlc.MaxTimestamp, nil
-	}
-	typedExpireExpr, err := expireExpr.TypeCheck(semaCtx, types.String)
-	if err != nil {
-		return hlc.Timestamp{}, err
-	}
-	if !tree.IsConst(evalCtx, typedExpireExpr) {
+	if !tree.IsConst(evalCtx, expireExpr) {
 		return hlc.Timestamp{}, errors.Errorf("SPLIT AT: only constant expressions are allowed for expiration")
 	}
-	d, err := typedExpireExpr.Eval(evalCtx)
+	d, err := expireExpr.Eval(evalCtx)
 	if err != nil {
 		return hlc.Timestamp{}, err
 	}
+	if d == tree.DNull {
+		return hlc.MaxTimestamp, nil
+	}
 	stmtTimestamp := evalCtx.GetStmtTimestamp()
 	ts, err := tree.DatumToHLC(evalCtx, stmtTimestamp, d)
 	if err != nil {
diff --git a/pkg/sql/sqlbase/result_columns.go b/pkg/sql/sqlbase/result_columns.go
index 15e65eb02541..5b610121f4b3 100644
--- a/pkg/sql/sqlbase/result_columns.go
+++ b/pkg/sql/sqlbase/result_columns.go
@@ -159,3 +159,58 @@ var ShowSyntaxColumns = ResultColumns{
 	{Name: "field", Typ: types.String},
 	{Name: "message", Typ: types.String},
 }
+
+// ShowFingerprintsColumns are the result columns of a
+// SHOW EXPERIMENTAL_FINGERPRINTS statement.
+var ShowFingerprintsColumns = ResultColumns{
+	{Name: "index_name", Typ: types.String},
+	{Name: "fingerprint", Typ: types.String},
+}
+
+// AlterTableSplitColumns are the result columns of an
+// ALTER TABLE/INDEX .. SPLIT AT statement.
+var AlterTableSplitColumns = ResultColumns{
+	{Name: "key", Typ: types.Bytes},
+	{Name: "pretty", Typ: types.String},
+	{Name: "split_enforced_until", Typ: types.Timestamp},
+}
+
+// AlterTableUnsplitColumns are the result columns of an
+// ALTER TABLE/INDEX .. UNSPLIT statement.
+var AlterTableUnsplitColumns = ResultColumns{
+	{Name: "key", Typ: types.Bytes},
+	{Name: "pretty", Typ: types.String},
+}
+
+// AlterTableRelocateColumns are the result columns of an
+// ALTER TABLE/INDEX .. EXPERIMENTAL_RELOCATE statement.
+var AlterTableRelocateColumns = ResultColumns{
+	{Name: "key", Typ: types.Bytes},
+	{Name: "pretty", Typ: types.String},
+}
+
+// AlterTableScatterColumns are the result columns of an
+// ALTER TABLE/INDEX .. SCATTER statement.
+var AlterTableScatterColumns = ResultColumns{
+	{Name: "key", Typ: types.Bytes},
+	{Name: "pretty", Typ: types.String},
+}
+
+// ScrubColumns are the result columns of a SCRUB statement.
+var ScrubColumns = ResultColumns{
+	{Name: "job_uuid", Typ: types.Uuid},
+	{Name: "error_type", Typ: types.String},
+	{Name: "database", Typ: types.String},
+	{Name: "table", Typ: types.String},
+	{Name: "primary_key", Typ: types.String},
+	{Name: "timestamp", Typ: types.Timestamp},
+	{Name: "repaired", Typ: types.Bool},
+	{Name: "details", Typ: types.Jsonb},
+}
+
+// SequenceSelectColumns are the result columns of a sequence data source.
+var SequenceSelectColumns = ResultColumns{
+	{Name: `last_value`, Typ: types.Int},
+	{Name: `log_cnt`, Typ: types.Int},
+	{Name: `is_called`, Typ: types.Bool},
+}
diff --git a/pkg/sql/unsplit.go b/pkg/sql/unsplit.go
index 7f24d4ea0055..781a115f3098 100644
--- a/pkg/sql/unsplit.go
+++ b/pkg/sql/unsplit.go
@@ -90,17 +90,6 @@ func (p *planner) Unsplit(ctx context.Context, n *tree.Unsplit) (planNode, error
 	return ret, nil
 }
 
-var unsplitNodeColumns = sqlbase.ResultColumns{
-	{
-		Name: "key",
-		Typ:  types.Bytes,
-	},
-	{
-		Name: "pretty",
-		Typ:  types.String,
-	},
-}
-
 // unsplitRun contains the run-time state of unsplitNode during local execution.
 type unsplitRun struct {
 	lastUnsplitKey []byte