[WIP] opt: add hint to ignore preserved-multiplicity consistency #82188

Closed
1 change: 1 addition & 0 deletions docs/generated/sql/bnf/stmt_block.bnf
@@ -1127,6 +1127,7 @@ unreserved_keyword ::=
| 'NOWAIT'
| 'NULLS'
| 'IGNORE_FOREIGN_KEYS'
| 'IGNORE_PRESERVED_CONSISTENCY'
| 'INSENSITIVE'
| 'OF'
| 'OFF'
17 changes: 17 additions & 0 deletions pkg/sql/opt/memo/multiplicity_builder.go
@@ -219,6 +219,9 @@ func filtersMatchLeftRowsAtMostOnce(left, right RelExpr, filters FiltersExpr) bo
// according to the join filters. This is true when the following conditions are
// satisfied:
//
// 0. No table on the right side is marked with IgnorePreservedConsistency
// (e.g., this is not a scan for a SELECT ... FOR UPDATE SKIP LOCKED query).
//
// 1. If this is a cross join (there are no filters), then either:
// a. The minimum cardinality of the right input is greater than zero. There
// must be at least one right row for the left rows to be preserved.
@@ -257,6 +260,9 @@ func filtersMatchLeftRowsAtMostOnce(left, right RelExpr, filters FiltersExpr) bo
// columns in the foreign key must be not-null in order to guarantee that all
// rows will have a match in the referenced table.
func filtersMatchAllLeftRows(left, right RelExpr, filters FiltersExpr) bool {
if CheckIgnorePreservedConsistency(right.Memo().Metadata(), right) {
return false
}
if filters.IsTrue() {
// Cross join case.
if !right.Relational().Cardinality.CanBeZero() {
@@ -446,6 +452,17 @@ func rightHasSingleFilterThatMatchesLeft(left, right RelExpr, leftCol, rightCol
return leftConst == rightConst
}

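// CheckIgnorePreservedConsistency returns true if any base table that
// contributes output columns to the given expression was marked with the
// IGNORE_PRESERVED_CONSISTENCY hint, in which case rules that rely on
// preserved-multiplicity consistency must not fire.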
func CheckIgnorePreservedConsistency(md *opt.Metadata, right RelExpr) (ignore bool) {
right.Relational().OutputCols.ForEach(func(colID opt.ColumnID) {
tableID := md.ColumnMeta(colID).Table
if tableID != 0 && md.TableMeta(tableID).IgnorePreservedConsistency {
ignore = true
return
}
})
return
}

// checkSelfJoinCase returns true if all equalities in the given FiltersExpr
// are between columns from the same position in the same base table. Panics
// if verifyFilters is not checked first.
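For intuition, the new condition 0 guards against cases like the following sketch (the table, index, and column names are hypothetical, not part of this PR): under SKIP LOCKED, rows whose locks are held by other transactions are silently skipped, so a lookup from a secondary index into the primary index can no longer be assumed to find a match for every left row.

-- Hypothetical schema and query illustrating the scenario.
CREATE TABLE t (k INT PRIMARY KEY, v INT, INDEX v_idx (v));

-- With the hint, filtersMatchAllLeftRows returns false for joins into t, so the
-- optimizer stops assuming that every v_idx row still has a visible
-- primary-index row once locked rows are skipped.
SELECT * FROM t@{IGNORE_PRESERVED_CONSISTENCY} WHERE v > 10 FOR UPDATE SKIP LOCKED;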
19 changes: 10 additions & 9 deletions pkg/sql/opt/metadata.go
@@ -486,15 +486,16 @@ func (md *Metadata) DuplicateTable(
}

md.tables = append(md.tables, TableMeta{
MetaID: newTabID,
Table: tabMeta.Table,
Alias: tabMeta.Alias,
IgnoreForeignKeys: tabMeta.IgnoreForeignKeys,
Constraints: constraints,
ComputedCols: computedCols,
partialIndexPredicates: partialIndexPredicates,
indexPartitionLocalities: tabMeta.indexPartitionLocalities,
checkConstraintsStats: checkConstraintsStats,
MetaID: newTabID,
Table: tabMeta.Table,
Alias: tabMeta.Alias,
IgnoreForeignKeys: tabMeta.IgnoreForeignKeys,
IgnorePreservedConsistency: tabMeta.IgnorePreservedConsistency,
Constraints: constraints,
ComputedCols: computedCols,
partialIndexPredicates: partialIndexPredicates,
indexPartitionLocalities: tabMeta.indexPartitionLocalities,
checkConstraintsStats: checkConstraintsStats,
})

return newTabID
4 changes: 4 additions & 0 deletions pkg/sql/opt/norm/general_funcs.go
@@ -933,6 +933,10 @@ func (c *CustomFuncs) JoinPreservesRightRows(join memo.RelExpr) bool {
return mult.JoinPreservesRightRows(join.Op())
}

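// IndexJoinPreservesRows returns true if an index join over the given
// expression can be assumed to produce an output row for every input row,
// i.e. none of the tables contributing its output columns were marked with
// the IGNORE_PRESERVED_CONSISTENCY hint.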
func (c *CustomFuncs) IndexJoinPreservesRows(expr memo.RelExpr) bool {
return !memo.CheckIgnorePreservedConsistency(c.mem.Metadata(), expr)
}

// NoJoinHints returns true if no hints were specified for this join.
func (c *CustomFuncs) NoJoinHints(p *memo.JoinPrivate) bool {
return p.Flags.Empty()
8 changes: 8 additions & 0 deletions pkg/sql/opt/norm/testdata/rules/limit
@@ -1571,3 +1571,11 @@ distinct-on
│ └── column1
└── first-agg
└── "?column?"

exec-ddl
CREATE TABLE m (n INT PRIMARY KEY, o INT, p INT, INDEX (p))
----

optsteps
SELECT * FROM m WHERE p > 5 AND p < 100 ORDER BY p LIMIT 5
----
3 changes: 3 additions & 0 deletions pkg/sql/opt/optbuilder/select.go
@@ -461,6 +461,9 @@ func (b *Builder) buildScan(
if indexFlags.IgnoreUniqueWithoutIndexKeys {
tabMeta.IgnoreUniqueWithoutIndexKeys = true
}
if indexFlags.IgnorePreservedConsistency {
tabMeta.IgnorePreservedConsistency = true
}
}

outScope = inScope.push()
7 changes: 7 additions & 0 deletions pkg/sql/opt/table_meta.go
@@ -141,6 +141,12 @@ type TableMeta struct {
// depend on the consistency of unique without index constraints.
IgnoreUniqueWithoutIndexKeys bool

// IgnorePreservedConsistency is true if we should disable any rules that
// depend on preserved-multiplicity consistency of this table (i.e. rules
// that assume there is always a primary index row for every secondary index
// row, or a referenced row for every foreign key reference). This is used
// for SKIP LOCKED queries.
IgnorePreservedConsistency bool

// Constraints stores a *FiltersExpr containing filters that are known to
// evaluate to true on the table data. This list is extracted from validated
// check constraints; specifically, those check constraints that we can prove
@@ -202,6 +208,7 @@ func (tm *TableMeta) copyFrom(from *TableMeta, copyScalarFn func(Expr) Expr) {
Alias: from.Alias,
IgnoreForeignKeys: from.IgnoreForeignKeys,
IgnoreUniqueWithoutIndexKeys: from.IgnoreUniqueWithoutIndexKeys,
IgnorePreservedConsistency: from.IgnorePreservedConsistency,
// Annotations are not copied.
}

10 changes: 9 additions & 1 deletion pkg/sql/opt/xform/groupby_funcs.go
@@ -389,9 +389,11 @@ func (c *CustomFuncs) GenerateLimitedGroupByScans(
}
// Iterate over all non-inverted and non-partial secondary indexes.
var pkCols opt.ColSet
var iter scanIndexIter
var sb indexScanBuilder
sb.Init(c, sp.Table)
tabMeta := c.e.mem.Metadata().TableMeta(sp.Table)

var iter scanIndexIter
iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, sp, nil /* filters */, rejectPrimaryIndex|rejectInvertedIndexes)
iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool, constProj memo.ProjectionsExpr) {
// The iterator only produces pseudo-partial indexes (the predicate is
@@ -411,6 +413,12 @@ func (c *CustomFuncs) GenerateLimitedGroupByScans(
return
}

// Otherwise, try to construct an IndexJoin operator that provides the
// columns missing from the index.
if sp.Flags.NoIndexJoin || tabMeta.IgnorePreservedConsistency {
return
}

// Calculate the PK columns once.
if pkCols.Empty() {
pkCols = c.PrimaryKeyCols(sp.Table)
13 changes: 11 additions & 2 deletions pkg/sql/opt/xform/limit_funcs.go
@@ -107,6 +107,7 @@ func (c *CustomFuncs) GenerateLimitedScans(
var pkCols opt.ColSet
var sb indexScanBuilder
sb.Init(c, scanPrivate.Table)
tabMeta := c.e.mem.Metadata().TableMeta(scanPrivate.Table)

// Iterate over all non-inverted, non-partial indexes, looking for those
// that can be limited.
@@ -145,7 +146,7 @@ func (c *CustomFuncs) GenerateLimitedScans(

// Otherwise, try to construct an IndexJoin operator that provides the
// columns missing from the index.
if scanPrivate.Flags.NoIndexJoin {
if scanPrivate.Flags.NoIndexJoin || tabMeta.IgnorePreservedConsistency {
return
}

@@ -276,9 +277,11 @@ func (c *CustomFuncs) GenerateLimitedTopKScans(
}
// Iterate over all non-inverted and non-partial secondary indexes.
var pkCols opt.ColSet
var iter scanIndexIter
var sb indexScanBuilder
sb.Init(c, sp.Table)
tabMeta := c.e.mem.Metadata().TableMeta(sp.Table)

var iter scanIndexIter
iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, sp, nil /* filters */, rejectPrimaryIndex|rejectInvertedIndexes)
iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool, constProj memo.ProjectionsExpr) {
// The iterator only produces pseudo-partial indexes (the predicate is
@@ -298,6 +301,12 @@ func (c *CustomFuncs) GenerateLimitedTopKScans(
return
}

// Otherwise, try to construct an IndexJoin operator that provides the
// columns missing from the index.
if sp.Flags.NoIndexJoin || tabMeta.IgnorePreservedConsistency {
return
}

// Calculate the PK columns once.
if pkCols.Empty() {
pkCols = c.PrimaryKeyCols(sp.Table)
3 changes: 2 additions & 1 deletion pkg/sql/opt/xform/rules/limit.opt
@@ -41,7 +41,8 @@
# TODO(radu): we can similarly push Offset too.
[PushLimitIntoIndexJoin, Explore]
(Limit
(IndexJoin $input:* $indexJoinPrivate:*)
(IndexJoin $input:* $indexJoinPrivate:*) &
(IndexJoinPreservesRows $input)
$limitExpr:(Const $limit:* & (IsPositiveInt $limit))
$ordering:* &
(OrderingCanProjectCols
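A sketch of what the new guard changes (identifiers hypothetical): PushLimitIntoIndexJoin normally pushes the Limit below the IndexJoin because the join emits one output row per input row; with IGNORE_PRESERVED_CONSISTENCY set on the table that one-to-one mapping is not guaranteed, so pre-limiting the join's input could return fewer rows than requested, and the rule must not fire.

-- Hypothetical: without the hint, LIMIT 5 can be pushed into the secondary-index
-- scan feeding the index join; with the hint it stays above the join.
SELECT * FROM t@{IGNORE_PRESERVED_CONSISTENCY} WHERE v > 10 ORDER BY v LIMIT 5;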
18 changes: 15 additions & 3 deletions pkg/sql/opt/xform/select_funcs.go
@@ -102,11 +102,13 @@ func (c *CustomFuncs) GeneratePartialIndexScans(
) {
// Iterate over all partial indexes.
var pkCols opt.ColSet
var sb indexScanBuilder
sb.Init(c, scanPrivate.Table)
tabMeta := c.e.mem.Metadata().TableMeta(scanPrivate.Table)

var iter scanIndexIter
iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, scanPrivate, filters, rejectNonPartialIndexes|rejectInvertedIndexes)
iter.ForEach(func(index cat.Index, remainingFilters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool, constProj memo.ProjectionsExpr) {
var sb indexScanBuilder
sb.Init(c, scanPrivate.Table)
newScanPrivate := *scanPrivate
newScanPrivate.Index = index.Ordinal()
newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols)
@@ -121,6 +123,12 @@
return
}

// Otherwise, try to construct an IndexJoin operator that provides the
// columns missing from the index.
if scanPrivate.Flags.NoIndexJoin || tabMeta.IgnorePreservedConsistency {
return
}

// Calculate the PK columns once.
if pkCols.Empty() {
pkCols = c.PrimaryKeyCols(scanPrivate.Table)
@@ -464,7 +472,7 @@ func (c *CustomFuncs) GenerateConstrainedScans(

// Otherwise, construct an IndexJoin operator that provides the columns
// missing from the index.
if scanPrivate.Flags.NoIndexJoin {
if scanPrivate.Flags.NoIndexJoin || tabMeta.IgnorePreservedConsistency {
return
}

@@ -892,6 +900,10 @@ func (c *CustomFuncs) GenerateInvertedIndexScans(
newScanPrivate.SetConstraint(c.e.evalCtx, constraint)
newScanPrivate.InvertedConstraint = spansToRead

if scanPrivate.Flags.NoIndexJoin || tabMeta.IgnorePreservedConsistency {
return
}

// Calculate the PK columns once.
if pkCols.Empty() {
pkCols = c.PrimaryKeyCols(scanPrivate.Table)
11 changes: 10 additions & 1 deletion pkg/sql/parser/sql.y
@@ -838,7 +838,8 @@ func (u *sqlSymUnion) asTenantClause() tree.TenantID {
%token <str> HAVING HASH HEADER HIGH HISTOGRAM HOLD HOUR

%token <str> IDENTITY
%token <str> IF IFERROR IFNULL IGNORE_FOREIGN_KEYS ILIKE IMMEDIATE IMPORT IN INCLUDE
%token <str> IF IFERROR IFNULL IGNORE_FOREIGN_KEYS IGNORE_PRESERVED_CONSISTENCY ILIKE IMMEDIATE
%token <str> IMPORT IN INCLUDE
%token <str> INCLUDING INCREMENT INCREMENTAL INCREMENTAL_LOCATION
%token <str> INET INET_CONTAINED_BY_OR_EQUALS
%token <str> INET_CONTAINS_OR_EQUALS INDEX INDEXES INHERITS INJECT INITIALLY
@@ -10664,6 +10665,12 @@ index_flags_param:
/* SKIP DOC */
$$.val = &tree.IndexFlags{IgnoreForeignKeys: true}
}
|
IGNORE_PRESERVED_CONSISTENCY
{
/* SKIP DOC */
$$.val = &tree.IndexFlags{IgnorePreservedConsistency: true}
}
|
FORCE_ZIGZAG
{
@@ -10741,6 +10748,7 @@ opt_index_flags:
// '{' NO_ZIGZAG_JOIN [, ...] '}'
// '{' NO_FULL_SCAN [, ...] '}'
// '{' IGNORE_FOREIGN_KEYS [, ...] '}'
// '{' IGNORE_PRESERVED_CONSISTENCY [, ...] '}'
// '{' FORCE_ZIGZAG = <idxname> [, ...] '}'
//
// Join types:
@@ -14255,6 +14263,7 @@ unreserved_keyword:
| NOWAIT
| NULLS
| IGNORE_FOREIGN_KEYS
| IGNORE_PRESERVED_CONSISTENCY
| INSENSITIVE
| OF
| OFF
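A usage sketch of the grammar above (the table name is illustrative): the new keyword is written inside the same index-flag braces as the existing hints.

SELECT * FROM t@{IGNORE_PRESERVED_CONSISTENCY} WHERE v > 10;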
13 changes: 13 additions & 0 deletions pkg/sql/sem/tree/select.go
@@ -282,6 +282,7 @@ type IndexID = catid.IndexID
// - NO_ZIGZAG_JOIN
// - NO_FULL_SCAN
// - IGNORE_FOREIGN_KEYS
// - IGNORE_PRESERVED_CONSISTENCY
// - FORCE_ZIGZAG
// - FORCE_ZIGZAG=<index_name|index_id>*
// It is used optionally after a table name in SELECT statements.
@@ -304,6 +305,11 @@ type IndexFlags struct {
// IgnoreUniqueWithoutIndexKeys disables optimizations based on unique without
// index constraints.
IgnoreUniqueWithoutIndexKeys bool
// IgnorePreservedConsistency disables optimizations based on
// preserved-multiplicity consistency of this table (e.g. consistency of
// primary and secondary index rows, consistency of foreign key
// references). This is used for SKIP LOCKED queries.
IgnorePreservedConsistency bool
// Zigzag hinting fields are distinct:
// ForceZigzag means we saw a TABLE@{FORCE_ZIGZAG}
// ZigzagIndexes means we saw TABLE@{FORCE_ZIGZAG=name}
@@ -345,6 +351,8 @@ func (ih *IndexFlags) CombineWith(other *IndexFlags) error {
result.IgnoreForeignKeys = ih.IgnoreForeignKeys || other.IgnoreForeignKeys
result.IgnoreUniqueWithoutIndexKeys = ih.IgnoreUniqueWithoutIndexKeys ||
other.IgnoreUniqueWithoutIndexKeys
result.IgnorePreservedConsistency = ih.IgnorePreservedConsistency ||
other.IgnorePreservedConsistency

if other.Direction != 0 {
if ih.Direction != 0 {
@@ -472,6 +480,11 @@ func (ih *IndexFlags) Format(ctx *FmtCtx) {
ctx.WriteString("IGNORE_UNIQUE_WITHOUT_INDEX_KEYS")
}

if ih.IgnorePreservedConsistency {
sep()
ctx.WriteString("IGNORE_PRESERVED_CONSISTENCY")
}

if ih.ForceZigzag || len(ih.ZigzagIndexes) > 0 || len(ih.ZigzagIndexIDs) > 0 {
sep()
if ih.ForceZigzag {
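Per CombineWith above, the new flag merges with other index-flag params rather than conflicting with them, and Format prints it back with the same keyword. A hypothetical combination (table and index names illustrative):

-- The forced index and the new flag come from separate index_flags_params but
-- merge into a single IndexFlags value.
SELECT k FROM t@{FORCE_INDEX=v_idx,IGNORE_PRESERVED_CONSISTENCY} WHERE v > 10;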