Skip to content

Commit

Permalink
planner: apply min-max eliminator on common handle columns (#18589)
Browse files Browse the repository at this point in the history
* planner: apply min-max eliminator on common handle columns

* reuse AccessPath.IsTablePath()

* planner: apply min-max eliminator on common handle columns

* address comments

* address comment

Co-authored-by: ti-srebot <[email protected]>
  • Loading branch information
tangenta and ti-srebot authored Jul 17, 2020
1 parent d86cb28 commit ace80c7
Show file tree
Hide file tree
Showing 6 changed files with 90 additions and 7 deletions.
13 changes: 13 additions & 0 deletions executor/aggregate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -839,6 +839,19 @@ func (s *testSuiteAgg) TestAggEliminator(c *C) {
tk.MustQuery("select group_concat(b, b) from t group by a").Sort().Check(testkit.Rows("-1-1", "-2-2", "11", "<nil>"))
}

// TestClusterIndexMaxMinEliminator checks that max/min aggregate elimination
// returns correct results on a clustered-index (common handle) table, both for
// plain columns covered by the handle and for expressions over them.
func (s *testSuiteAgg) TestClusterIndexMaxMinEliminator(c *C) {
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("drop table if exists t;")
	tk.MustExec("set @@tidb_enable_clustered_index=1;")
	tk.MustExec("create table t (a int, b int, c int, primary key(a, b));")
	// Populate rows (0,0,0) through (10,10,10).
	for row := 0; row <= 10; row++ {
		tk.MustExec("insert into t values (?, ?, ?)", row, row, row)
	}
	tk.MustQuery("select max(a), min(a+b) from t;").Check(testkit.Rows("10 0"))
	tk.MustQuery("select max(a+b), min(a+b) from t;").Check(testkit.Rows("20 0"))
	tk.MustQuery("select min(a), max(a), min(b), max(b) from t;").Check(testkit.Rows("0 10 0 10"))
}

func (s *testSuiteAgg) TestMaxMinFloatScalaFunc(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)

Expand Down
2 changes: 2 additions & 0 deletions planner/core/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -658,6 +658,8 @@ func (s *testIntegrationSuite) TestMaxMinEliminate(c *C) {
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
tk.MustExec("set @@tidb_enable_clustered_index=1;")
tk.MustExec("create table cluster_index_t(a int, b int, c int, primary key (a, b));")

var input []string
var output []struct {
Expand Down
21 changes: 17 additions & 4 deletions planner/core/rule_max_min_eliminate.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,6 @@ func (a *maxMinEliminator) checkColCanUseIndex(plan LogicalPlan, col *expression
case *DataSource:
// Check whether there is an AccessPath can use index for col.
for _, path := range p.possibleAccessPaths {
// TODO: support common handle path.
if path.IsIntHandlePath {
// Since table path can contain accessConds of at most one column,
// we only need to check if all of the conditions can be pushed down as accessConds
Expand All @@ -73,15 +72,18 @@ func (a *maxMinEliminator) checkColCanUseIndex(plan LogicalPlan, col *expression
return true
}
} else {
// For index paths, we have to check:
indexCols, indexColLen := path.FullIdxCols, path.FullIdxColLens
if path.IsCommonHandlePath {
indexCols, indexColLen = commonHandleIndexColAndLength(p.handleCols)
}
// 1. whether all of the conditions can be pushed down as accessConds.
// 2. whether the AccessPath can satisfy the order property of `col` with these accessConds.
result, err := ranger.DetachCondAndBuildRangeForIndex(p.ctx, conditions, path.FullIdxCols, path.FullIdxColLens)
result, err := ranger.DetachCondAndBuildRangeForIndex(p.ctx, conditions, indexCols, indexColLen)
if err != nil || len(result.RemainedConds) != 0 {
continue
}
for i := 0; i <= result.EqCondCount; i++ {
if i < len(path.FullIdxCols) && col.Equal(nil, path.FullIdxCols[i]) {
if i < len(indexCols) && col.Equal(nil, indexCols[i]) {
return true
}
}
Expand All @@ -93,6 +95,17 @@ func (a *maxMinEliminator) checkColCanUseIndex(plan LogicalPlan, col *expression
}
}

// commonHandleIndexColAndLength expands a common (clustered) handle into the
// index-column slice and per-column length slice expected by the range
// detacher, so a common-handle access path can be treated like an ordinary
// index path. Every column length is types.UnspecifiedLength because a
// common handle never uses prefix lengths.
func commonHandleIndexColAndLength(handleCols HandleCols) ([]*expression.Column, []int) {
	n := handleCols.NumCols()
	cols := make([]*expression.Column, n)
	lens := make([]int, n)
	for i := range cols {
		cols[i] = handleCols.GetCol(i)
		lens[i] = types.UnspecifiedLength
	}
	return cols, lens
}

// cloneSubPlans shallow clones the subPlan. We only consider `Selection` and `DataSource` here,
// because we have restricted the subPlan in `checkColCanUseIndex`.
func (a *maxMinEliminator) cloneSubPlans(plan LogicalPlan) LogicalPlan {
Expand Down
6 changes: 5 additions & 1 deletion planner/core/testdata/integration_suite_in.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,11 @@
{
"name": "TestMaxMinEliminate",
"cases": [
"explain (select max(a) from t) union (select min(a) from t)"
"explain (select max(a) from t) union (select min(a) from t)",
"explain select min(a), max(a) from cluster_index_t",
"explain select min(b), max(b) from cluster_index_t where a = 1",
"explain select min(a), max(a) from cluster_index_t where b = 1",
"explain select min(b), max(b) from cluster_index_t where b = 1"
]
},
{
Expand Down
52 changes: 52 additions & 0 deletions planner/core/testdata/integration_suite_out.json
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,58 @@
" └─Limit_61 1.00 cop[tikv] offset:0, count:1",
" └─TableFullScan_60 1.00 cop[tikv] table:t keep order:true, stats:pseudo"
]
},
{
"SQL": "explain select min(a), max(a) from cluster_index_t",
"Plan": [
"HashJoin_16 1.00 root CARTESIAN inner join",
"├─StreamAgg_43(Build) 1.00 root funcs:max(test.cluster_index_t.a)->Column#5",
"│ └─Limit_47 1.00 root offset:0, count:1",
"│ └─TableReader_57 1.00 root data:Limit_56",
"│ └─Limit_56 1.00 cop[tikv] offset:0, count:1",
"│ └─TableFullScan_55 1.00 cop[tikv] table:cluster_index_t keep order:true, desc, stats:pseudo",
"└─StreamAgg_22(Probe) 1.00 root funcs:min(test.cluster_index_t.a)->Column#4",
" └─Limit_26 1.00 root offset:0, count:1",
" └─TableReader_36 1.00 root data:Limit_35",
" └─Limit_35 1.00 cop[tikv] offset:0, count:1",
" └─TableFullScan_34 1.00 cop[tikv] table:cluster_index_t keep order:true, stats:pseudo"
]
},
{
"SQL": "explain select min(b), max(b) from cluster_index_t where a = 1",
"Plan": [
"HashJoin_19 1.00 root CARTESIAN inner join",
"├─StreamAgg_46(Build) 1.00 root funcs:max(test.cluster_index_t.b)->Column#5",
"│ └─Limit_50 1.00 root offset:0, count:1",
"│ └─TableReader_60 1.00 root data:Limit_59",
"│ └─Limit_59 1.00 cop[tikv] offset:0, count:1",
"│ └─TableRangeScan_58 1.00 cop[tikv] table:cluster_index_t range:[1,1], keep order:true, desc, stats:pseudo",
"└─StreamAgg_25(Probe) 1.00 root funcs:min(test.cluster_index_t.b)->Column#4",
" └─Limit_29 1.00 root offset:0, count:1",
" └─TableReader_39 1.00 root data:Limit_38",
" └─Limit_38 1.00 cop[tikv] offset:0, count:1",
" └─TableRangeScan_37 1.00 cop[tikv] table:cluster_index_t range:[1,1], keep order:true, stats:pseudo"
]
},
{
"SQL": "explain select min(a), max(a) from cluster_index_t where b = 1",
"Plan": [
"StreamAgg_20 1.00 root funcs:min(Column#8)->Column#4, funcs:max(Column#9)->Column#5",
"└─TableReader_21 1.00 root data:StreamAgg_9",
" └─StreamAgg_9 1.00 cop[tikv] funcs:min(test.cluster_index_t.a)->Column#8, funcs:max(test.cluster_index_t.a)->Column#9",
" └─Selection_19 10.00 cop[tikv] eq(test.cluster_index_t.b, 1)",
" └─TableFullScan_18 10000.00 cop[tikv] table:cluster_index_t keep order:false, stats:pseudo"
]
},
{
"SQL": "explain select min(b), max(b) from cluster_index_t where b = 1",
"Plan": [
"StreamAgg_20 1.00 root funcs:min(Column#8)->Column#4, funcs:max(Column#9)->Column#5",
"└─TableReader_21 1.00 root data:StreamAgg_9",
" └─StreamAgg_9 1.00 cop[tikv] funcs:min(test.cluster_index_t.b)->Column#8, funcs:max(test.cluster_index_t.b)->Column#9",
" └─Selection_19 10.00 cop[tikv] eq(test.cluster_index_t.b, 1)",
" └─TableFullScan_18 10000.00 cop[tikv] table:cluster_index_t keep order:false, stats:pseudo"
]
}
]
},
Expand Down
3 changes: 1 addition & 2 deletions util/ranger/checker.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,7 @@ func (c *conditionChecker) check(condition expression.Expression) bool {
case *expression.ScalarFunction:
return c.checkScalarFunction(x)
case *expression.Column:
s, _ := condition.(*expression.Column)
if s.RetType.EvalType() == types.ETString {
if x.RetType.EvalType() == types.ETString {
return false
}
return c.checkColumn(x)
Expand Down

0 comments on commit ace80c7

Please sign in to comment.