From ade7ddb6a2d7bff6d4541fcafad98672fd6aedd0 Mon Sep 17 00:00:00 2001 From: xuhuaiyu <391585975@qq.com> Date: Tue, 16 Nov 2021 15:34:28 +0800 Subject: [PATCH 1/7] *: track the memory usage of IndexJoin more accurately --- ddl/db_test.go | 2 +- distsql/request_builder.go | 16 ++++++++++++---- executor/builder.go | 9 +++++++++ executor/chunk_size_control_test.go | 9 ++++++--- executor/index_lookup_hash_join.go | 1 + executor/index_lookup_join.go | 26 ++++++++++++++++++++++---- executor/point_get.go | 2 +- executor/point_get_test.go | 5 ++++- executor/show.go | 13 +++++++------ executor/split.go | 9 +++++---- expression/integration_test.go | 5 ++++- server/http_handler.go | 4 ++-- server/http_handler_test.go | 4 +++- session/schema_amender_serial_test.go | 7 ++++++- session/tidb_test.go | 6 +++++- store/helper/helper.go | 2 +- store/mockstore/cluster_test.go | 5 +++-- tablecodec/tablecodec.go | 11 +++++++---- tablecodec/tablecodec_test.go | 15 ++++++++------- util/codec/bytes.go | 10 +++++++--- util/codec/codec.go | 6 +++--- 21 files changed, 117 insertions(+), 50 deletions(-) diff --git a/ddl/db_test.go b/ddl/db_test.go index cb1c90b54df49..e699d3409f424 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -2166,7 +2166,7 @@ func checkGlobalIndexRow(c *C, ctx sessionctx.Context, tblInfo *model.TableInfo, // Check global index entry. encodedValue, err := codec.EncodeKey(sc, nil, idxVals...) c.Assert(err, IsNil) - key := tablecodec.EncodeIndexSeekKey(tblInfo.ID, indexInfo.ID, encodedValue) + key := tablecodec.EncodeIndexSeekKey(nil, tblInfo.ID, indexInfo.ID, encodedValue) c.Assert(err, IsNil) value, err := txn.Get(context.Background(), key) c.Assert(err, IsNil) diff --git a/distsql/request_builder.go b/distsql/request_builder.go index 44378b2a262a2..18c03c631b253 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -587,8 +587,8 @@ func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, high = kv.Key(high).PrefixNext() } for _, tid := range tids { - startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + startKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, high) krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) } } @@ -650,8 +650,8 @@ func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idx return nil, err } for _, tid := range tids { - startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + startKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, high) krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) } } @@ -660,11 +660,17 @@ func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idx func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) { low, err := codec.EncodeKey(sc, nil, ran.LowVal...) + sc.MemTracker.Consume(int64(cap(low))) if err != nil { return nil, nil, err } if ran.LowExclude { + // PrefixNext builds a new Key whose length equals that of `low`. The memory + // consumption needs to be considered since the consumption is high when + // the count of `ran` is huge. + sc.MemTracker.Consume(int64(cap(low))) low = kv.Key(low).PrefixNext() + sc.MemTracker.Consume(-int64(cap(low))) } high, err := codec.EncodeKey(sc, nil, ran.HighVal...)
if err != nil { @@ -672,7 +678,9 @@ func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, [] } if !ran.HighExclude { + sc.MemTracker.Consume(int64(cap(high))) high = kv.Key(high).PrefixNext() + sc.MemTracker.Consume(-int64(cap(high))) } var hasNull bool diff --git a/executor/builder.go b/executor/builder.go index c7852d83e9655..810f2fe07b570 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -4080,6 +4080,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l if err != nil { return nil, err } + tmpDatumRangesLen := len(tmpDatumRanges) for _, nextColRan := range nextColRanges { for _, ran := range ranges { ran.LowVal[lastPos] = nextColRan.LowVal[0] @@ -4089,9 +4090,17 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l tmpDatumRanges = append(tmpDatumRanges, ran.Clone()) } } + if len(tmpDatumRanges) > tmpDatumRangesLen { + for _, ran := range tmpDatumRanges[tmpDatumRangesLen:] { + sc.MemTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal))) + } + } } if cwc == nil { + if len(kvRanges) != 0 { + sc.MemTracker.Consume(2 * int64(len(kvRanges[0].StartKey)*len(kvRanges))) + } sort.Slice(kvRanges, func(i, j int) bool { return bytes.Compare(kvRanges[i].StartKey, kvRanges[j].StartKey) < 0 }) diff --git a/executor/chunk_size_control_test.go b/executor/chunk_size_control_test.go index 310e5092695bd..f760a5bfe0414 100644 --- a/executor/chunk_size_control_test.go +++ b/executor/chunk_size_control_test.go @@ -17,6 +17,8 @@ package executor_test import ( "context" "fmt" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/memory" "strings" "sync" "time" @@ -95,7 +97,7 @@ func generateTableSplitKeyForInt(tid int64, splitNum []int) [][]byte { return results } -func generateIndexSplitKeyForInt(tid, idx int64, splitNum []int) [][]byte { +func generateIndexSplitKeyForInt(sc *stmtctx.StatementContext, tid, idx int64, splitNum []int) [][]byte { results := make([][]byte, 0, len(splitNum)) for _, num := range splitNum { d := new(types.Datum) @@ -104,7 +106,7 @@ func generateIndexSplitKeyForInt(tid, idx int64, splitNum []int) [][]byte { if err != nil { panic(err) } - results = append(results, tablecodec.EncodeIndexSeekKey(tid, idx, b)) + results = append(results, tablecodec.EncodeIndexSeekKey(sc, tid, idx, b)) } return results } @@ -202,8 +204,9 @@ func (s *testChunkSizeControlSuite) TestLimitAndIndexScan(c *C) { tid := tbl.Meta().ID idx := tbl.Meta().Indices[0].ID + sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} // construct two regions split by 100 - splitKeys := generateIndexSplitKeyForInt(tid, idx, []int{100}) + splitKeys := generateIndexSplitKeyForInt(sc, tid, idx, []int{100}) regionIDs := manipulateCluster(cluster, splitKeys) noDelayThreshold := time.Millisecond * 100 diff --git a/executor/index_lookup_hash_join.go b/executor/index_lookup_hash_join.go index 755c0713f13f8..2eb4e4bd6fcde 100644 --- a/executor/index_lookup_hash_join.go +++ b/executor/index_lookup_hash_join.go @@ -432,6 +432,7 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask, indexRanges: copiedRanges, keyOff2IdxOff: e.keyOff2IdxOff, stats: innerStats, + lookup: &e.IndexLookUpJoin, }, taskCh: taskCh, joiner: e.joiners[workerID], diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index 5f4945f3fd55c..fe8ab6eb9e76f 100644 --- a/executor/index_lookup_join.go +++ 
b/executor/index_lookup_join.go @@ -82,7 +82,8 @@ type IndexLookUpJoin struct { memTracker *memory.Tracker // track memory usage. - stats *indexLookUpJoinRuntimeStats + stats *indexLookUpJoinRuntimeStats + ctxCancelReason atomic.Value } type outerCtx struct { @@ -145,6 +146,7 @@ type innerWorker struct { outerCtx outerCtx ctx sessionctx.Context executorChk *chunk.Chunk + lookup *IndexLookUpJoin indexRanges []*ranger.Range nextColCompareFilters *plannercore.ColWithCmpFuncManager @@ -222,6 +224,7 @@ func (e *IndexLookUpJoin) newInnerWorker(taskCh chan *lookUpJoinTask) *innerWork indexRanges: copiedRanges, keyOff2IdxOff: e.keyOff2IdxOff, stats: innerStats, + lookup: e, } if e.lastColHelper != nil { // nextCwf.TmpConstant needs to be reset for every individual @@ -298,6 +301,9 @@ func (e *IndexLookUpJoin) getFinishedTask(ctx context.Context) (*lookUpJoinTask, select { case task = <-e.resultCh: case <-ctx.Done(): + if err := e.ctxCancelReason.Load(); err != nil { + return nil, err.(error) + } return nil, ctx.Err() } if task == nil { @@ -310,6 +316,9 @@ func (e *IndexLookUpJoin) getFinishedTask(ctx context.Context) (*lookUpJoinTask, return nil, err } case <-ctx.Done(): + if err := e.ctxCancelReason.Load(); err != nil { + return nil, err.(error) + } return nil, ctx.Err() } @@ -338,8 +347,10 @@ func (ow *outerWorker) run(ctx context.Context, wg *sync.WaitGroup) { buf = buf[:stackSize] logutil.Logger(ctx).Error("outerWorker panicked", zap.String("stack", string(buf))) task := &lookUpJoinTask{doneCh: make(chan error, 1)} - task.doneCh <- errors.Errorf("%v", r) - ow.pushToChan(ctx, task, ow.resultCh) + err := errors.Errorf("%v", r) + task.doneCh <- err + ow.lookup.ctxCancelReason.Store(err) + ow.lookup.cancelFunc() } close(ow.resultCh) close(ow.innerCh) @@ -454,8 +465,11 @@ func (iw *innerWorker) run(ctx context.Context, wg *sync.WaitGroup) { stackSize := runtime.Stack(buf, false) buf = buf[:stackSize] logutil.Logger(ctx).Error("innerWorker panicked", zap.String("stack", string(buf))) + err := errors.Errorf("%v", r) // "task != nil" is guaranteed when panic happened. - task.doneCh <- errors.Errorf("%v", r) + task.doneCh <- err + iw.lookup.ctxCancelReason.Store(err) + iw.lookup.cancelFunc() } wg.Done() }() @@ -519,6 +533,7 @@ func (iw *innerWorker) constructLookupContent(task *lookUpJoinTask) ([]*indexJoi numRows := chk.NumRows() for rowIdx := 0; rowIdx < numRows; rowIdx++ { dLookUpKey, dHashKey, err := iw.constructDatumLookupKey(task, chkIdx, rowIdx) + iw.lookup.memTracker.Consume(types.EstimatedMemUsage(dLookUpKey, len(dLookUpKey))) if err != nil { if terror.ErrorEqual(err, types.ErrWrongValue) { // We ignore rows with invalid datetime. @@ -665,6 +680,9 @@ func (iw *innerWorker) fetchInnerResults(ctx context.Context, task *lookUpJoinTa for { select { case <-ctx.Done(): + if err := iw.lookup.ctxCancelReason.Load(); err != nil { + return err.(error) + } return ctx.Err() default: } diff --git a/executor/point_get.go b/executor/point_get.go index 489bbf9bb8085..f2142bb0ac709 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -466,7 +466,7 @@ func EncodeUniqueIndexKey(ctx sessionctx.Context, tblInfo *model.TableInfo, idxI if err != nil { return nil, err } - return tablecodec.EncodeIndexSeekKey(tID, idxInfo.ID, encodedIdxVals), nil + return tablecodec.EncodeIndexSeekKey(ctx.GetSessionVars().StmtCtx, tID, idxInfo.ID, encodedIdxVals), nil } // EncodeUniqueIndexValuesForKey encodes unique index values for a key. 
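The error-propagation change in executor/index_lookup_join.go above follows a common pattern: a worker that panics records the root cause in an atomic.Value and then cancels the shared context, so a goroutine blocked on ctx.Done() can report the real error instead of the generic "context canceled". Below is a minimal, self-contained sketch of that pattern; the workerGroup type and all names in it are illustrative stand-ins, not part of this patch.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
)

type workerGroup struct {
	cancel       context.CancelFunc
	cancelReason atomic.Value // stores an error
}

func (g *workerGroup) fail(err error) {
	// Record the cause first, then cancel: closing the context's done
	// channel makes the stored value visible to anyone woken by Done().
	g.cancelReason.Store(err)
	g.cancel()
}

func (g *workerGroup) wait(ctx context.Context) error {
	<-ctx.Done()
	if reason := g.cancelReason.Load(); reason != nil {
		return reason.(error) // the real cause, e.g. a recovered panic
	}
	return ctx.Err() // plain cancellation
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	g := &workerGroup{cancel: cancel}
	go g.fail(errors.New("innerWorker panicked"))
	fmt.Println(g.wait(ctx)) // prints the worker's error, not "context canceled"
}

Storing the reason before calling cancel matters: a receive that returns because the done channel was closed happens after the Store, so the consumer's Load is guaranteed to observe it.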
diff --git a/executor/point_get_test.go b/executor/point_get_test.go index 131803a95a57f..1edfba98998d8 100644 --- a/executor/point_get_test.go +++ b/executor/point_get_test.go @@ -17,6 +17,8 @@ package executor_test import ( "context" "fmt" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/memory" "strings" "sync" "time" @@ -616,6 +618,7 @@ func (s *testPointGetSuite) TestReturnValues(c *C) { tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly + tk.Se.GetSessionVars().StmtCtx = &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} tk.MustExec("create table t (a varchar(64) primary key, b int)") tk.MustExec("insert t values ('a', 1), ('b', 2), ('c', 3)") tk.MustExec("begin pessimistic") @@ -623,7 +626,7 @@ func (s *testPointGetSuite) TestReturnValues(c *C) { tid := tk.GetTableID("t") idxVal, err := codec.EncodeKey(tk.Se.GetSessionVars().StmtCtx, nil, types.NewStringDatum("b")) c.Assert(err, IsNil) - pk := tablecodec.EncodeIndexSeekKey(tid, 1, idxVal) + pk := tablecodec.EncodeIndexSeekKey(tk.Se.GetSessionVars().StmtCtx, tid, 1, idxVal) txnCtx := tk.Se.GetSessionVars().TxnCtx val, ok := txnCtx.GetKeyInPessimisticLockCache(pk) c.Assert(ok, IsTrue) diff --git a/executor/show.go b/executor/show.go index 9fcbb67e7cf66..7a0e18cd11303 100644 --- a/executor/show.go +++ b/executor/show.go @@ -1759,14 +1759,15 @@ func (e *ShowExec) fetchShowTableRegions() error { // Get table regions from from pd, not from regionCache, because the region cache maybe outdated. var regions []regionMeta + sc := e.ctx.GetSessionVars().StmtCtx if len(e.IndexName.L) != 0 { indexInfo := tb.Meta().FindIndexByName(e.IndexName.L) if indexInfo == nil { return plannercore.ErrKeyDoesNotExist.GenWithStackByArgs(e.IndexName, tb.Meta().Name) } - regions, err = getTableIndexRegions(indexInfo, physicalIDs, tikvStore, splitStore) + regions, err = getTableIndexRegions(sc, indexInfo, physicalIDs, tikvStore, splitStore) } else { - regions, err = getTableRegions(tb, physicalIDs, tikvStore, splitStore) + regions, err = getTableRegions(sc, tb, physicalIDs, tikvStore, splitStore) } if err != nil { @@ -1776,11 +1777,11 @@ func (e *ShowExec) fetchShowTableRegions() error { return nil } -func getTableRegions(tb table.Table, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { +func getTableRegions(sc *stmtctx.StatementContext, tb table.Table, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { regions := make([]regionMeta, 0, len(physicalIDs)) uniqueRegionMap := make(map[uint64]struct{}) for _, id := range physicalIDs { - rs, err := getPhysicalTableRegions(id, tb.Meta(), tikvStore, splitStore, uniqueRegionMap) + rs, err := getPhysicalTableRegions(sc, id, tb.Meta(), tikvStore, splitStore, uniqueRegionMap) if err != nil { return nil, err } @@ -1789,11 +1790,11 @@ func getTableRegions(tb table.Table, physicalIDs []int64, tikvStore helper.Stora return regions, nil } -func getTableIndexRegions(indexInfo *model.IndexInfo, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { +func getTableIndexRegions(sc *stmtctx.StatementContext, indexInfo *model.IndexInfo, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { regions := make([]regionMeta, 0, len(physicalIDs)) uniqueRegionMap := 
make(map[uint64]struct{}) for _, id := range physicalIDs { - rs, err := getPhysicalIndexRegions(id, indexInfo, tikvStore, splitStore, uniqueRegionMap) + rs, err := getPhysicalIndexRegions(sc, id, indexInfo, tikvStore, splitStore, uniqueRegionMap) if err != nil { return nil, err } diff --git a/executor/split.go b/executor/split.go index beca16bfe5d90..808288f00333d 100644 --- a/executor/split.go +++ b/executor/split.go @@ -19,6 +19,7 @@ import ( "context" "encoding/binary" "fmt" + "github.com/pingcap/tidb/sessionctx/stmtctx" "math" "time" @@ -622,7 +623,7 @@ type regionMeta struct { approximateKeys int64 } -func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { +func getPhysicalTableRegions(sc *stmtctx.StatementContext, physicalTableID int64, tableInfo *model.TableInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { if uniqueRegionMap == nil { uniqueRegionMap = make(map[uint64]struct{}) } @@ -651,7 +652,7 @@ func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, if index.State != model.StatePublic { continue } - startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, index.ID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(sc, physicalTableID, index.ID) regionMetas, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey) if err != nil { return nil, err @@ -670,12 +671,12 @@ func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, return regions, nil } -func getPhysicalIndexRegions(physicalTableID int64, indexInfo *model.IndexInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { +func getPhysicalIndexRegions(sc *stmtctx.StatementContext, physicalTableID int64, indexInfo *model.IndexInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { if uniqueRegionMap == nil { uniqueRegionMap = make(map[uint64]struct{}) } - startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, indexInfo.ID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(sc, physicalTableID, indexInfo.ID) regionCache := tikvStore.GetRegionCache() regions, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey) if err != nil { diff --git a/expression/integration_test.go b/expression/integration_test.go index 1d998efbc82e1..79242033b9d02 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -19,6 +19,8 @@ import ( "context" "encoding/hex" "fmt" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/memory" "math" "math/rand" "sort" @@ -5098,10 +5100,11 @@ func (s *testIntegrationSuite) TestTiDBInternalFunc(c *C) { is = dom.InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) + sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} buildIndexKeyFromData := func(tableID, indexID int64, data []types.Datum) string { k, err := codec.EncodeKey(tk.Se.GetSessionVars().StmtCtx, nil, data...) 
c.Assert(err, IsNil) - k = tablecodec.EncodeIndexSeekKey(tableID, indexID, k) + k = tablecodec.EncodeIndexSeekKey(sc, tableID, indexID, k) hexKey := hex.EncodeToString(codec.EncodeBytes(nil, k)) return hexKey } diff --git a/server/http_handler.go b/server/http_handler.go index 3fe4ac8587cd7..38cee12cb8b0d 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -1322,7 +1322,7 @@ func (h tableHandler) handleScatterTableRequest(schema infoschema.InfoSchema, tb for _, index := range tbl.Indices() { indexID := index.Meta().ID indexName := index.Meta().Name.String() - startKey, endKey := tablecodec.GetTableIndexKeyRange(tableID, indexID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(nil, tableID, indexID) startKey = codec.EncodeBytes([]byte{}, startKey) endKey = codec.EncodeBytes([]byte{}, endKey) name := tableName + "-" + indexName @@ -1450,7 +1450,7 @@ func (h tableHandler) getRegionsByID(tbl table.Table, id int64, name string) (*T indexID := index.Meta().ID indices[i].Name = index.Meta().Name.String() indices[i].ID = indexID - startKey, endKey := tablecodec.GetTableIndexKeyRange(id, indexID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(nil, id, indexID) regions, err := pdCli.ScanRegions(ctx, startKey, endKey, -1) if err != nil { return nil, err diff --git a/server/http_handler_test.go b/server/http_handler_test.go index 3278732a0059f..39b188128b28b 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -23,6 +23,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "github.com/pingcap/tidb/util/memory" "io" "net" "net/http" @@ -93,7 +94,8 @@ func TestRegionIndexRange(t *testing.T) { encodedValue, err := codec.EncodeKey(&stmtctx.StatementContext{TimeZone: time.Local}, nil, indexValues...) require.NoError(t, err) - startKey := tablecodec.EncodeIndexSeekKey(sTableID, sIndex, encodedValue) + sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} + startKey := tablecodec.EncodeIndexSeekKey(sc, sTableID, sIndex, encodedValue) recordPrefix := tablecodec.GenTableRecordPrefix(eTableID) endKey := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID)) diff --git a/session/schema_amender_serial_test.go b/session/schema_amender_serial_test.go index a22f21ad6e225..2daf31ad1b38c 100644 --- a/session/schema_amender_serial_test.go +++ b/session/schema_amender_serial_test.go @@ -17,15 +17,18 @@ package session import ( "bytes" "context" + "github.com/pingcap/tidb/util/memory" "sort" "strconv" "testing" + "time" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" @@ -431,9 +434,11 @@ func TestAmendCollectAndGenMutations(t *testing.T) { schemaAmender := NewSchemaAmenderForTikvTxn(se) // Some noisy index key values. 
+ + se.sessionVars.StmtCtx = &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} for i := 0; i < 4; i++ { idxValue := []byte("idxValue") - idxKey := tablecodec.EncodeIndexSeekKey(oldTbInfo.Meta().ID, oldTbInfo.Indices()[i].Meta().ID, idxValue) + idxKey := tablecodec.EncodeIndexSeekKey(se.sessionVars.StmtCtx, oldTbInfo.Meta().ID, oldTbInfo.Indices()[i].Meta().ID, idxValue) err = txn.Set(idxKey, idxValue) require.NoError(t, err) mutations.Push(kvrpcpb.Op_Put, idxKey, idxValue, false) diff --git a/session/tidb_test.go b/session/tidb_test.go index 4c854b4b66354..d47a59a2ccdf9 100644 --- a/session/tidb_test.go +++ b/session/tidb_test.go @@ -16,8 +16,11 @@ package session import ( "context" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/memory" "sync" "testing" + "time" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -74,8 +77,9 @@ func TestParseErrorWarn(t *testing.T) { func TestKeysNeedLock(t *testing.T) { t.Parallel() + sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} rowKey := tablecodec.EncodeRowKeyWithHandle(1, kv.IntHandle(1)) - indexKey := tablecodec.EncodeIndexSeekKey(1, 1, []byte{1}) + indexKey := tablecodec.EncodeIndexSeekKey(sc, 1, 1, []byte{1}) uniqueValue := make([]byte, 8) uniqueUntouched := append(uniqueValue, '1') nonUniqueVal := []byte{'0'} diff --git a/store/helper/helper.go b/store/helper/helper.go index 3b8e122071959..073895ec4b92d 100644 --- a/store/helper/helper.go +++ b/store/helper/helper.go @@ -637,7 +637,7 @@ func newTableWithKeyRange(db *model.DBInfo, table *model.TableInfo) tableInfoWit } func newIndexWithKeyRange(db *model.DBInfo, table *model.TableInfo, index *model.IndexInfo) tableInfoWithKeyRange { - sk, ek := tablecodec.GetTableIndexKeyRange(table.ID, index.ID) + sk, ek := tablecodec.GetTableIndexKeyRange(nil, table.ID, index.ID) startKey := bytesKeyToHex(codec.EncodeBytes(nil, sk)) endKey := bytesKeyToHex(codec.EncodeBytes(nil, ek)) return tableInfoWithKeyRange{ diff --git a/store/mockstore/cluster_test.go b/store/mockstore/cluster_test.go index 9384a433fcba7..70099609d14ec 100644 --- a/store/mockstore/cluster_test.go +++ b/store/mockstore/cluster_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/rowcodec" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/testutils" @@ -52,7 +53,7 @@ func TestClusterSplit(t *testing.T) { idxID := int64(2) colID := int64(3) handle := int64(1) - sc := &stmtctx.StatementContext{TimeZone: time.UTC} + sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} for i := 0; i < 1000; i++ { rowKey := tablecodec.EncodeRowKeyWithHandle(tblID, kv.IntHandle(handle)) colValue := types.NewStringDatum(strconv.Itoa(int(handle))) @@ -64,7 +65,7 @@ func TestClusterSplit(t *testing.T) { encodedIndexValue, err1 := codec.EncodeKey(sc, nil, []types.Datum{colValue, types.NewIntDatum(handle)}...) 
require.NoError(t, err1) - idxKey := tablecodec.EncodeIndexSeekKey(tblID, idxID, encodedIndexValue) + idxKey := tablecodec.EncodeIndexSeekKey(sc, tblID, idxID, encodedIndexValue) txn.Set(idxKey, []byte{'0'}) handle++ } diff --git a/tablecodec/tablecodec.go b/tablecodec/tablecodec.go index f610e09104572..6f33e435f6404 100644 --- a/tablecodec/tablecodec.go +++ b/tablecodec/tablecodec.go @@ -628,11 +628,14 @@ func Unflatten(datum types.Datum, ft *types.FieldType, loc *time.Location) (type } // EncodeIndexSeekKey encodes an index value to kv.Key. -func EncodeIndexSeekKey(tableID int64, idxID int64, encodedValue []byte) kv.Key { +func EncodeIndexSeekKey(sc *stmtctx.StatementContext, tableID int64, idxID int64, encodedValue []byte) kv.Key { key := make([]byte, 0, RecordRowKeyLen+len(encodedValue)) key = appendTableIndexPrefix(key, tableID) key = codec.EncodeInt(key, idxID) key = append(key, encodedValue...) + if sc != nil && sc.MemTracker != nil { + sc.MemTracker.Consume(int64(cap(key))) + } return key } @@ -1029,9 +1032,9 @@ func GetTableHandleKeyRange(tableID int64) (startKey, endKey []byte) { } // GetTableIndexKeyRange returns table index's key range with tableID and indexID. -func GetTableIndexKeyRange(tableID, indexID int64) (startKey, endKey []byte) { - startKey = EncodeIndexSeekKey(tableID, indexID, nil) - endKey = EncodeIndexSeekKey(tableID, indexID, []byte{255}) +func GetTableIndexKeyRange(sc *stmtctx.StatementContext, tableID, indexID int64) (startKey, endKey []byte) { + startKey = EncodeIndexSeekKey(sc, tableID, indexID, nil) + endKey = EncodeIndexSeekKey(sc, tableID, indexID, []byte{255}) return } diff --git a/tablecodec/tablecodec_test.go b/tablecodec/tablecodec_test.go index 554287b8dc6a7..0992103888391 100644 --- a/tablecodec/tablecodec_test.go +++ b/tablecodec/tablecodec_test.go @@ -16,6 +16,7 @@ package tablecodec import ( "fmt" + "github.com/pingcap/tidb/util/memory" "math" "testing" "time" @@ -335,12 +336,12 @@ func TestCutKeyNew(t *testing.T) { values := []types.Datum{types.NewIntDatum(1), types.NewBytesDatum([]byte("abc")), types.NewFloat64Datum(5.5)} handle := types.NewIntDatum(100) values = append(values, handle) - sc := &stmtctx.StatementContext{TimeZone: time.UTC} + sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} encodedValue, err := codec.EncodeKey(sc, nil, values...) 
require.NoError(t, err) tableID := int64(4) indexID := int64(5) - indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) + indexKey := EncodeIndexSeekKey(sc, tableID, indexID, encodedValue) valuesBytes, handleBytes, err := CutIndexKeyNew(indexKey, 3) require.NoError(t, err) for i := 0; i < 3; i++ { @@ -364,7 +365,7 @@ func TestCutKey(t *testing.T) { require.NoError(t, err) tableID := int64(4) indexID := int64(5) - indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) + indexKey := EncodeIndexSeekKey(sc, tableID, indexID, encodedValue) valuesMap, handleBytes, err := CutIndexKey(indexKey, colIDs) require.NoError(t, err) for i, colID := range colIDs { @@ -394,7 +395,7 @@ func TestIndexKey(t *testing.T) { t.Parallel() tableID := int64(4) indexID := int64(5) - indexKey := EncodeIndexSeekKey(tableID, indexID, []byte{}) + indexKey := EncodeIndexSeekKey(nil, tableID, indexID, []byte{}) tTableID, tIndexID, isRecordKey, err := DecodeKeyHead(indexKey) require.NoError(t, err) require.Equal(t, tableID, tTableID) @@ -482,7 +483,7 @@ func TestDecodeIndexKey(t *testing.T) { sc := &stmtctx.StatementContext{TimeZone: time.UTC} encodedValue, err := codec.EncodeKey(sc, nil, values...) require.NoError(t, err) - indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) + indexKey := EncodeIndexSeekKey(sc, tableID, indexID, encodedValue) decodeTableID, decodeIndexID, decodeValues, err := DecodeIndexKey(indexKey) require.NoError(t, err) @@ -508,8 +509,8 @@ func TestRange(t *testing.T) { require.Less(t, string(e1), string(s2)) require.Less(t, string(s2), string(e2)) - s1, e1 = GetTableIndexKeyRange(42, 666) - s2, e2 = GetTableIndexKeyRange(42, 667) + s1, e1 = GetTableIndexKeyRange(nil, 42, 666) + s2, e2 = GetTableIndexKeyRange(nil, 42, 667) require.Less(t, string(s1), string(e1)) require.Less(t, string(e1), string(s2)) require.Less(t, string(s2), string(e2)) diff --git a/util/codec/bytes.go b/util/codec/bytes.go index c4f61442822fe..e3bae59b1f041 100644 --- a/util/codec/bytes.go +++ b/util/codec/bytes.go @@ -20,6 +20,7 @@ import ( "unsafe" "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" ) const ( @@ -49,7 +50,7 @@ func EncodeBytes(b []byte, data []byte) []byte { // that is `(len(data) / 8 + 1) * 9` in our implement. dLen := len(data) reallocSize := (dLen/encGroupSize + 1) * (encGroupSize + 1) - result := reallocBytes(b, reallocSize) + result := reallocBytes(nil, b, reallocSize) for idx := 0; idx <= dLen; idx += encGroupSize { remain := dLen - idx padCount := 0 @@ -150,7 +151,7 @@ func DecodeBytesDesc(b []byte, buf []byte) ([]byte, []byte, error) { // efficient in both space and time compare to EncodeBytes. Note that the encoded // result is not memcomparable. func EncodeCompactBytes(b []byte, data []byte) []byte { - b = reallocBytes(b, binary.MaxVarintLen64+len(data)) + b = reallocBytes(nil, b, binary.MaxVarintLen64+len(data)) b = EncodeVarint(b, int64(len(data))) return append(b, data...) } @@ -202,11 +203,14 @@ func reverseBytes(b []byte) { } // reallocBytes is like realloc. 
-func reallocBytes(b []byte, n int) []byte { +func reallocBytes(sc *stmtctx.StatementContext, b []byte, n int) []byte { newSize := len(b) + n if cap(b) < newSize { bs := make([]byte, len(b), newSize) copy(bs, b) + if sc != nil && sc.MemTracker != nil { + sc.MemTracker.Consume(int64(cap(bs) - cap(b))) + } return bs } diff --git a/util/codec/codec.go b/util/codec/codec.go index f6daf7e54261d..625accbde1004 100644 --- a/util/codec/codec.go +++ b/util/codec/codec.go @@ -55,7 +55,7 @@ const ( sizeFloat64 = unsafe.Sizeof(float64(0)) ) -func preRealloc(b []byte, vals []types.Datum, comparable bool) []byte { +func preRealloc(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comparable bool) []byte { var size int for i := range vals { switch vals[i].Kind() { @@ -75,13 +75,13 @@ func preRealloc(b []byte, vals []types.Datum, comparable bool) []byte { return b } } - return reallocBytes(b, size) + return reallocBytes(sc, b, size) } // encode will encode a datum and append it to a byte slice. If comparable is true, the encoded bytes can be sorted as it's original order. // If hash is true, the encoded bytes can be checked equal as it's original value. func encode(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comparable bool) (_ []byte, err error) { - b = preRealloc(b, vals, comparable) + b = preRealloc(sc, b, vals, comparable) for i, length := 0, len(vals); i < length; i++ { switch vals[i].Kind() { case types.KindInt64: From 1f5403f5c6c09b55ebf6276cc2401818316c4af2 Mon Sep 17 00:00:00 2001 From: xuhuaiyu <391585975@qq.com> Date: Thu, 18 Nov 2021 17:32:47 +0800 Subject: [PATCH 2/7] refine --- br/pkg/backup/client.go | 2 +- br/pkg/lightning/backend/local/duplicate.go | 4 +- ddl/db_test.go | 2 +- distsql/request_builder.go | 48 ++++++++++-------- distsql/request_builder_test.go | 4 +- executor/builder.go | 52 +++++++++---------- executor/chunk_size_control_test.go | 9 ++-- executor/distsql.go | 6 +-- executor/executor_pkg_test.go | 2 +- executor/executor_test.go | 55 +++++++++++++++++++++ executor/index_lookup_hash_join.go | 2 + executor/index_lookup_join.go | 7 ++- executor/index_lookup_merge_join.go | 2 +- executor/index_merge_reader.go | 2 +- executor/point_get.go | 2 +- executor/point_get_test.go | 5 +- executor/show.go | 13 +++-- executor/split.go | 9 ++-- expression/integration_test.go | 5 +- planner/core/exhaust_physical_plans.go | 8 +++ server/http_handler.go | 4 +- server/http_handler_test.go | 4 +- session/schema_amender_serial_test.go | 7 +-- session/tidb_test.go | 6 +-- store/helper/helper.go | 2 +- store/mockstore/cluster_test.go | 5 +- tablecodec/tablecodec.go | 11 ++--- tablecodec/tablecodec_test.go | 15 +++--- util/codec/bytes.go | 10 ++-- util/codec/codec.go | 6 +-- 30 files changed, 179 insertions(+), 130 deletions(-) diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 79edc3403be68..971e0f3cfc7b9 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -236,7 +236,7 @@ func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) { continue } ranges = ranger.FullRange() - idxRanges, err := distsql.IndexRangesToKVRanges(nil, tblID, index.ID, ranges, nil) + idxRanges, err := distsql.IndexRangesToKVRanges(nil, tblID, index.ID, ranges, nil, nil) if err != nil { return nil, errors.Trace(err) } diff --git a/br/pkg/lightning/backend/local/duplicate.go b/br/pkg/lightning/backend/local/duplicate.go index fc88055c40c61..7e63b34f5a072 100644 --- a/br/pkg/lightning/backend/local/duplicate.go +++ 
b/br/pkg/lightning/backend/local/duplicate.go @@ -541,7 +541,7 @@ func (manager *DuplicateManager) CollectDuplicateRowsFromLocalIndex( ranges := ranger.FullRange() var keysRanges []tidbkv.KeyRange for _, id := range tableIDs { - partitionKeysRanges, err := distsql.IndexRangesToKVRanges(nil, id, indexInfo.ID, ranges, nil) + partitionKeysRanges, err := distsql.IndexRangesToKVRanges(nil, id, indexInfo.ID, ranges, nil, nil) if err != nil { return false, err } @@ -806,7 +806,7 @@ func buildTableRequests(tableID int64, isCommonHandle bool) ([]*DuplicateRequest func buildIndexRequests(tableID int64, indexInfo *model.IndexInfo) ([]*DuplicateRequest, error) { ranges := ranger.FullRange() - keysRanges, err := distsql.IndexRangesToKVRanges(nil, tableID, indexInfo.ID, ranges, nil) + keysRanges, err := distsql.IndexRangesToKVRanges(nil, tableID, indexInfo.ID, ranges, nil, nil) if err != nil { return nil, errors.Trace(err) } diff --git a/ddl/db_test.go b/ddl/db_test.go index e699d3409f424..cb1c90b54df49 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -2166,7 +2166,7 @@ func checkGlobalIndexRow(c *C, ctx sessionctx.Context, tblInfo *model.TableInfo, // Check global index entry. encodedValue, err := codec.EncodeKey(sc, nil, idxVals...) c.Assert(err, IsNil) - key := tablecodec.EncodeIndexSeekKey(nil, tblInfo.ID, indexInfo.ID, encodedValue) + key := tablecodec.EncodeIndexSeekKey(tblInfo.ID, indexInfo.ID, encodedValue) c.Assert(err, IsNil) value, err := txn.Get(context.Background(), key) c.Assert(err, IsNil) diff --git a/distsql/request_builder.go b/distsql/request_builder.go index 18c03c631b253..87a17e6f47a18 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -18,6 +18,7 @@ import ( "fmt" "math" "sort" + "sync/atomic" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -92,7 +93,7 @@ func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.R // "ranges" to "KeyRanges" firstly. func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) *RequestBuilder { if builder.err == nil { - builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil) + builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil, nil) } return builder } @@ -101,7 +102,7 @@ func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, // "ranges" to "KeyRanges" firstly. func (builder *RequestBuilder) SetIndexRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) *RequestBuilder { if builder.err == nil { - builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil) + builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil, nil) } return builder } @@ -550,14 +551,14 @@ func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange { } // IndexRangesToKVRanges converts index ranges to "KeyRange". 
-func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { - return IndexRangesToKVRangesForTables(sc, []int64{tid}, idxID, ranges, fb) +func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { + return IndexRangesToKVRangesForTables(sc, []int64{tid}, idxID, ranges, fb, interruptSignal) } // IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange". -func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { +func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { if fb == nil || fb.Hist == nil { - return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges) + return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges, interruptSignal) } feedbackRanges := make([]*ranger.Range, 0, len(ranges)) for _, ran := range ranges { @@ -587,8 +588,8 @@ func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, high = kv.Key(high).PrefixNext() } for _, tid := range tids { - startKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, high) + startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) } } @@ -642,16 +643,31 @@ func VerifyTxnScope(txnScope string, physicalTableID int64, is infoschema.InfoSc return true } -func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { +func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { krs := make([]kv.KeyRange, 0, len(ranges)) - for _, ran := range ranges { + const step = 8 + var memUsage int64 = 0 + // encodeIndexKey and EncodeIndexSeekKey are time-consuming, thus we need to + // check the interrupt signal periodically. + for i, ran := range ranges { + if i%step == 0 { + if sc != nil && sc.MemTracker != nil { + sc.MemTracker.Consume(memUsage) + memUsage = 0 + } + if interruptSignal != nil && interruptSignal.Load().(bool) { + return nil, nil + } + } low, high, err := encodeIndexKey(sc, ran) + memUsage += int64(cap(low)) + int64(cap(high)) if err != nil { return nil, err } for _, tid := range tids { - startKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(sc, tid, idxID, high) + startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + memUsage += int64(cap(startKey)) + int64(cap(endKey)) krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) } } @@ -660,17 +676,11 @@ func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idx func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) { low, err := codec.EncodeKey(sc, nil, ran.LowVal...) - sc.MemTracker.Consume(int64(cap(low))) if err != nil { return nil, nil, err } if ran.LowExclude { - // PrefixNext builds a new Key whose length equals that of `low`. The memory - // consumption needs to be considered since the consumption is high when - // the count of `ran` is huge. - sc.MemTracker.Consume(int64(cap(low))) low = kv.Key(low).PrefixNext() - sc.MemTracker.Consume(-int64(cap(low))) } high, err := codec.EncodeKey(sc, nil, ran.HighVal...) if err != nil { @@ -678,9 +688,7 @@ func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, [] } if !ran.HighExclude { - sc.MemTracker.Consume(int64(cap(high))) high = kv.Key(high).PrefixNext() - sc.MemTracker.Consume(-int64(cap(high))) } var hasNull bool
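The rewritten loop in indexRangesToKVWithoutSplit above combines two ideas: memory consumed by the generated keys is reported to the MemTracker in batches rather than per key, and an interrupt flag is polled once every `step` iterations so a long-running encoding loop can be cut short (for example when an OOM action fires). A minimal sketch of the same scheme follows; buildKeys, the key construction, and the consume callback are illustrative stand-ins, not this patch's API.

package main

import (
	"fmt"
	"sync/atomic"
)

// buildKeys encodes one key per input range, reporting accumulated memory
// in batches and polling the interrupt flag every `step` iterations, so the
// hot loop avoids per-iteration tracker calls yet stays responsive.
func buildKeys(ranges [][]byte, interrupt *atomic.Value, consume func(int64)) [][]byte {
	const step = 8
	var pending int64
	keys := make([][]byte, 0, len(ranges))
	for i, r := range ranges {
		if i%step == 0 {
			consume(pending) // flush what accumulated since the last check
			pending = 0
			if interrupt != nil && interrupt.Load().(bool) {
				return nil // caller asked us to stop; partial work is discarded
			}
		}
		key := append([]byte{'k'}, r...)
		pending += int64(cap(key))
		keys = append(keys, key)
	}
	consume(pending) // flush the tail the loop left unreported
	return keys
}

func main() {
	var interrupt atomic.Value
	interrupt.Store(false)
	var tracked int64
	keys := buildKeys([][]byte{{1}, {2}, {3}}, &interrupt, func(n int64) { tracked += n })
	fmt.Println(len(keys), tracked) // 3 keys, their capacities tracked
}

One detail worth noting: because the patched loop only calls Consume when i%step == 0, usage from the final partial batch appears to go unreported inside this function; the sketch flushes it explicitly after the loop.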
diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go index 3b9a7926e0fe1..036863b74b959 100644 --- a/distsql/request_builder_test.go +++ b/distsql/request_builder_test.go @@ -181,7 +181,7 @@ func TestIndexRangesToKVRanges(t *testing.T) { }, } - actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil) + actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil, nil) require.NoError(t, err) for i := range actual { require.Equal(t, expect[i], actual[i]) @@ -612,7 +612,7 @@ func TestIndexRangesToKVRangesWithFbs(t *testing.T) { }, } fb := newTestFb() - actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb) + actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb, nil) require.NoError(t, err) expect := []kv.KeyRange{ { diff --git a/executor/builder.go b/executor/builder.go index 810f2fe07b570..5e946ab292c83 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "unsafe" @@ -2770,6 +2771,7 @@ func (b *executorBuilder) buildIndexLookUpJoin(v *plannercore.PhysicalIndexJoin) indexRanges: v.Ranges, keyOff2IdxOff: v.KeyOff2IdxOff, lastColHelper: v.CompareFilters, + finished: &atomic.Value{}, } childrenUsedSchema := markChildrenUsedCols(v.Schema(), v.Children()[0].Schema(), v.Children()[1].Schema()) e.joiner = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, v.OtherConditions, leftTypes, rightTypes, childrenUsedSchema) @@ -3597,21 +3599,21 @@ type mockPhysicalIndexReader struct { } func (builder *dataReaderBuilder) buildExecutorForIndexJoin(ctx context.Context, lookUpContents []*indexJoinLookUpContent, - IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool) (Executor, error) { - return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles) + IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { + return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) } func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context.Context, plan plannercore.Plan, lookUpContents []*indexJoinLookUpContent, - IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool) (Executor, error) { + IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { switch v := plan.(type) { case *plannercore.PhysicalTableReader: - return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, 
canReorderHandles) + return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) case *plannercore.PhysicalIndexReader: - return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc) + return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, interruptSignal) case *plannercore.PhysicalIndexLookUpReader: - return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc) + return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, interruptSignal) case *plannercore.PhysicalUnionScan: - return builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles) + return builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) // The inner child of IndexJoin might be Projection when a combination of the following conditions is true: // 1. The inner child fetch data using indexLookupReader // 2. PK is not handle @@ -3619,11 +3621,11 @@ func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context. // In this case, an extra column tidb_rowid will be appended in the output result of IndexLookupReader(see copTask.doubleReadNeedProj). // Then we need a Projection upon IndexLookupReader to prune the redundant column. case *plannercore.PhysicalProjection: - return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc) + return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, interruptSignal) // Need to support physical selection because after PR 16389, TiDB will push down all the expr supported by TiKV or TiFlash // in predicate push down stage, so if there is an expr which only supported by TiFlash, a physical selection will be added after index read case *plannercore.PhysicalSelection: - childExec, err := builder.buildExecutorForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles) + childExec, err := builder.buildExecutorForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) if err != nil { return nil, err } @@ -3641,9 +3643,9 @@ func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context. 
func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context, v *plannercore.PhysicalUnionScan, values []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, - cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool) (Executor, error) { + cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { childBuilder := &dataReaderBuilder{Plan: v.Children()[0], executorBuilder: builder.executorBuilder} - reader, err := childBuilder.buildExecutorForIndexJoin(ctx, values, indexRanges, keyOff2IdxOff, cwc, canReorderHandles) + reader, err := childBuilder.buildExecutorForIndexJoin(ctx, values, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) if err != nil { return nil, err } @@ -3657,7 +3659,7 @@ func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalTableReader, lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, - cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool) (Executor, error) { + cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { e, err := buildNoRangeTableReader(builder.executorBuilder, v) if err != nil { return nil, err @@ -3665,7 +3667,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte tbInfo := e.table.Meta() if v.IsCommonHandle { if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() { - kvRanges, err := buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + kvRanges, err := buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) if err != nil { return nil, err } @@ -3694,7 +3696,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte return nil, err } pid := p.GetPhysicalID() - tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, []*indexJoinLookUpContent{content}, indexRanges, keyOff2IdxOff, cwc) + tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, []*indexJoinLookUpContent{content}, indexRanges, keyOff2IdxOff, cwc, interruptSignal) if err != nil { return nil, err } @@ -3709,7 +3711,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte kvRanges = make([]kv.KeyRange, 0, len(partitions)*len(lookUpContents)) for _, p := range partitions { pid := p.GetPhysicalID() - tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) if err != nil { return nil, err } @@ -3875,14 +3877,14 @@ func (builder *dataReaderBuilder) buildTableReaderFromKvRanges(ctx context.Conte } func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexReader, - lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) (Executor, error) { + lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (Executor, error) { e, err := buildNoRangeIndexReader(builder.executorBuilder, v) if err != nil 
{ return nil, err } tbInfo := e.table.Meta() if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() { - kvRanges, err := buildKvRangesForIndexJoin(e.ctx, e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + kvRanges, err := buildKvRangesForIndexJoin(e.ctx, e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) if err != nil { return nil, err } @@ -3921,7 +3923,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte } func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexLookUpReader, - lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) (Executor, error) { + lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (Executor, error) { e, err := buildNoRangeIndexLookUpReader(builder.executorBuilder, v) if err != nil { return nil, err @@ -3929,7 +3931,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context tbInfo := e.table.Meta() if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() { - e.kvRanges, err = buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + e.kvRanges, err = buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) if err != nil { return nil, err } @@ -3969,18 +3971,18 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context } func (builder *dataReaderBuilder) buildProjectionForIndexJoin(ctx context.Context, v *plannercore.PhysicalProjection, - lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) (Executor, error) { + lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (Executor, error) { var ( childExec Executor err error ) switch op := v.Children()[0].(type) { case *plannercore.PhysicalIndexLookUpReader: - if childExec, err = builder.buildIndexLookUpReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc); err != nil { + if childExec, err = builder.buildIndexLookUpReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal); err != nil { return nil, err } case *plannercore.PhysicalTableReader: - if childExec, err = builder.buildTableReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, true); err != nil { + if childExec, err = builder.buildTableReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, true, interruptSignal); err != nil { return nil, err } default: @@ -4049,7 +4051,7 @@ func buildRangesForIndexJoin(ctx sessionctx.Context, lookUpContents []*indexJoin // buildKvRangesForIndexJoin builds kv ranges for index join when the inner plan is index scan plan. 
func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, lookUpContents []*indexJoinLookUpContent, - ranges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) (_ []kv.KeyRange, err error) { + ranges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (_ []kv.KeyRange, err error) { kvRanges := make([]kv.KeyRange, 0, len(ranges)*len(lookUpContents)) lastPos := len(ranges[0].LowVal) - 1 sc := ctx.GetSessionVars().StmtCtx @@ -4068,7 +4070,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l if indexID == -1 { tmpKvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{tableID}, ranges) } else { - tmpKvRanges, err = distsql.IndexRangesToKVRanges(sc, tableID, indexID, ranges, nil) + tmpKvRanges, err = distsql.IndexRangesToKVRanges(sc, tableID, indexID, ranges, nil, interruptSignal) } if err != nil { return nil, err @@ -4115,7 +4117,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l if indexID == -1 { return distsql.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tableID}, tmpDatumRanges) } - return distsql.IndexRangesToKVRanges(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil) + return distsql.IndexRangesToKVRanges(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil, interruptSignal) } func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor { diff --git a/executor/chunk_size_control_test.go b/executor/chunk_size_control_test.go index f760a5bfe0414..310e5092695bd 100644 --- a/executor/chunk_size_control_test.go +++ b/executor/chunk_size_control_test.go @@ -17,8 +17,6 @@ package executor_test import ( "context" "fmt" - "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/util/memory" "strings" "sync" "time" @@ -97,7 +95,7 @@ func generateTableSplitKeyForInt(tid int64, splitNum []int) [][]byte { return results } -func generateIndexSplitKeyForInt(sc *stmtctx.StatementContext, tid, idx int64, splitNum []int) [][]byte { +func generateIndexSplitKeyForInt(tid, idx int64, splitNum []int) [][]byte { results := make([][]byte, 0, len(splitNum)) for _, num := range splitNum { d := new(types.Datum) @@ -106,7 +104,7 @@ func generateIndexSplitKeyForInt(sc *stmtctx.StatementContext, tid, idx int64, s if err != nil { panic(err) } - results = append(results, tablecodec.EncodeIndexSeekKey(sc, tid, idx, b)) + results = append(results, tablecodec.EncodeIndexSeekKey(tid, idx, b)) } return results } @@ -204,9 +202,8 @@ func (s *testChunkSizeControlSuite) TestLimitAndIndexScan(c *C) { tid := tbl.Meta().ID idx := tbl.Meta().Indices[0].ID - sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} // construct two regions split by 100 - splitKeys := generateIndexSplitKeyForInt(sc, tid, idx, []int{100}) + splitKeys := generateIndexSplitKeyForInt(tid, idx, []int{100}) regionIDs := manipulateCluster(cluster, splitKeys) noDelayThreshold := time.Millisecond * 100 diff --git a/executor/distsql.go b/executor/distsql.go index 899d2e822f176..852a25f5ebb19 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -226,7 +226,7 @@ func (e *IndexReaderExecutor) buildKeyRanges(sc *stmtctx.StatementContext, range if e.index.ID == -1 { return distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges) } - return distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback) + return 
distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback, nil) } // Open implements the Executor Open interface. @@ -437,7 +437,7 @@ func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) { if e.index.ID == -1 { kvRange, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges) } else { - kvRange, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback) + kvRange, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback, nil) } if err != nil { return err @@ -449,7 +449,7 @@ func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) { if e.index.ID == -1 { e.kvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, e.ranges) } else { - e.kvRanges, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, e.ranges, e.feedback) + e.kvRanges, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, e.ranges, e.feedback, nil) } } return err diff --git a/executor/executor_pkg_test.go b/executor/executor_pkg_test.go index 3fca4be179169..65308b8ca2442 100644 --- a/executor/executor_pkg_test.go +++ b/executor/executor_pkg_test.go @@ -195,7 +195,7 @@ func SubTestBuildKvRangesForIndexJoinWithoutCwc(t *testing.T) { keyOff2IdxOff := []int{1, 3} ctx := mock.NewContext() - kvRanges, err := buildKvRangesForIndexJoin(ctx, 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil) + kvRanges, err := buildKvRangesForIndexJoin(ctx, 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, nil) require.NoError(t, err) // Check the kvRanges is in order. for i, kvRange := range kvRanges { diff --git a/executor/executor_test.go b/executor/executor_test.go index 1cb3f94a7765a..8c03fe83a4412 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -20,6 +20,7 @@ import ( "flag" "fmt" "math" + "math/rand" "net" "os" "path/filepath" @@ -9325,3 +9326,57 @@ func (s *testSuiteP1) TestIssue29412(c *C) { tk.MustExec("insert into t29142_1 value(20);") tk.MustQuery("select sum(distinct a) as x from t29142_1 having x > some ( select a from t29142_2 where x in (a));").Check(nil) } + +func (s *testSerialSuite) TestIssue28650(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1(a int, index(a));") + tk.MustExec("create table t2(a int, c int, b char(50), index(a,c,b));") + tk.MustExec("set tidb_enable_rate_limit_action=off;") + + wg := &sync.WaitGroup{} + sql := `explain analyze + select /*+ stream_agg(@sel_1) stream_agg(@sel_3) inl_join(@sel_2 t2)*/ count(1) from + ( + SELECT t2.a AS t2_external_user_ext_id, t2.b AS t2_t1_ext_id FROM t2 INNER JOIN (SELECT t1.a AS d_t1_ext_id FROM t1 GROUP BY t1.a) AS anon_1 ON anon_1.d_t1_ext_id = t2.a WHERE t2.c = 123 AND t2.b + IN ("%s") ) tmp` + + wg.Add(1) + go func() { + defer wg.Done() + inElems := make([]string, 1000) + for i := 0; i < len(inElems); i++ { + inElems[i] = fmt.Sprintf("wm_%dbDgAAwCD-v1QB%dxky-g_dxxQCw", rand.Intn(100), rand.Intn(100)) + } + sql = fmt.Sprintf(sql, strings.Join(inElems, "\",\"")) + }() + + tk.MustExec("insert into t1 select rand()*400;") + for i := 0; i < 10; i++ { + tk.MustExec("insert into t1 select rand()*400 from t1;") + } + config.UpdateGlobal(func(conf *config.Config) { + conf.OOMAction = config.OOMActionCancel + }) + defer func() { + config.UpdateGlobal(func(conf *config.Config) { + conf.OOMAction = config.OOMActionLog + }) + }() + wg.Wait() + tk.MustExec("set @@tidb_mem_quota_query = 1073741824") // 1GB + 
c.Assert(tk.QueryToErr(sql), IsNil) + tk.MustExec("set @@tidb_mem_quota_query = 104857600") // 100MB, out of memory during executing + c.Assert(strings.Contains(tk.QueryToErr(sql).Error(), "Out Of Memory Quota!"), IsTrue) + tk.MustExec("set @@tidb_mem_quota_query = 64000") // 64KB, out of memory during building the plan + func() { + defer func() { + r := recover() + c.Assert(r, NotNil) + err := errors.Errorf("%v", r) + c.Assert(strings.Contains(err.Error(), "Out Of Memory Quota!"), IsTrue) + }() + tk.MustExec(sql) + }() +} diff --git a/executor/index_lookup_hash_join.go b/executor/index_lookup_hash_join.go index 2eb4e4bd6fcde..e4d045d8df97e 100644 --- a/executor/index_lookup_hash_join.go +++ b/executor/index_lookup_hash_join.go @@ -152,6 +152,7 @@ func (e *IndexNestedLoopHashJoin) Open(ctx context.Context) error { e.stats = &indexLookUpJoinRuntimeStats{} e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } + e.finished.Store(false) e.startWorkers(ctx) return nil } @@ -319,6 +320,7 @@ func (e *IndexNestedLoopHashJoin) Close() error { close(e.joinChkResourceCh[i]) } e.joinChkResourceCh = nil + e.finished.Store(false) return e.baseExecutor.Close() } diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index fe8ab6eb9e76f..ccccd7ed6551a 100644 --- a/executor/index_lookup_join.go +++ b/executor/index_lookup_join.go @@ -84,6 +84,7 @@ type IndexLookUpJoin struct { stats *indexLookUpJoinRuntimeStats ctxCancelReason atomic.Value + finished *atomic.Value } type outerCtx struct { @@ -163,6 +164,7 @@ func (e *IndexLookUpJoin) Open(ctx context.Context) error { e.memTracker = memory.NewTracker(e.id, -1) e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) e.innerPtrBytes = make([][]byte, 0, 8) + e.finished.Store(false) if e.runtimeStats != nil { e.stats = &indexLookUpJoinRuntimeStats{} e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) @@ -342,6 +344,7 @@ func (ow *outerWorker) run(ctx context.Context, wg *sync.WaitGroup) { defer trace.StartRegion(ctx, "IndexLookupJoinOuterWorker").End() defer func() { if r := recover(); r != nil { + ow.lookup.finished.Store(true) buf := make([]byte, 4096) stackSize := runtime.Stack(buf, false) buf = buf[:stackSize] @@ -461,6 +464,7 @@ func (iw *innerWorker) run(ctx context.Context, wg *sync.WaitGroup) { var task *lookUpJoinTask defer func() { if r := recover(); r != nil { + iw.lookup.finished.Store(true) buf := make([]byte, 4096) stackSize := runtime.Stack(buf, false) buf = buf[:stackSize] @@ -666,7 +670,7 @@ func (iw *innerWorker) fetchInnerResults(ctx context.Context, task *lookUpJoinTa atomic.AddInt64(&iw.stats.fetch, int64(time.Since(start))) }() } - innerExec, err := iw.readerBuilder.buildExecutorForIndexJoin(ctx, lookUpContent, iw.indexRanges, iw.keyOff2IdxOff, iw.nextColCompareFilters, true) + innerExec, err := iw.readerBuilder.buildExecutorForIndexJoin(ctx, lookUpContent, iw.indexRanges, iw.keyOff2IdxOff, iw.nextColCompareFilters, true, iw.lookup.finished) if innerExec != nil { defer terror.Call(innerExec.Close) } @@ -751,6 +755,7 @@ func (e *IndexLookUpJoin) Close() error { e.workerWg.Wait() e.memTracker = nil e.task = nil + e.finished.Store(false) return e.baseExecutor.Close() } diff --git a/executor/index_lookup_merge_join.go b/executor/index_lookup_merge_join.go index 9bbe55537421b..9d4785ff596cf 100644 --- a/executor/index_lookup_merge_join.go +++ b/executor/index_lookup_merge_join.go @@ -502,7 +502,7 @@ func (imw *innerMergeWorker) handleTask(ctx context.Context, task 
*lookUpMergeJo dLookUpKeys[i], dLookUpKeys[lenKeys-i-1] = dLookUpKeys[lenKeys-i-1], dLookUpKeys[i] } } - imw.innerExec, err = imw.readerBuilder.buildExecutorForIndexJoin(ctx, dLookUpKeys, imw.indexRanges, imw.keyOff2IdxOff, imw.nextColCompareFilters, false) + imw.innerExec, err = imw.readerBuilder.buildExecutorForIndexJoin(ctx, dLookUpKeys, imw.indexRanges, imw.keyOff2IdxOff, imw.nextColCompareFilters, false, nil) if imw.innerExec != nil { defer terror.Call(imw.innerExec.Close) } diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index 60828bd514ac4..095926d4a033f 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -151,7 +151,7 @@ func (e *IndexMergeReaderExecutor) buildKeyRangesForTable(tbl table.Table) (rang } continue } - keyRange, err := distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(tbl), e.indexes[i].ID, e.ranges[i], e.feedbacks[i]) + keyRange, err := distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(tbl), e.indexes[i].ID, e.ranges[i], e.feedbacks[i], nil) if err != nil { return nil, err } diff --git a/executor/point_get.go b/executor/point_get.go index f2142bb0ac709..489bbf9bb8085 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -466,7 +466,7 @@ func EncodeUniqueIndexKey(ctx sessionctx.Context, tblInfo *model.TableInfo, idxI if err != nil { return nil, err } - return tablecodec.EncodeIndexSeekKey(ctx.GetSessionVars().StmtCtx, tID, idxInfo.ID, encodedIdxVals), nil + return tablecodec.EncodeIndexSeekKey(tID, idxInfo.ID, encodedIdxVals), nil } // EncodeUniqueIndexValuesForKey encodes unique index values for a key. diff --git a/executor/point_get_test.go b/executor/point_get_test.go index 1edfba98998d8..131803a95a57f 100644 --- a/executor/point_get_test.go +++ b/executor/point_get_test.go @@ -17,8 +17,6 @@ package executor_test import ( "context" "fmt" - "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/util/memory" "strings" "sync" "time" @@ -618,7 +616,6 @@ func (s *testPointGetSuite) TestReturnValues(c *C) { tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - tk.Se.GetSessionVars().StmtCtx = &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} tk.MustExec("create table t (a varchar(64) primary key, b int)") tk.MustExec("insert t values ('a', 1), ('b', 2), ('c', 3)") tk.MustExec("begin pessimistic") @@ -626,7 +623,7 @@ func (s *testPointGetSuite) TestReturnValues(c *C) { tid := tk.GetTableID("t") idxVal, err := codec.EncodeKey(tk.Se.GetSessionVars().StmtCtx, nil, types.NewStringDatum("b")) c.Assert(err, IsNil) - pk := tablecodec.EncodeIndexSeekKey(tk.Se.GetSessionVars().StmtCtx, tid, 1, idxVal) + pk := tablecodec.EncodeIndexSeekKey(tid, 1, idxVal) txnCtx := tk.Se.GetSessionVars().TxnCtx val, ok := txnCtx.GetKeyInPessimisticLockCache(pk) c.Assert(ok, IsTrue) diff --git a/executor/show.go b/executor/show.go index 7a0e18cd11303..9fcbb67e7cf66 100644 --- a/executor/show.go +++ b/executor/show.go @@ -1759,15 +1759,14 @@ func (e *ShowExec) fetchShowTableRegions() error { // Get table regions from from pd, not from regionCache, because the region cache maybe outdated. 
var regions []regionMeta - sc := e.ctx.GetSessionVars().StmtCtx if len(e.IndexName.L) != 0 { indexInfo := tb.Meta().FindIndexByName(e.IndexName.L) if indexInfo == nil { return plannercore.ErrKeyDoesNotExist.GenWithStackByArgs(e.IndexName, tb.Meta().Name) } - regions, err = getTableIndexRegions(sc, indexInfo, physicalIDs, tikvStore, splitStore) + regions, err = getTableIndexRegions(indexInfo, physicalIDs, tikvStore, splitStore) } else { - regions, err = getTableRegions(sc, tb, physicalIDs, tikvStore, splitStore) + regions, err = getTableRegions(tb, physicalIDs, tikvStore, splitStore) } if err != nil { @@ -1777,11 +1776,11 @@ func (e *ShowExec) fetchShowTableRegions() error { return nil } -func getTableRegions(sc *stmtctx.StatementContext, tb table.Table, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { +func getTableRegions(tb table.Table, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { regions := make([]regionMeta, 0, len(physicalIDs)) uniqueRegionMap := make(map[uint64]struct{}) for _, id := range physicalIDs { - rs, err := getPhysicalTableRegions(sc, id, tb.Meta(), tikvStore, splitStore, uniqueRegionMap) + rs, err := getPhysicalTableRegions(id, tb.Meta(), tikvStore, splitStore, uniqueRegionMap) if err != nil { return nil, err } @@ -1790,11 +1789,11 @@ func getTableRegions(sc *stmtctx.StatementContext, tb table.Table, physicalIDs [ return regions, nil } -func getTableIndexRegions(sc *stmtctx.StatementContext, indexInfo *model.IndexInfo, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { +func getTableIndexRegions(indexInfo *model.IndexInfo, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { regions := make([]regionMeta, 0, len(physicalIDs)) uniqueRegionMap := make(map[uint64]struct{}) for _, id := range physicalIDs { - rs, err := getPhysicalIndexRegions(sc, id, indexInfo, tikvStore, splitStore, uniqueRegionMap) + rs, err := getPhysicalIndexRegions(id, indexInfo, tikvStore, splitStore, uniqueRegionMap) if err != nil { return nil, err } diff --git a/executor/split.go b/executor/split.go index 808288f00333d..beca16bfe5d90 100644 --- a/executor/split.go +++ b/executor/split.go @@ -19,7 +19,6 @@ import ( "context" "encoding/binary" "fmt" - "github.com/pingcap/tidb/sessionctx/stmtctx" "math" "time" @@ -623,7 +622,7 @@ type regionMeta struct { approximateKeys int64 } -func getPhysicalTableRegions(sc *stmtctx.StatementContext, physicalTableID int64, tableInfo *model.TableInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { +func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { if uniqueRegionMap == nil { uniqueRegionMap = make(map[uint64]struct{}) } @@ -652,7 +651,7 @@ func getPhysicalTableRegions(sc *stmtctx.StatementContext, physicalTableID int64 if index.State != model.StatePublic { continue } - startKey, endKey := tablecodec.GetTableIndexKeyRange(sc, physicalTableID, index.ID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, index.ID) regionMetas, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey) if err != nil { return nil, err @@ -671,12 +670,12 @@ func getPhysicalTableRegions(sc 
*stmtctx.StatementContext, physicalTableID int64 return regions, nil } -func getPhysicalIndexRegions(sc *stmtctx.StatementContext, physicalTableID int64, indexInfo *model.IndexInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { +func getPhysicalIndexRegions(physicalTableID int64, indexInfo *model.IndexInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { if uniqueRegionMap == nil { uniqueRegionMap = make(map[uint64]struct{}) } - startKey, endKey := tablecodec.GetTableIndexKeyRange(sc, physicalTableID, indexInfo.ID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, indexInfo.ID) regionCache := tikvStore.GetRegionCache() regions, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey) if err != nil { diff --git a/expression/integration_test.go b/expression/integration_test.go index 79242033b9d02..1d998efbc82e1 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -19,8 +19,6 @@ import ( "context" "encoding/hex" "fmt" - "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/util/memory" "math" "math/rand" "sort" @@ -5100,11 +5098,10 @@ func (s *testIntegrationSuite) TestTiDBInternalFunc(c *C) { is = dom.InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) - sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} buildIndexKeyFromData := func(tableID, indexID int64, data []types.Datum) string { k, err := codec.EncodeKey(tk.Se.GetSessionVars().StmtCtx, nil, data...) c.Assert(err, IsNil) - k = tablecodec.EncodeIndexSeekKey(sc, tableID, indexID, k) + k = tablecodec.EncodeIndexSeekKey(tableID, indexID, k) hexKey := hex.EncodeToString(codec.EncodeBytes(nil, k)) return hexKey } diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index d09f5a00b76d5..6f445f8db9660 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -1566,6 +1566,11 @@ func (ijHelper *indexJoinBuildHelper) buildTemplateRange(matchedKeyCnt int, eqAn if len(oneColumnRan) == 0 { return nil, true, nil } + if sc.MemTracker != nil { + for _, ran := range oneColumnRan { + sc.MemTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal))) + } + } for _, ran := range ranges { ran.LowVal[i] = oneColumnRan[0].LowVal[0] ran.HighVal[i] = oneColumnRan[0].HighVal[0] @@ -1575,6 +1580,9 @@ func (ijHelper *indexJoinBuildHelper) buildTemplateRange(matchedKeyCnt int, eqAn newRanges := make([]*ranger.Range, 0, curRangeLen) for oldRangeIdx := 0; oldRangeIdx < curRangeLen; oldRangeIdx++ { newRange := ranges[oldRangeIdx].Clone() + if sc.MemTracker != nil { + sc.MemTracker.Consume(2 * types.EstimatedMemUsage(newRange.LowVal, len(newRange.LowVal))) + } newRange.LowVal[i] = oneColumnRan[ranIdx].LowVal[0] newRange.HighVal[i] = oneColumnRan[ranIdx].HighVal[0] newRanges = append(newRanges, newRange) diff --git a/server/http_handler.go b/server/http_handler.go index 38cee12cb8b0d..3fe4ac8587cd7 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -1322,7 +1322,7 @@ func (h tableHandler) handleScatterTableRequest(schema infoschema.InfoSchema, tb for _, index := range tbl.Indices() { indexID := index.Meta().ID indexName := index.Meta().Name.String() - startKey, endKey := 
tablecodec.GetTableIndexKeyRange(nil, tableID, indexID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(tableID, indexID) startKey = codec.EncodeBytes([]byte{}, startKey) endKey = codec.EncodeBytes([]byte{}, endKey) name := tableName + "-" + indexName @@ -1450,7 +1450,7 @@ func (h tableHandler) getRegionsByID(tbl table.Table, id int64, name string) (*T indexID := index.Meta().ID indices[i].Name = index.Meta().Name.String() indices[i].ID = indexID - startKey, endKey := tablecodec.GetTableIndexKeyRange(nil, id, indexID) + startKey, endKey := tablecodec.GetTableIndexKeyRange(id, indexID) regions, err := pdCli.ScanRegions(ctx, startKey, endKey, -1) if err != nil { return nil, err diff --git a/server/http_handler_test.go b/server/http_handler_test.go index 39b188128b28b..3278732a0059f 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -23,7 +23,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "github.com/pingcap/tidb/util/memory" "io" "net" "net/http" @@ -94,8 +93,7 @@ func TestRegionIndexRange(t *testing.T) { encodedValue, err := codec.EncodeKey(&stmtctx.StatementContext{TimeZone: time.Local}, nil, indexValues...) require.NoError(t, err) - sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} - startKey := tablecodec.EncodeIndexSeekKey(sc, sTableID, sIndex, encodedValue) + startKey := tablecodec.EncodeIndexSeekKey(sTableID, sIndex, encodedValue) recordPrefix := tablecodec.GenTableRecordPrefix(eTableID) endKey := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID)) diff --git a/session/schema_amender_serial_test.go b/session/schema_amender_serial_test.go index 2daf31ad1b38c..a22f21ad6e225 100644 --- a/session/schema_amender_serial_test.go +++ b/session/schema_amender_serial_test.go @@ -17,18 +17,15 @@ package session import ( "bytes" "context" - "github.com/pingcap/tidb/util/memory" "sort" "strconv" "testing" - "time" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/core" - "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" @@ -434,11 +431,9 @@ func TestAmendCollectAndGenMutations(t *testing.T) { schemaAmender := NewSchemaAmenderForTikvTxn(se) // Some noisy index key values. 
- - se.sessionVars.StmtCtx = &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} for i := 0; i < 4; i++ { idxValue := []byte("idxValue") - idxKey := tablecodec.EncodeIndexSeekKey(se.sessionVars.StmtCtx, oldTbInfo.Meta().ID, oldTbInfo.Indices()[i].Meta().ID, idxValue) + idxKey := tablecodec.EncodeIndexSeekKey(oldTbInfo.Meta().ID, oldTbInfo.Indices()[i].Meta().ID, idxValue) err = txn.Set(idxKey, idxValue) require.NoError(t, err) mutations.Push(kvrpcpb.Op_Put, idxKey, idxValue, false) diff --git a/session/tidb_test.go b/session/tidb_test.go index d47a59a2ccdf9..4c854b4b66354 100644 --- a/session/tidb_test.go +++ b/session/tidb_test.go @@ -16,11 +16,8 @@ package session import ( "context" - "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/util/memory" "sync" "testing" - "time" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -77,9 +74,8 @@ func TestParseErrorWarn(t *testing.T) { func TestKeysNeedLock(t *testing.T) { t.Parallel() - sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} rowKey := tablecodec.EncodeRowKeyWithHandle(1, kv.IntHandle(1)) - indexKey := tablecodec.EncodeIndexSeekKey(sc, 1, 1, []byte{1}) + indexKey := tablecodec.EncodeIndexSeekKey(1, 1, []byte{1}) uniqueValue := make([]byte, 8) uniqueUntouched := append(uniqueValue, '1') nonUniqueVal := []byte{'0'} diff --git a/store/helper/helper.go b/store/helper/helper.go index 073895ec4b92d..3b8e122071959 100644 --- a/store/helper/helper.go +++ b/store/helper/helper.go @@ -637,7 +637,7 @@ func newTableWithKeyRange(db *model.DBInfo, table *model.TableInfo) tableInfoWit } func newIndexWithKeyRange(db *model.DBInfo, table *model.TableInfo, index *model.IndexInfo) tableInfoWithKeyRange { - sk, ek := tablecodec.GetTableIndexKeyRange(nil, table.ID, index.ID) + sk, ek := tablecodec.GetTableIndexKeyRange(table.ID, index.ID) startKey := bytesKeyToHex(codec.EncodeBytes(nil, sk)) endKey := bytesKeyToHex(codec.EncodeBytes(nil, ek)) return tableInfoWithKeyRange{ diff --git a/store/mockstore/cluster_test.go b/store/mockstore/cluster_test.go index 70099609d14ec..9384a433fcba7 100644 --- a/store/mockstore/cluster_test.go +++ b/store/mockstore/cluster_test.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" - "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/rowcodec" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/testutils" @@ -53,7 +52,7 @@ func TestClusterSplit(t *testing.T) { idxID := int64(2) colID := int64(3) handle := int64(1) - sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} + sc := &stmtctx.StatementContext{TimeZone: time.UTC} for i := 0; i < 1000; i++ { rowKey := tablecodec.EncodeRowKeyWithHandle(tblID, kv.IntHandle(handle)) colValue := types.NewStringDatum(strconv.Itoa(int(handle))) @@ -65,7 +64,7 @@ func TestClusterSplit(t *testing.T) { encodedIndexValue, err1 := codec.EncodeKey(sc, nil, []types.Datum{colValue, types.NewIntDatum(handle)}...) 
require.NoError(t, err1) - idxKey := tablecodec.EncodeIndexSeekKey(sc, tblID, idxID, encodedIndexValue) + idxKey := tablecodec.EncodeIndexSeekKey(tblID, idxID, encodedIndexValue) txn.Set(idxKey, []byte{'0'}) handle++ } diff --git a/tablecodec/tablecodec.go b/tablecodec/tablecodec.go index 6f33e435f6404..f610e09104572 100644 --- a/tablecodec/tablecodec.go +++ b/tablecodec/tablecodec.go @@ -628,14 +628,11 @@ func Unflatten(datum types.Datum, ft *types.FieldType, loc *time.Location) (type } // EncodeIndexSeekKey encodes an index value to kv.Key. -func EncodeIndexSeekKey(sc *stmtctx.StatementContext, tableID int64, idxID int64, encodedValue []byte) kv.Key { +func EncodeIndexSeekKey(tableID int64, idxID int64, encodedValue []byte) kv.Key { key := make([]byte, 0, RecordRowKeyLen+len(encodedValue)) key = appendTableIndexPrefix(key, tableID) key = codec.EncodeInt(key, idxID) key = append(key, encodedValue...) - if sc != nil && sc.MemTracker != nil { - sc.MemTracker.Consume(int64(cap(key))) - } return key } @@ -1032,9 +1029,9 @@ func GetTableHandleKeyRange(tableID int64) (startKey, endKey []byte) { } // GetTableIndexKeyRange returns table index's key range with tableID and indexID. -func GetTableIndexKeyRange(sc *stmtctx.StatementContext, tableID, indexID int64) (startKey, endKey []byte) { - startKey = EncodeIndexSeekKey(sc, tableID, indexID, nil) - endKey = EncodeIndexSeekKey(sc, tableID, indexID, []byte{255}) +func GetTableIndexKeyRange(tableID, indexID int64) (startKey, endKey []byte) { + startKey = EncodeIndexSeekKey(tableID, indexID, nil) + endKey = EncodeIndexSeekKey(tableID, indexID, []byte{255}) return } diff --git a/tablecodec/tablecodec_test.go b/tablecodec/tablecodec_test.go index 0992103888391..554287b8dc6a7 100644 --- a/tablecodec/tablecodec_test.go +++ b/tablecodec/tablecodec_test.go @@ -16,7 +16,6 @@ package tablecodec import ( "fmt" - "github.com/pingcap/tidb/util/memory" "math" "testing" "time" @@ -336,12 +335,12 @@ func TestCutKeyNew(t *testing.T) { values := []types.Datum{types.NewIntDatum(1), types.NewBytesDatum([]byte("abc")), types.NewFloat64Datum(5.5)} handle := types.NewIntDatum(100) values = append(values, handle) - sc := &stmtctx.StatementContext{TimeZone: time.UTC, MemTracker: memory.NewTracker(0, 1<<30)} + sc := &stmtctx.StatementContext{TimeZone: time.UTC} encodedValue, err := codec.EncodeKey(sc, nil, values...) 
require.NoError(t, err) tableID := int64(4) indexID := int64(5) - indexKey := EncodeIndexSeekKey(sc, tableID, indexID, encodedValue) + indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) valuesBytes, handleBytes, err := CutIndexKeyNew(indexKey, 3) require.NoError(t, err) for i := 0; i < 3; i++ { @@ -365,7 +364,7 @@ func TestCutKey(t *testing.T) { require.NoError(t, err) tableID := int64(4) indexID := int64(5) - indexKey := EncodeIndexSeekKey(sc, tableID, indexID, encodedValue) + indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) valuesMap, handleBytes, err := CutIndexKey(indexKey, colIDs) require.NoError(t, err) for i, colID := range colIDs { @@ -395,7 +394,7 @@ func TestIndexKey(t *testing.T) { t.Parallel() tableID := int64(4) indexID := int64(5) - indexKey := EncodeIndexSeekKey(nil, tableID, indexID, []byte{}) + indexKey := EncodeIndexSeekKey(tableID, indexID, []byte{}) tTableID, tIndexID, isRecordKey, err := DecodeKeyHead(indexKey) require.NoError(t, err) require.Equal(t, tableID, tTableID) @@ -483,7 +482,7 @@ func TestDecodeIndexKey(t *testing.T) { sc := &stmtctx.StatementContext{TimeZone: time.UTC} encodedValue, err := codec.EncodeKey(sc, nil, values...) require.NoError(t, err) - indexKey := EncodeIndexSeekKey(sc, tableID, indexID, encodedValue) + indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) decodeTableID, decodeIndexID, decodeValues, err := DecodeIndexKey(indexKey) require.NoError(t, err) @@ -509,8 +508,8 @@ func TestRange(t *testing.T) { require.Less(t, string(e1), string(s2)) require.Less(t, string(s2), string(e2)) - s1, e1 = GetTableIndexKeyRange(nil, 42, 666) - s2, e2 = GetTableIndexKeyRange(nil, 42, 667) + s1, e1 = GetTableIndexKeyRange(42, 666) + s2, e2 = GetTableIndexKeyRange(42, 667) require.Less(t, string(s1), string(e1)) require.Less(t, string(e1), string(s2)) require.Less(t, string(s2), string(e2)) diff --git a/util/codec/bytes.go b/util/codec/bytes.go index e3bae59b1f041..c4f61442822fe 100644 --- a/util/codec/bytes.go +++ b/util/codec/bytes.go @@ -20,7 +20,6 @@ import ( "unsafe" "github.com/pingcap/errors" - "github.com/pingcap/tidb/sessionctx/stmtctx" ) const ( @@ -50,7 +49,7 @@ func EncodeBytes(b []byte, data []byte) []byte { // that is `(len(data) / 8 + 1) * 9` in our implement. dLen := len(data) reallocSize := (dLen/encGroupSize + 1) * (encGroupSize + 1) - result := reallocBytes(nil, b, reallocSize) + result := reallocBytes(b, reallocSize) for idx := 0; idx <= dLen; idx += encGroupSize { remain := dLen - idx padCount := 0 @@ -151,7 +150,7 @@ func DecodeBytesDesc(b []byte, buf []byte) ([]byte, []byte, error) { // efficient in both space and time compare to EncodeBytes. Note that the encoded // result is not memcomparable. func EncodeCompactBytes(b []byte, data []byte) []byte { - b = reallocBytes(nil, b, binary.MaxVarintLen64+len(data)) + b = reallocBytes(b, binary.MaxVarintLen64+len(data)) b = EncodeVarint(b, int64(len(data))) return append(b, data...) } @@ -203,14 +202,11 @@ func reverseBytes(b []byte) { } // reallocBytes is like realloc. 
-func reallocBytes(sc *stmtctx.StatementContext, b []byte, n int) []byte { +func reallocBytes(b []byte, n int) []byte { newSize := len(b) + n if cap(b) < newSize { bs := make([]byte, len(b), newSize) copy(bs, b) - if sc != nil && sc.MemTracker != nil { - sc.MemTracker.Consume(int64(cap(bs) - cap(b))) - } return bs } diff --git a/util/codec/codec.go b/util/codec/codec.go index 625accbde1004..f6daf7e54261d 100644 --- a/util/codec/codec.go +++ b/util/codec/codec.go @@ -55,7 +55,7 @@ const ( sizeFloat64 = unsafe.Sizeof(float64(0)) ) -func preRealloc(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comparable bool) []byte { +func preRealloc(b []byte, vals []types.Datum, comparable bool) []byte { var size int for i := range vals { switch vals[i].Kind() { @@ -75,13 +75,13 @@ func preRealloc(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comp return b } } - return reallocBytes(sc, b, size) + return reallocBytes(b, size) } // encode will encode a datum and append it to a byte slice. If comparable is true, the encoded bytes can be sorted as it's original order. // If hash is true, the encoded bytes can be checked equal as it's original value. func encode(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comparable bool) (_ []byte, err error) { - b = preRealloc(sc, b, vals, comparable) + b = preRealloc(b, vals, comparable) for i, length := 0, len(vals); i < length; i++ { switch vals[i].Kind() { case types.KindInt64: From fe22b7db27204a4f01047ddbd0f73c2b97bbc4c9 Mon Sep 17 00:00:00 2001 From: xuhuaiyu <391585975@qq.com> Date: Thu, 18 Nov 2021 18:09:53 +0800 Subject: [PATCH 3/7] refine --- br/pkg/backup/client.go | 2 +- br/pkg/lightning/backend/local/duplicate.go | 4 ++-- distsql/request_builder.go | 22 ++++++++++++++++----- distsql/request_builder_test.go | 4 ++-- executor/builder.go | 4 ++-- executor/distsql.go | 6 +++--- executor/index_merge_reader.go | 2 +- 7 files changed, 28 insertions(+), 16 deletions(-) diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 971e0f3cfc7b9..79edc3403be68 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -236,7 +236,7 @@ func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) { continue } ranges = ranger.FullRange() - idxRanges, err := distsql.IndexRangesToKVRanges(nil, tblID, index.ID, ranges, nil, nil) + idxRanges, err := distsql.IndexRangesToKVRanges(nil, tblID, index.ID, ranges, nil) if err != nil { return nil, errors.Trace(err) } diff --git a/br/pkg/lightning/backend/local/duplicate.go b/br/pkg/lightning/backend/local/duplicate.go index 7e63b34f5a072..fc88055c40c61 100644 --- a/br/pkg/lightning/backend/local/duplicate.go +++ b/br/pkg/lightning/backend/local/duplicate.go @@ -541,7 +541,7 @@ func (manager *DuplicateManager) CollectDuplicateRowsFromLocalIndex( ranges := ranger.FullRange() var keysRanges []tidbkv.KeyRange for _, id := range tableIDs { - partitionKeysRanges, err := distsql.IndexRangesToKVRanges(nil, id, indexInfo.ID, ranges, nil, nil) + partitionKeysRanges, err := distsql.IndexRangesToKVRanges(nil, id, indexInfo.ID, ranges, nil) if err != nil { return false, err } @@ -806,7 +806,7 @@ func buildTableRequests(tableID int64, isCommonHandle bool) ([]*DuplicateRequest func buildIndexRequests(tableID int64, indexInfo *model.IndexInfo) ([]*DuplicateRequest, error) { ranges := ranger.FullRange() - keysRanges, err := distsql.IndexRangesToKVRanges(nil, tableID, indexInfo.ID, ranges, nil, nil) + keysRanges, err := distsql.IndexRangesToKVRanges(nil, tableID, indexInfo.ID, 
ranges, nil)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
diff --git a/distsql/request_builder.go b/distsql/request_builder.go
index 87a17e6f47a18..02913dde9fed3 100644
--- a/distsql/request_builder.go
+++ b/distsql/request_builder.go
@@ -93,7 +93,7 @@ func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.R
 // "ranges" to "KeyRanges" firstly.
 func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) *RequestBuilder {
 	if builder.err == nil {
-		builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil, nil)
+		builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil)
 	}
 	return builder
 }
@@ -102,7 +102,7 @@ func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid,
 // "ranges" to "KeyRanges" firstly.
 func (builder *RequestBuilder) SetIndexRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) *RequestBuilder {
 	if builder.err == nil {
-		builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil, nil)
+		builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil)
 	}
 	return builder
 }
@@ -551,12 +551,24 @@ func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange {
 }
 
 // IndexRangesToKVRanges converts index ranges to "KeyRange".
-func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
-	return IndexRangesToKVRangesForTables(sc, []int64{tid}, idxID, ranges, fb, interruptSignal)
+func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
+	return IndexRangesToKVRangesWithInterruptSignal(sc, tid, idxID, ranges, fb, nil)
+}
+
+// IndexRangesToKVRangesWithInterruptSignal converts index ranges to "KeyRange".
+// The process can be interrupted by setting `interruptSignal` to true.
+func IndexRangesToKVRangesWithInterruptSignal(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
+	return indexRangesToKVRangesForTablesWithInterruptSignal(sc, []int64{tid}, idxID, ranges, fb, interruptSignal)
 }
 
 // IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange".
-func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
+func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
+	return indexRangesToKVRangesForTablesWithInterruptSignal(sc, tids, idxID, ranges, fb, nil)
+}
+
+// indexRangesToKVRangesForTablesWithInterruptSignal converts index ranges to "KeyRange".
+// The process can be interrupted by setting `interruptSignal` to true.
+func indexRangesToKVRangesForTablesWithInterruptSignal(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { if fb == nil || fb.Hist == nil { return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges, interruptSignal) } diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go index 036863b74b959..3b9a7926e0fe1 100644 --- a/distsql/request_builder_test.go +++ b/distsql/request_builder_test.go @@ -181,7 +181,7 @@ func TestIndexRangesToKVRanges(t *testing.T) { }, } - actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil, nil) + actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil) require.NoError(t, err) for i := range actual { require.Equal(t, expect[i], actual[i]) @@ -612,7 +612,7 @@ func TestIndexRangesToKVRangesWithFbs(t *testing.T) { }, } fb := newTestFb() - actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb, nil) + actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb) require.NoError(t, err) expect := []kv.KeyRange{ { diff --git a/executor/builder.go b/executor/builder.go index 5e946ab292c83..b59d06e57da74 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -4070,7 +4070,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l if indexID == -1 { tmpKvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{tableID}, ranges) } else { - tmpKvRanges, err = distsql.IndexRangesToKVRanges(sc, tableID, indexID, ranges, nil, interruptSignal) + tmpKvRanges, err = distsql.IndexRangesToKVRangesWithInterruptSignal(sc, tableID, indexID, ranges, nil, interruptSignal) } if err != nil { return nil, err @@ -4117,7 +4117,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l if indexID == -1 { return distsql.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tableID}, tmpDatumRanges) } - return distsql.IndexRangesToKVRanges(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil, interruptSignal) + return distsql.IndexRangesToKVRangesWithInterruptSignal(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil, interruptSignal) } func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor { diff --git a/executor/distsql.go b/executor/distsql.go index 852a25f5ebb19..899d2e822f176 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -226,7 +226,7 @@ func (e *IndexReaderExecutor) buildKeyRanges(sc *stmtctx.StatementContext, range if e.index.ID == -1 { return distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges) } - return distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback, nil) + return distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback) } // Open implements the Executor Open interface. 
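
For context: the `*WithInterruptSignal` variants introduced above rely on a simple polling pattern — a long-running producer loop periodically loads an `atomic.Value` that an out-of-memory action on another goroutine may set to true. Below is a minimal, self-contained sketch of that pattern only; the names (`buildRanges`, `checkStep`) are illustrative and not TiDB APIs.

package main

import (
	"fmt"
	"sync/atomic"
)

// buildRanges stands in for a long range-construction loop. Every few
// iterations it polls interruptSignal and bails out early once another
// goroutine (e.g. an OOM action) has stored true into it.
func buildRanges(n int, interruptSignal *atomic.Value) []int {
	const checkStep = 8 // same spirit as the step constant in the patch
	out := make([]int, 0, n)
	for i := 0; i < n; i++ {
		if i%checkStep == 0 && interruptSignal != nil {
			if v, ok := interruptSignal.Load().(bool); ok && v {
				return nil // interrupted: give up and let the caller handle cancellation
			}
		}
		out = append(out, i*i) // placeholder for the expensive key encoding
	}
	return out
}

func main() {
	var signal atomic.Value
	signal.Store(false)
	fmt.Println(len(buildRanges(100, &signal))) // 100: ran to completion
	signal.Store(true)
	fmt.Println(buildRanges(100, &signal) == nil) // true: aborted on the first check
}

Returning nil on interruption mirrors how `indexRangesToKVWithoutSplit` gives up when the signal fires; the caller is expected to notice the cancellation through the usual quota/error path rather than through this function's result.
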
@@ -437,7 +437,7 @@ func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) {
 		if e.index.ID == -1 {
 			kvRange, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges)
 		} else {
-			kvRange, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback, nil)
+			kvRange, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback)
 		}
 		if err != nil {
 			return err
@@ -449,7 +449,7 @@ func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) {
 		if e.index.ID == -1 {
 			e.kvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, e.ranges)
 		} else {
-			e.kvRanges, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, e.ranges, e.feedback, nil)
+			e.kvRanges, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, e.ranges, e.feedback)
 		}
 	}
 	return err
diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go
index 095926d4a033f..60828bd514ac4 100644
--- a/executor/index_merge_reader.go
+++ b/executor/index_merge_reader.go
@@ -151,7 +151,7 @@ func (e *IndexMergeReaderExecutor) buildKeyRangesForTable(tbl table.Table) (rang
 			}
 			continue
 		}
-		keyRange, err := distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(tbl), e.indexes[i].ID, e.ranges[i], e.feedbacks[i], nil)
+		keyRange, err := distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(tbl), e.indexes[i].ID, e.ranges[i], e.feedbacks[i])
 		if err != nil {
 			return nil, err
 		}

From edfcd2d01d53fd6f62e17cd6eb653ef0845f58c8 Mon Sep 17 00:00:00 2001
From: xuhuaiyu <391585975@qq.com>
Date: Tue, 23 Nov 2021 15:24:00 +0800
Subject: [PATCH 4/7] address comments and refine the memory tracking for
 indexhashjoin

---
 distsql/request_builder.go          | 41 ++++++++++-----------
 executor/builder.go                 | 55 +++++++++++++++--------------
 executor/executor_pkg_test.go       |  2 +-
 executor/executor_test.go           | 34 ++++++++++--------
 executor/index_lookup_hash_join.go  | 19 ++++++++++
 executor/index_lookup_join.go       | 15 +++++++-
 executor/index_lookup_merge_join.go |  2 +-
 util/memory/tracker.go              |  4 +++
 8 files changed, 107 insertions(+), 65 deletions(-)

diff --git a/distsql/request_builder.go b/distsql/request_builder.go
index 02913dde9fed3..012a0a8266b61 100644
--- a/distsql/request_builder.go
+++ b/distsql/request_builder.go
@@ -552,25 +552,25 @@ func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange {
 
 // IndexRangesToKVRanges converts index ranges to "KeyRange".
 func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
-	return IndexRangesToKVRangesWithInterruptSignal(sc, tid, idxID, ranges, fb, nil)
+	return IndexRangesToKVRangesWithInterruptSignal(sc, tid, idxID, ranges, fb, nil, nil)
 }
 
 // IndexRangesToKVRangesWithInterruptSignal converts index ranges to "KeyRange".
 // The process can be interrupted by setting `interruptSignal` to true.
-func IndexRangesToKVRangesWithInterruptSignal(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
-	return indexRangesToKVRangesForTablesWithInterruptSignal(sc, []int64{tid}, idxID, ranges, fb, interruptSignal)
+func IndexRangesToKVRangesWithInterruptSignal(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
+	return indexRangesToKVRangesForTablesWithInterruptSignal(sc, []int64{tid}, idxID, ranges, fb, memTracker, interruptSignal)
 }
 
 // IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange".
 func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
-	return indexRangesToKVRangesForTablesWithInterruptSignal(sc, tids, idxID, ranges, fb, nil)
+	return indexRangesToKVRangesForTablesWithInterruptSignal(sc, tids, idxID, ranges, fb, nil, nil)
 }
 
 // indexRangesToKVRangesForTablesWithInterruptSignal converts index ranges to "KeyRange".
 // The process can be interrupted by setting `interruptSignal` to true.
-func indexRangesToKVRangesForTablesWithInterruptSignal(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
+func indexRangesToKVRangesForTablesWithInterruptSignal(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
 	if fb == nil || fb.Hist == nil {
-		return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges, interruptSignal)
+		return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges, memTracker, interruptSignal)
 	}
 	feedbackRanges := make([]*ranger.Range, 0, len(ranges))
 	for _, ran := range ranges {
@@ -655,33 +655,34 @@ func VerifyTxnScope(txnScope string, physicalTableID int64, is infoschema.InfoSc
 	return true
 }
 
-func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
+func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) {
 	krs := make([]kv.KeyRange, 0, len(ranges))
-	const step = 8
-	var memUsage int64 = 0
+	const CheckSignalStep = 8
+	var estimatedMemUsage int64
 	// encodeIndexKey and EncodeIndexSeekKey are time-consuming, thus we need to
 	// check the interrupt signal periodically.
for i, ran := range ranges { - if i%step == 0 { - if sc != nil && sc.MemTracker != nil { - sc.MemTracker.Consume(memUsage) - memUsage = 0 - } - if interruptSignal != nil && interruptSignal.Load().(bool) { - return nil, nil - } - } low, high, err := encodeIndexKey(sc, ran) - memUsage += int64(cap(low)) + int64(cap(high)) if err != nil { return nil, err } for _, tid := range tids { startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) - memUsage += int64(cap(startKey)) + int64(cap(endKey)) + if i == 0 { + estimatedMemUsage += int64(cap(startKey)) + int64(cap(endKey)) + } krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) } + if i%CheckSignalStep == 0 { + if i == 0 && memTracker != nil { + estimatedMemUsage *= int64(len(ranges)) + memTracker.Consume(estimatedMemUsage) + } + if interruptSignal != nil && interruptSignal.Load().(bool) { + return nil, nil + } + } } return krs, nil } diff --git a/executor/builder.go b/executor/builder.go index b59d06e57da74..ddb10d310ce6c 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -58,6 +58,7 @@ import ( "github.com/pingcap/tidb/util/cteutil" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/rowcodec" "github.com/pingcap/tidb/util/timeutil" @@ -3599,21 +3600,21 @@ type mockPhysicalIndexReader struct { } func (builder *dataReaderBuilder) buildExecutorForIndexJoin(ctx context.Context, lookUpContents []*indexJoinLookUpContent, - IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { - return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) + IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) { + return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) } func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context.Context, plan plannercore.Plan, lookUpContents []*indexJoinLookUpContent, - IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { + IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) { switch v := plan.(type) { case *plannercore.PhysicalTableReader: - return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) + return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) case *plannercore.PhysicalIndexReader: - return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, interruptSignal) + return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) case 
*plannercore.PhysicalIndexLookUpReader: - return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, interruptSignal) + return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) case *plannercore.PhysicalUnionScan: - return builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) + return builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) // The inner child of IndexJoin might be Projection when a combination of the following conditions is true: // 1. The inner child fetch data using indexLookupReader // 2. PK is not handle @@ -3621,11 +3622,11 @@ func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context. // In this case, an extra column tidb_rowid will be appended in the output result of IndexLookupReader(see copTask.doubleReadNeedProj). // Then we need a Projection upon IndexLookupReader to prune the redundant column. case *plannercore.PhysicalProjection: - return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, interruptSignal) + return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) // Need to support physical selection because after PR 16389, TiDB will push down all the expr supported by TiKV or TiFlash // in predicate push down stage, so if there is an expr which only supported by TiFlash, a physical selection will be added after index read case *plannercore.PhysicalSelection: - childExec, err := builder.buildExecutorForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) + childExec, err := builder.buildExecutorForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) if err != nil { return nil, err } @@ -3643,9 +3644,9 @@ func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context. 
func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context, v *plannercore.PhysicalUnionScan, values []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, - cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { + cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) { childBuilder := &dataReaderBuilder{Plan: v.Children()[0], executorBuilder: builder.executorBuilder} - reader, err := childBuilder.buildExecutorForIndexJoin(ctx, values, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, interruptSignal) + reader, err := childBuilder.buildExecutorForIndexJoin(ctx, values, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) if err != nil { return nil, err } @@ -3659,7 +3660,7 @@ func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalTableReader, lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, - cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, interruptSignal *atomic.Value) (Executor, error) { + cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) { e, err := buildNoRangeTableReader(builder.executorBuilder, v) if err != nil { return nil, err @@ -3667,7 +3668,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte tbInfo := e.table.Meta() if v.IsCommonHandle { if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() { - kvRanges, err := buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) + kvRanges, err := buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) if err != nil { return nil, err } @@ -3696,7 +3697,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte return nil, err } pid := p.GetPhysicalID() - tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, []*indexJoinLookUpContent{content}, indexRanges, keyOff2IdxOff, cwc, interruptSignal) + tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, []*indexJoinLookUpContent{content}, indexRanges, keyOff2IdxOff, cwc, nil, interruptSignal) if err != nil { return nil, err } @@ -3711,7 +3712,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte kvRanges = make([]kv.KeyRange, 0, len(partitions)*len(lookUpContents)) for _, p := range partitions { pid := p.GetPhysicalID() - tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) + tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) if err != nil { return nil, err } @@ -3877,14 +3878,14 @@ func (builder *dataReaderBuilder) buildTableReaderFromKvRanges(ctx context.Conte } func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexReader, - lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (Executor, error) { + 
lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memoryTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) { e, err := buildNoRangeIndexReader(builder.executorBuilder, v) if err != nil { return nil, err } tbInfo := e.table.Meta() if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() { - kvRanges, err := buildKvRangesForIndexJoin(e.ctx, e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) + kvRanges, err := buildKvRangesForIndexJoin(e.ctx, e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memoryTracker, interruptSignal) if err != nil { return nil, err } @@ -3923,7 +3924,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte } func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexLookUpReader, - lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (Executor, error) { + lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) { e, err := buildNoRangeIndexLookUpReader(builder.executorBuilder, v) if err != nil { return nil, err @@ -3931,7 +3932,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context tbInfo := e.table.Meta() if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() { - e.kvRanges, err = buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal) + e.kvRanges, err = buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) if err != nil { return nil, err } @@ -3971,18 +3972,18 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context } func (builder *dataReaderBuilder) buildProjectionForIndexJoin(ctx context.Context, v *plannercore.PhysicalProjection, - lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (Executor, error) { + lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) { var ( childExec Executor err error ) switch op := v.Children()[0].(type) { case *plannercore.PhysicalIndexLookUpReader: - if childExec, err = builder.buildIndexLookUpReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, interruptSignal); err != nil { + if childExec, err = builder.buildIndexLookUpReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal); err != nil { return nil, err } case *plannercore.PhysicalTableReader: - if childExec, err = builder.buildTableReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, true, interruptSignal); err != nil { + if childExec, err = builder.buildTableReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, true, memTracker, interruptSignal); err != nil { return 
nil, err } default: @@ -4051,7 +4052,7 @@ func buildRangesForIndexJoin(ctx sessionctx.Context, lookUpContents []*indexJoin // buildKvRangesForIndexJoin builds kv ranges for index join when the inner plan is index scan plan. func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, lookUpContents []*indexJoinLookUpContent, - ranges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, interruptSignal *atomic.Value) (_ []kv.KeyRange, err error) { + ranges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (_ []kv.KeyRange, err error) { kvRanges := make([]kv.KeyRange, 0, len(ranges)*len(lookUpContents)) lastPos := len(ranges[0].LowVal) - 1 sc := ctx.GetSessionVars().StmtCtx @@ -4070,7 +4071,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l if indexID == -1 { tmpKvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{tableID}, ranges) } else { - tmpKvRanges, err = distsql.IndexRangesToKVRangesWithInterruptSignal(sc, tableID, indexID, ranges, nil, interruptSignal) + tmpKvRanges, err = distsql.IndexRangesToKVRangesWithInterruptSignal(sc, tableID, indexID, ranges, nil, memTracker, interruptSignal) } if err != nil { return nil, err @@ -4094,14 +4095,14 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l } if len(tmpDatumRanges) > tmpDatumRangesLen { for _, ran := range tmpDatumRanges[tmpDatumRangesLen:] { - sc.MemTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal))) + memTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal))) } } } if cwc == nil { if len(kvRanges) != 0 { - sc.MemTracker.Consume(2 * int64(len(kvRanges[0].StartKey)*len(kvRanges))) + memTracker.Consume(2 * int64(len(kvRanges[0].StartKey)*len(kvRanges))) } sort.Slice(kvRanges, func(i, j int) bool { return bytes.Compare(kvRanges[i].StartKey, kvRanges[j].StartKey) < 0 @@ -4117,7 +4118,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l if indexID == -1 { return distsql.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tableID}, tmpDatumRanges) } - return distsql.IndexRangesToKVRangesWithInterruptSignal(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil, interruptSignal) + return distsql.IndexRangesToKVRangesWithInterruptSignal(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil, memTracker, interruptSignal) } func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor { diff --git a/executor/executor_pkg_test.go b/executor/executor_pkg_test.go index 65308b8ca2442..dc3e68acaa4f5 100644 --- a/executor/executor_pkg_test.go +++ b/executor/executor_pkg_test.go @@ -195,7 +195,7 @@ func SubTestBuildKvRangesForIndexJoinWithoutCwc(t *testing.T) { keyOff2IdxOff := []int{1, 3} ctx := mock.NewContext() - kvRanges, err := buildKvRangesForIndexJoin(ctx, 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, nil) + kvRanges, err := buildKvRangesForIndexJoin(ctx, 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, nil, nil) require.NoError(t, err) // Check the kvRanges is in order. 
for i, kvRange := range kvRanges { diff --git a/executor/executor_test.go b/executor/executor_test.go index 8c03fe83a4412..14ed93fa4d45e 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -9337,19 +9337,21 @@ func (s *testSerialSuite) TestIssue28650(c *C) { wg := &sync.WaitGroup{} sql := `explain analyze - select /*+ stream_agg(@sel_1) stream_agg(@sel_3) inl_join(@sel_2 t2)*/ count(1) from + select /*+ stream_agg(@sel_1) stream_agg(@sel_3) %s(@sel_2 t2)*/ count(1) from ( SELECT t2.a AS t2_external_user_ext_id, t2.b AS t2_t1_ext_id FROM t2 INNER JOIN (SELECT t1.a AS d_t1_ext_id FROM t1 GROUP BY t1.a) AS anon_1 ON anon_1.d_t1_ext_id = t2.a WHERE t2.c = 123 AND t2.b IN ("%s") ) tmp` wg.Add(1) + sqls := make([]string, 2) go func() { defer wg.Done() inElems := make([]string, 1000) for i := 0; i < len(inElems); i++ { inElems[i] = fmt.Sprintf("wm_%dbDgAAwCD-v1QB%dxky-g_dxxQCw", rand.Intn(100), rand.Intn(100)) } - sql = fmt.Sprintf(sql, strings.Join(inElems, "\",\"")) + sqls[0] = fmt.Sprintf(sql, "inl_join", strings.Join(inElems, "\",\"")) + sqls[1] = fmt.Sprintf(sql, "inl_hash_join", strings.Join(inElems, "\",\"")) }() tk.MustExec("insert into t1 select rand()*400;") @@ -9365,18 +9367,20 @@ func (s *testSerialSuite) TestIssue28650(c *C) { }) }() wg.Wait() - tk.MustExec("set @@tidb_mem_quota_query = 1073741824") // 1GB - c.Assert(tk.QueryToErr(sql), IsNil) - tk.MustExec("set @@tidb_mem_quota_query = 104857600") // 100MB, out of memory during executing - c.Assert(strings.Contains(tk.QueryToErr(sql).Error(), "Out Of Memory Quota!"), IsTrue) - tk.MustExec("set @@tidb_mem_quota_query = 64000") // 64KB, out of memory during building the plan - func() { - defer func() { - r := recover() - c.Assert(r, NotNil) - err := errors.Errorf("%v", r) - c.Assert(strings.Contains(err.Error(), "Out Of Memory Quota!"), IsTrue) + for _, sql := range sqls { + tk.MustExec("set @@tidb_mem_quota_query = 1073741824") // 1GB + c.Assert(tk.QueryToErr(sql), IsNil) + tk.MustExec("set @@tidb_mem_quota_query = 67108864") // 64MB, out of memory during executing + c.Assert(strings.Contains(tk.QueryToErr(sql).Error(), "Out Of Memory Quota!"), IsTrue) + tk.MustExec("set @@tidb_mem_quota_query = 65536") // 64KB, out of memory during building the plan + func() { + defer func() { + r := recover() + c.Assert(r, NotNil) + err := errors.Errorf("%v", r) + c.Assert(strings.Contains(err.Error(), "Out Of Memory Quota!"), IsTrue) + }() + tk.MustExec(sql) }() - tk.MustExec(sql) - }() + } } diff --git a/executor/index_lookup_hash_join.go b/executor/index_lookup_hash_join.go index e4d045d8df97e..762457c79c8a8 100644 --- a/executor/index_lookup_hash_join.go +++ b/executor/index_lookup_hash_join.go @@ -202,6 +202,7 @@ func (e *IndexNestedLoopHashJoin) startWorkers(ctx context.Context) { func (e *IndexNestedLoopHashJoin) finishJoinWorkers(r interface{}) { if r != nil { + e.IndexLookUpJoin.finished.Store(true) err := errors.New(fmt.Sprintf("%v", r)) if !e.keepOuterOrder { e.resultCh <- &indexHashJoinResult{err: err} @@ -210,6 +211,7 @@ func (e *IndexNestedLoopHashJoin) finishJoinWorkers(r interface{}) { e.taskCh <- task } if e.cancelFunc != nil { + e.IndexLookUpJoin.ctxCancelReason.Store(err) e.cancelFunc() } } @@ -246,6 +248,9 @@ func (e *IndexNestedLoopHashJoin) Next(ctx context.Context, req *chunk.Chunk) er return result.err } case <-ctx.Done(): + if err := e.IndexLookUpJoin.ctxCancelReason.Load(); err != nil { + return err.(error) + } return ctx.Err() } req.SwapColumns(result.chk) @@ -275,6 +280,9 @@ func (e 
 			return result.err
 		}
 	case <-ctx.Done():
+		if err := e.IndexLookUpJoin.ctxCancelReason.Load(); err != nil {
+			return err.(error)
+		}
 		return ctx.Err()
 	}
 	req.SwapColumns(result.chk)
@@ -435,6 +443,7 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask,
 			keyOff2IdxOff: e.keyOff2IdxOff,
 			stats:         innerStats,
 			lookup:        &e.IndexLookUpJoin,
+			memTracker:    memory.NewTracker(memory.LabelForIndexJoinInnerWorker, -1),
 		},
 		taskCh: taskCh,
 		joiner: e.joiners[workerID],
@@ -444,6 +453,7 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask,
 		joinKeyBuf:     make([]byte, 1),
 		outerRowStatus: make([]outerRowStatusFlag, 0, e.maxChunkSize),
 	}
+	iw.memTracker.AttachTo(e.memTracker)
 	if e.lastColHelper != nil {
 		// nextCwf.TmpConstant needs to be reset for every individual
 		// inner worker to avoid data race when the inner workers is running
@@ -587,6 +597,9 @@ func (iw *indexHashJoinInnerWorker) handleHashJoinInnerWorkerPanic(r interface{}
 }
 
 func (iw *indexHashJoinInnerWorker) handleTask(ctx context.Context, task *indexHashJoinTask, joinResult *indexHashJoinResult, h hash.Hash64, resultCh chan *indexHashJoinResult) error {
+	defer func() {
+		iw.memTracker.Consume(-iw.memTracker.BytesConsumed())
+	}()
 	var joinStartTime time.Time
 	if iw.stats != nil {
 		start := time.Now()
@@ -634,6 +647,9 @@ func (iw *indexHashJoinInnerWorker) doJoinUnordered(ctx context.Context, task *i
 		select {
 		case resultCh <- joinResult:
 		case <-ctx.Done():
+			if err := iw.lookup.ctxCancelReason.Load(); err != nil {
+				return err.(error)
+			}
 			return ctx.Err()
 		}
 		joinResult, ok = iw.getNewJoinResult(ctx)
@@ -782,6 +798,9 @@ func (iw *indexHashJoinInnerWorker) doJoinInOrder(ctx context.Context, task *ind
 		select {
 		case resultCh <- joinResult:
 		case <-ctx.Done():
+			if err := iw.lookup.ctxCancelReason.Load(); err != nil {
+				return err.(error)
+			}
 			return ctx.Err()
 		}
 		joinResult, ok = iw.getNewJoinResult(ctx)
diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go
index ccccd7ed6551a..a33962850e218 100644
--- a/executor/index_lookup_join.go
+++ b/executor/index_lookup_join.go
@@ -153,6 +153,7 @@ type innerWorker struct {
 	nextColCompareFilters *plannercore.ColWithCmpFuncManager
 	keyOff2IdxOff         []int
 	stats                 *innerWorkerRuntimeStats
+	memTracker            *memory.Tracker
 }
 
 // Open implements the Executor interface.
@@ -227,6 +228,15 @@ func (e *IndexLookUpJoin) newInnerWorker(taskCh chan *lookUpJoinTask) *innerWork
 		keyOff2IdxOff: e.keyOff2IdxOff,
 		stats:         innerStats,
 		lookup:        e,
+		memTracker:    memory.NewTracker(memory.LabelForIndexJoinInnerWorker, -1),
+	}
+	iw.memTracker.AttachTo(e.memTracker)
+	for _, ran := range copiedRanges {
+		// We should not consume this memory usage in `iw.memTracker`. The
+		// memory usage of the inner worker is reset at the end of iw.handleTask,
+		// while this memory consumption stays alive throughout the whole
+		// active period of the inner worker.
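// Editor's note (illustrative, not part of the patch): two trackers with
// different lifetimes are in play here. Per-task memory goes to iw.memTracker,
// which handleTask drains wholesale once a task finishes, using the defer
// added earlier in this patch:
//
//	defer func() {
//		// consuming a negative delta equal to the current total
//		// returns the per-task usage to zero in a single step
//		iw.memTracker.Consume(-iw.memTracker.BytesConsumed())
//	}()
//
// Memory that must outlive individual tasks, such as these copied ranges, is
// charged to the statement-level tracker instead, as the next line does.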
+		e.ctx.GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal)))
+	}
 	if e.lastColHelper != nil {
 		// nextCwf.TmpConstant needs to be reset for every individual
@@ -507,6 +517,9 @@ func (iw *innerWorker) handleTask(ctx context.Context, task *lookUpJoinTask) err
 			atomic.AddInt64(&iw.stats.totalTime, int64(time.Since(start)))
 		}()
 	}
+	defer func() {
+		iw.memTracker.Consume(-iw.memTracker.BytesConsumed())
+	}()
 	lookUpContents, err := iw.constructLookupContent(task)
 	if err != nil {
 		return err
@@ -670,7 +683,7 @@ func (iw *innerWorker) fetchInnerResults(ctx context.Context, task *lookUpJoinTa
 			atomic.AddInt64(&iw.stats.fetch, int64(time.Since(start)))
 		}()
 	}
-	innerExec, err := iw.readerBuilder.buildExecutorForIndexJoin(ctx, lookUpContent, iw.indexRanges, iw.keyOff2IdxOff, iw.nextColCompareFilters, true, iw.lookup.finished)
+	innerExec, err := iw.readerBuilder.buildExecutorForIndexJoin(ctx, lookUpContent, iw.indexRanges, iw.keyOff2IdxOff, iw.nextColCompareFilters, true, iw.memTracker, iw.lookup.finished)
 	if innerExec != nil {
 		defer terror.Call(innerExec.Close)
 	}
diff --git a/executor/index_lookup_merge_join.go b/executor/index_lookup_merge_join.go
index 9d4785ff596cf..746fc6a5733fc 100644
--- a/executor/index_lookup_merge_join.go
+++ b/executor/index_lookup_merge_join.go
@@ -502,7 +502,7 @@ func (imw *innerMergeWorker) handleTask(ctx context.Context, task *lookUpMergeJo
 			dLookUpKeys[i], dLookUpKeys[lenKeys-i-1] = dLookUpKeys[lenKeys-i-1], dLookUpKeys[i]
 		}
 	}
-	imw.innerExec, err = imw.readerBuilder.buildExecutorForIndexJoin(ctx, dLookUpKeys, imw.indexRanges, imw.keyOff2IdxOff, imw.nextColCompareFilters, false, nil)
+	imw.innerExec, err = imw.readerBuilder.buildExecutorForIndexJoin(ctx, dLookUpKeys, imw.indexRanges, imw.keyOff2IdxOff, imw.nextColCompareFilters, false, nil, nil)
 	if imw.innerExec != nil {
 		defer terror.Call(imw.innerExec.Close)
 	}
diff --git a/util/memory/tracker.go b/util/memory/tracker.go
index cda7a67ad278b..470029e309402 100644
--- a/util/memory/tracker.go
+++ b/util/memory/tracker.go
@@ -552,4 +552,8 @@ const (
 	LabelForSimpleTask int = -18
 	// LabelForCTEStorage represents the label of CTE storage
 	LabelForCTEStorage int = -19
+	// LabelForIndexJoinInnerWorker represents the label of IndexJoin InnerWorker
+	LabelForIndexJoinInnerWorker int = -20
+	// LabelForIndexJoinOuterWorker represents the label of IndexJoin OuterWorker
+	LabelForIndexJoinOuterWorker int = -21
 )

From 0a43b34f84cde179e760d151bdc3095099b11ab8 Mon Sep 17 00:00:00 2001
From: xuhuaiyu <391585975@qq.com>
Date: Tue, 23 Nov 2021 16:23:18 +0800
Subject: [PATCH 5/7] refine

---
 executor/builder.go       | 12 +++---------
 executor/executor_test.go |  2 +-
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/executor/builder.go b/executor/builder.go
index ddb10d310ce6c..7c682a81514ca 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -4083,7 +4083,6 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l
 		if err != nil {
 			return nil, err
 		}
-		tmpDatumRangesLen := len(tmpDatumRanges)
 		for _, nextColRan := range nextColRanges {
 			for _, ran := range ranges {
 				ran.LowVal[lastPos] = nextColRan.LowVal[0]
@@ -4093,17 +4092,12 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l
 				tmpDatumRanges = append(tmpDatumRanges, ran.Clone())
 			}
 		}
-		if len(tmpDatumRanges) > tmpDatumRangesLen {
-			for _, ran := range tmpDatumRanges[tmpDatumRangesLen:] {
-				memTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal)))
-			}
-		}
 	}
 
+	if len(tmpDatumRanges) != 0 {
+		memTracker.Consume(2 * int64(len(tmpDatumRanges)) * types.EstimatedMemUsage(tmpDatumRanges[0].LowVal, len(tmpDatumRanges[0].LowVal)))
+	}
 	if cwc == nil {
-		if len(kvRanges) != 0 {
-			memTracker.Consume(2 * int64(len(kvRanges[0].StartKey)*len(kvRanges)))
-		}
 		sort.Slice(kvRanges, func(i, j int) bool {
 			return bytes.Compare(kvRanges[i].StartKey, kvRanges[j].StartKey) < 0
 		})
diff --git a/executor/executor_test.go b/executor/executor_test.go
index 14ed93fa4d45e..86b181b178cc0 100644
--- a/executor/executor_test.go
+++ b/executor/executor_test.go
@@ -9370,7 +9370,7 @@ func (s *testSerialSuite) TestIssue28650(c *C) {
 	for _, sql := range sqls {
 		tk.MustExec("set @@tidb_mem_quota_query = 1073741824") // 1GB
 		c.Assert(tk.QueryToErr(sql), IsNil)
-		tk.MustExec("set @@tidb_mem_quota_query = 67108864") // 64MB, out of memory during execution
+		tk.MustExec("set @@tidb_mem_quota_query = 33554432") // 32MB, out of memory during execution
 		c.Assert(strings.Contains(tk.QueryToErr(sql).Error(), "Out Of Memory Quota!"), IsTrue)
 		tk.MustExec("set @@tidb_mem_quota_query = 65536") // 64KB, out of memory during building the plan
 		func() {

From e4a02dd0292a8afd2c88cb429e4e5961daf06790 Mon Sep 17 00:00:00 2001
From: xuhuaiyu <391585975@qq.com>
Date: Tue, 23 Nov 2021 18:25:45 +0800
Subject: [PATCH 6/7] address comment

---
 distsql/request_builder.go             |  3 +++
 executor/builder.go                    |  8 +++++---
 executor/index_lookup_hash_join.go     |  8 ++++++++
 executor/index_lookup_join.go          |  8 +++++---
 planner/core/exhaust_physical_plans.go | 10 ++++------
 5 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/distsql/request_builder.go b/distsql/request_builder.go
index 012a0a8266b61..db62df97dc1c1 100644
--- a/distsql/request_builder.go
+++ b/distsql/request_builder.go
@@ -666,6 +666,9 @@ func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idx
 		if err != nil {
 			return nil, err
 		}
+		if i == 0 {
+			estimatedMemUsage += int64(cap(low) + cap(high))
+		}
 		for _, tid := range tids {
 			startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
 			endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
diff --git a/executor/builder.go b/executor/builder.go
index 7c682a81514ca..fb8c194f94e59 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -4093,9 +4093,11 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l
 			}
 		}
 	}
-
-	if len(tmpDatumRanges) != 0 {
-		memTracker.Consume(2 * int64(len(tmpDatumRanges)) * types.EstimatedMemUsage(tmpDatumRanges[0].LowVal, len(tmpDatumRanges[0].LowVal)))
+	if len(kvRanges) != 0 && memTracker != nil {
+		memTracker.Consume(int64(2 * cap(kvRanges[0].StartKey) * len(kvRanges)))
+	}
+	if len(tmpDatumRanges) != 0 && memTracker != nil {
+		memTracker.Consume(2 * int64(len(tmpDatumRanges)) * types.EstimatedMemUsage(tmpDatumRanges[0].LowVal, len(tmpDatumRanges)))
 	}
 	if cwc == nil {
 		sort.Slice(kvRanges, func(i, j int) bool {
 			return bytes.Compare(kvRanges[i].StartKey, kvRanges[j].StartKey) < 0
 		})
diff --git a/executor/index_lookup_hash_join.go b/executor/index_lookup_hash_join.go
index 762457c79c8a8..20220b9950661 100644
--- a/executor/index_lookup_hash_join.go
+++ b/executor/index_lookup_hash_join.go
@@ -28,6 +28,7 @@ import (
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/expression"
 	plannercore "github.com/pingcap/tidb/planner/core"
+	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/util"
 	"github.com/pingcap/tidb/util/chunk"
 	"github.com/pingcap/tidb/util/codec"
@@ -454,6 +455,13 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask,
 		outerRowStatus: make([]outerRowStatusFlag, 0, e.maxChunkSize),
 	}
 	iw.memTracker.AttachTo(e.memTracker)
+	if len(copiedRanges) != 0 {
+		// We should not consume this memory usage in `iw.memTracker`. The
+		// memory usage of the inner worker is reset at the end of iw.handleTask,
+		// while this memory consumption stays alive throughout the whole
+		// active period of the inner worker.
+		e.ctx.GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(copiedRanges[0].LowVal, len(copiedRanges)))
+	}
 	if e.lastColHelper != nil {
 		// nextCwf.TmpConstant needs to be reset for every individual
 		// inner worker to avoid data race when the inner workers is running
diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go
index a33962850e218..306527e08e1eb 100644
--- a/executor/index_lookup_join.go
+++ b/executor/index_lookup_join.go
@@ -231,12 +231,12 @@ func (e *IndexLookUpJoin) newInnerWorker(taskCh chan *lookUpJoinTask) *innerWork
 		memTracker:    memory.NewTracker(memory.LabelForIndexJoinInnerWorker, -1),
 	}
 	iw.memTracker.AttachTo(e.memTracker)
-	for _, ran := range copiedRanges {
+	if len(copiedRanges) != 0 {
 		// We should not consume this memory usage in `iw.memTracker`. The
 		// memory usage of the inner worker is reset at the end of iw.handleTask,
 		// while this memory consumption stays alive throughout the whole
 		// active period of the inner worker.
-		e.ctx.GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal)))
+		e.ctx.GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(copiedRanges[0].LowVal, len(copiedRanges)))
 	}
 	if e.lastColHelper != nil {
 		// nextCwf.TmpConstant needs to be reset for every individual
@@ -550,7 +550,6 @@ func (iw *innerWorker) constructLookupContent(task *lookUpJoinTask) ([]*indexJoi
 		numRows := chk.NumRows()
 		for rowIdx := 0; rowIdx < numRows; rowIdx++ {
 			dLookUpKey, dHashKey, err := iw.constructDatumLookupKey(task, chkIdx, rowIdx)
-			iw.lookup.memTracker.Consume(types.EstimatedMemUsage(dLookUpKey, len(dLookUpKey)))
 			if err != nil {
 				if terror.ErrorEqual(err, types.ErrWrongValue) {
 					// We ignore rows with invalid datetime.
@@ -559,6 +558,9 @@ func (iw *innerWorker) constructLookupContent(task *lookUpJoinTask) ([]*indexJoi
 				}
 				return nil, err
 			}
+			if rowIdx == 0 {
+				iw.lookup.memTracker.Consume(types.EstimatedMemUsage(dLookUpKey, numRows))
+			}
 			if dHashKey == nil {
 				// Append null to make looUpKeys the same length as outer Result.
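// Editor's note (illustrative, not part of the patch): the hunk above swaps
// per-row accounting for sampling. Only the first lookup key of each chunk is
// measured, and types.EstimatedMemUsage extrapolates that sample to all
// numRows rows, trading a little precision for far fewer tracker calls:
//
//	if rowIdx == 0 {
//		// one sampled row, scaled up to the whole chunk
//		iw.lookup.memTracker.Consume(types.EstimatedMemUsage(dLookUpKey, numRows))
//	}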
 				task.encodedLookUpKeys[chkIdx].AppendNull(0)
diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go
index 6f445f8db9660..f3466a6b11f85 100644
--- a/planner/core/exhaust_physical_plans.go
+++ b/planner/core/exhaust_physical_plans.go
@@ -1567,9 +1567,7 @@ func (ijHelper *indexJoinBuildHelper) buildTemplateRange(matchedKeyCnt int, eqAn
 				return nil, true, nil
 			}
 			if sc.MemTracker != nil {
-				for _, ran := range oneColumnRan {
-					sc.MemTracker.Consume(2 * types.EstimatedMemUsage(ran.LowVal, len(ran.LowVal)))
-				}
+				sc.MemTracker.Consume(2 * types.EstimatedMemUsage(oneColumnRan[0].LowVal, len(oneColumnRan)))
 			}
 			for _, ran := range ranges {
 				ran.LowVal[i] = oneColumnRan[0].LowVal[0]
@@ -1580,13 +1578,13 @@ func (ijHelper *indexJoinBuildHelper) buildTemplateRange(matchedKeyCnt int, eqAn
 				newRanges := make([]*ranger.Range, 0, curRangeLen)
 				for oldRangeIdx := 0; oldRangeIdx < curRangeLen; oldRangeIdx++ {
 					newRange := ranges[oldRangeIdx].Clone()
-					if sc.MemTracker != nil {
-						sc.MemTracker.Consume(2 * types.EstimatedMemUsage(newRange.LowVal, len(newRange.LowVal)))
-					}
 					newRange.LowVal[i] = oneColumnRan[ranIdx].LowVal[0]
 					newRange.HighVal[i] = oneColumnRan[ranIdx].HighVal[0]
 					newRanges = append(newRanges, newRange)
 				}
+				if sc.MemTracker != nil && len(newRanges) != 0 {
+					sc.MemTracker.Consume(2 * types.EstimatedMemUsage(newRanges[0].LowVal, len(newRanges)))
+				}
 				ranges = append(ranges, newRanges...)
 			}
 			j++

From 255dfe01aa220d6c0daedecf7f4792f719c429d9 Mon Sep 17 00:00:00 2001
From: xuhuaiyu <391585975@qq.com>
Date: Wed, 24 Nov 2021 10:46:56 +0800
Subject: [PATCH 7/7] merge master

---
 planner/core/exhaust_physical_plans.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go
index 799188dddd7f8..fb6a02d34b484 100644
--- a/planner/core/exhaust_physical_plans.go
+++ b/planner/core/exhaust_physical_plans.go
@@ -1555,6 +1555,7 @@ func (ijHelper *indexJoinBuildHelper) buildTemplateRange(matchedKeyCnt int, eqAn
 			HighVal: make([]types.Datum, pointLength),
 		})
 	}
+	sc := ijHelper.join.ctx.GetSessionVars().StmtCtx
 	for i, j := 0, 0; j < len(eqAndInFuncs); i++ {
 		// This position is occupied by join key.
 		if ijHelper.curIdxOff2KeyOff[i] != -1 {
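// Editor's note (illustrative, not part of the patch; the final hunk is
// truncated here): patch 7 hoists the StatementContext lookup out of the loop
// so buildTemplateRange can keep charging sc.MemTracker with the sampled
// batch pattern introduced in patch 6, e.g.:
//
//	if sc.MemTracker != nil && len(newRanges) != 0 {
//		// clones in a batch share one shape, so sample the first
//		sc.MemTracker.Consume(2 * types.EstimatedMemUsage(newRanges[0].LowVal, len(newRanges)))
//	}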