Interpreter: Run interpreter test without exchange executors #4788

Merged 29 commits on May 10, 2022

Changes shown are from 24 of the 29 commits.

Commits (29)
8e17622
init.
ywqzzy Apr 27, 2022
c8fd369
fix.
ywqzzy Apr 27, 2022
4a29a27
Merge branch 'master' of https://github.com/pingcap/tiflash into run_…
ywqzzy Apr 28, 2022
491dafd
fix restoreConcurrency.
ywqzzy Apr 28, 2022
fbd66bb
some tests
ywqzzy Apr 28, 2022
b5e5614
format.
ywqzzy Apr 28, 2022
99830e3
fix build fail.
ywqzzy Apr 28, 2022
347bf8a
format, more tests.
ywqzzy Apr 28, 2022
a7d4602
clean
ywqzzy Apr 28, 2022
9a7b988
refactor
ywqzzy Apr 28, 2022
781489e
getColumnsFromTableScan utils and clean code.
ywqzzy Apr 28, 2022
283639e
refactor gencolumn related functions.
ywqzzy Apr 29, 2022
7dbd5c5
remove useless cout.
ywqzzy Apr 29, 2022
120a983
all user choose stream size.
ywqzzy Apr 29, 2022
0ce3668
address comments.
ywqzzy Apr 29, 2022
02f3509
rename.
ywqzzy Apr 29, 2022
3dad372
rename.
ywqzzy May 5, 2022
4910d32
address comments.
ywqzzy May 5, 2022
cc81990
clean
ywqzzy May 5, 2022
fc46cd5
address comments.
ywqzzy May 5, 2022
b23f514
address comments
ywqzzy May 5, 2022
0216daa
Merge branch 'master' of https://github.com/pingcap/tiflash into run_…
ywqzzy May 5, 2022
927234c
address comments.
ywqzzy May 5, 2022
2639b7f
format.
ywqzzy May 5, 2022
eca8746
address comment.
ywqzzy May 5, 2022
eb50f1e
Merge branch 'master' of https://github.com/pingcap/tiflash into run_…
ywqzzy May 5, 2022
589263d
fix.
ywqzzy May 6, 2022
4846d23
Update dbms/src/Flash/Coprocessor/GenSchemaAndColumn.cpp
ywqzzy May 10, 2022
ced4d51
Merge branch 'master' into run_interpreter_test
ywqzzy May 10, 2022
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <TestUtils/MockTableScanBlockInputStream.h>
#include <DataStreams/MockTableScanBlockInputStream.h>

namespace DB
{
@@ -32,7 +32,7 @@ MockTableScanBlockInputStream::MockTableScanBlockInputStream(ColumnsWithTypeAndN
}
}

ColumnPtr MockTableScanBlockInputStream::makeColumn(ColumnWithTypeAndName elem)
ColumnPtr MockTableScanBlockInputStream::makeColumn(ColumnWithTypeAndName elem) const
{
auto column = elem.type->createColumn();
size_t row_count = 0;
@@ -34,7 +34,7 @@ class MockTableScanBlockInputStream : public IProfilingBlockInputStream

protected:
Block readImpl() override;
ColumnPtr makeColumn(ColumnWithTypeAndName elem);
ColumnPtr makeColumn(ColumnWithTypeAndName elem) const;
};

} // namespace DB
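For reference, a minimal sketch of driving the mock stream directly in a test, assuming a TestUtils-style `createColumn` helper for the input data; the constructor signature (columns, max_block_size) is the one used at the call site in DAGQueryBlockInterpreter below.

```cpp
// Sketch only: drain a MockTableScanBlockInputStream built from hand-made columns.
// `createColumn` is an assumed TestUtils-style helper; the data is illustrative.
ColumnsWithTypeAndName columns{
    createColumn<Int64>({1, 2, 3}, "col_a"),
    createColumn<String>({"x", "y", "z"}, "col_b")};
auto stream = std::make_shared<MockTableScanBlockInputStream>(columns, /*max_block_size=*/8192);
stream->readPrefix();
while (Block block = stream->read()) // read() returns an empty Block once the data is exhausted
    ASSERT_GT(block.rows(), 0);
stream->readSuffix();
```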
27 changes: 27 additions & 0 deletions dbms/src/Flash/Coprocessor/DAGContext.h
@@ -167,8 +167,30 @@ class DAGContext
, max_recorded_error_count(max_error_count_)
, warnings(max_recorded_error_count)
, warning_count(0)
, is_test(true)
{}

// for tests that need to run query tasks.
explicit DAGContext(const tipb::DAGRequest & dag_request_, String log_identifier, size_t concurrency)
: dag_request(&dag_request_)
, initialize_concurrency(concurrency)
, is_mpp_task(false)
, is_root_mpp_task(false)
, tunnel_set(nullptr)
, log(Logger::get(log_identifier))
, flags(dag_request->flags())
, sql_mode(dag_request->sql_mode())
, max_recorded_error_count(getMaxErrorCount(*dag_request))
, warnings(max_recorded_error_count)
, warning_count(0)
, is_test(true)
{
assert(dag_request->has_root_executor() || dag_request->executors_size() > 0);
return_executor_id = dag_request->root_executor().has_executor_id() || dag_request->executors(0).has_executor_id();

initOutputInfo();
}

void attachBlockIO(const BlockIO & io_);
std::unordered_map<String, BlockInputStreams> & getProfileStreamsMap();

@@ -275,6 +297,8 @@ class DAGContext
return sql_mode & f;
}

bool isTest() const { return is_test; }

void cancelAllExchangeReceiver();

void initExchangeReceiverIfMPP(Context & context, size_t max_streams);
@@ -287,6 +311,7 @@ class DAGContext
const tipb::DAGRequest * dag_request;
Int64 compile_time_ns = 0;
size_t final_concurrency = 1;
size_t initialize_concurrency = 1;
bool has_read_wait_index = false;
Clock::time_point read_wait_index_start_timestamp{Clock::duration::zero()};
Clock::time_point read_wait_index_end_timestamp{Clock::duration::zero()};
@@ -345,6 +370,8 @@ class DAGContext
/// vector of SubqueriesForSets(such as join build subquery).
/// The order of the vector is also the order of the subquery.
std::vector<SubqueriesForSets> subqueries;

bool is_test = false; /// test-only switch; do not use it in production.
};

} // namespace DB
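A minimal sketch of how an interpreter test might use the new test-only constructor; `buildDAGRequest()` is a hypothetical helper standing in for whatever the test harness uses to assemble a tipb::DAGRequest.

```cpp
// Sketch only: constructing a test-mode DAGContext.
tipb::DAGRequest dag_request = buildDAGRequest(); // hypothetical test helper
DAGContext dag_context(dag_request, /*log_identifier=*/"interpreter_test", /*concurrency=*/4);
assert(dag_context.isTest());     // is_test is set unconditionally by the test constructors
// initialize_concurrency (4 here) later becomes max_streams in InterpreterDAG.
```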
28 changes: 22 additions & 6 deletions dbms/src/Flash/Coprocessor/DAGQueryBlockInterpreter.cpp
@@ -23,6 +23,7 @@
#include <DataStreams/HashJoinProbeBlockInputStream.h>
#include <DataStreams/LimitBlockInputStream.h>
#include <DataStreams/MergeSortingBlockInputStream.h>
#include <DataStreams/MockTableScanBlockInputStream.h>
#include <DataStreams/NullBlockInputStream.h>
#include <DataStreams/ParallelAggregatingBlockInputStream.h>
#include <DataStreams/PartialSortingBlockInputStream.h>
@@ -38,6 +39,7 @@
#include <Flash/Coprocessor/DAGQueryBlockInterpreter.h>
#include <Flash/Coprocessor/DAGUtils.h>
#include <Flash/Coprocessor/ExchangeSenderInterpreterHelper.h>
#include <Flash/Coprocessor/GenSchemaAndColumn.h>
#include <Flash/Coprocessor/InterpreterUtils.h>
#include <Flash/Coprocessor/StreamingDAGResponseWriter.h>
#include <Flash/Mpp/ExchangeReceiver.h>
@@ -46,6 +48,7 @@
#include <Interpreters/Join.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Storages/Transaction/TiDB.h>
#include <WindowFunctions/WindowFunctionFactory.h>

namespace DB
@@ -114,7 +117,8 @@ AnalysisResult analyzeExpressions(
AnalysisResult res;
ExpressionActionsChain chain;
// selection on table scan had been executed in handleTableScan
if (query_block.selection && !query_block.isTableScanSource())
// In test mode, the filter is not pushed down to the table scan
if (query_block.selection && (!query_block.isTableScanSource() || context.getDAGContext()->isTest()))
{
std::vector<const tipb::Expr *> where_conditions;
for (const auto & c : query_block.selection->selection().conditions())
@@ -300,6 +304,19 @@ void DAGQueryBlockInterpreter::handleTableScan(const TiDBTableScan & table_scan,
}
}

// For tests, we need to mock a table scan BlockInputStream as the source stream.
void DAGQueryBlockInterpreter::handleMockTableScan(const TiDBTableScan & table_scan, DAGPipeline & pipeline)
{
auto names_and_types = genNamesAndTypes(table_scan);
auto columns_with_type_and_name = getColumnWithTypeAndName(names_and_types);
analyzer = std::make_unique<DAGExpressionAnalyzer>(std::move(names_and_types), context);
for (size_t i = 0; i < max_streams; ++i)
{
auto mock_table_scan_stream = std::make_shared<MockTableScanBlockInputStream>(columns_with_type_and_name, context.getSettingsRef().max_block_size);
pipeline.streams.emplace_back(mock_table_scan_stream);
}
}

void DAGQueryBlockInterpreter::executePushedDownFilter(
const std::vector<const tipb::Expr *> & conditions,
size_t remote_read_streams_start_index,
@@ -1042,7 +1059,10 @@ void DAGQueryBlockInterpreter::executeImpl(DAGPipeline & pipeline)
else if (query_block.isTableScanSource())
{
TiDBTableScan table_scan(query_block.source, dagContext());
handleTableScan(table_scan, pipeline);
if (dagContext().isTest())
handleMockTableScan(table_scan, pipeline);
else
handleTableScan(table_scan, pipeline);
dagContext().table_scan_executor_id = query_block.source_name;
}
else if (query_block.source->tp() == tipb::ExecType::TypeWindow)
@@ -1089,14 +1109,12 @@ void DAGQueryBlockInterpreter::executeImpl(DAGPipeline & pipeline)
// execute aggregation
executeAggregation(pipeline, res.before_aggregation, res.aggregation_keys, res.aggregation_collators, res.aggregate_descriptions, res.is_final_agg);
}

if (res.before_having)
{
// execute having
executeWhere(pipeline, res.before_having, res.having_column_name);
recordProfileStreams(pipeline, query_block.having_name);
}

if (res.before_order_and_select)
{
executeExpression(pipeline, res.before_order_and_select);
@@ -1111,14 +1129,12 @@

// execute final project action
executeProject(pipeline, final_project);

// execute limit
if (query_block.limit_or_topn && query_block.limit_or_topn->tp() == tipb::TypeLimit)
{
executeLimit(pipeline);
recordProfileStreams(pipeline, query_block.limit_or_topn_name);
}

restorePipelineConcurrency(pipeline);

// execute exchange_sender
2 changes: 2 additions & 0 deletions dbms/src/Flash/Coprocessor/DAGQueryBlockInterpreter.h
@@ -18,6 +18,7 @@
#include <Flash/Coprocessor/ChunkCodec.h>
#include <Flash/Coprocessor/DAGPipeline.h>
#include <Flash/Coprocessor/DAGStorageInterpreter.h>
#include <Flash/Coprocessor/TiDBTableScan.h>
#include <Interpreters/AggregateDescription.h>
#include <Interpreters/Context.h>
#include <Interpreters/ExpressionActions.h>
@@ -57,6 +58,7 @@ class DAGQueryBlockInterpreter
private:
#endif
void executeImpl(DAGPipeline & pipeline);
void handleMockTableScan(const TiDBTableScan & table_scan, DAGPipeline & pipeline);
void handleTableScan(const TiDBTableScan & table_scan, DAGPipeline & pipeline);
void executeCastAfterTableScan(
const TiDBTableScan & table_scan,
55 changes: 55 additions & 0 deletions dbms/src/Flash/Coprocessor/GenSchemaAndColumn.cpp
@@ -0,0 +1,55 @@
// Copyright 2022 PingCAP, Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <Flash/Coprocessor/GenSchemaAndColumn.h>
#include <Storages/MutableSupport.h>

namespace DB
{
NamesAndTypes genNamesAndTypes(const TiDBTableScan & table_scan)
{
NamesAndTypes names_and_types;
names_and_types.reserve(table_scan.getColumnSize());
for (Int32 i = 0; i < table_scan.getColumnSize(); ++i)
{
TiDB::ColumnInfo column_info;
const auto & ci = table_scan.getColumns()[i];
column_info.tp = static_cast<TiDB::TP>(ci.tp());
column_info.id = ci.column_id();

switch (column_info.id)
{
case TiDBPkColumnID:
names_and_types.emplace_back(MutableSupport::tidb_pk_column_name, getDataTypeByColumnInfoForComputingLayer(column_info));
break;
case ExtraTableIDColumnID:
names_and_types.emplace_back(MutableSupport::extra_table_id_column_name, MutableSupport::extra_table_id_column_type);
break;
default:
names_and_types.emplace_back(fmt::format("mock_table_scan_{}", i), getDataTypeByColumnInfoForComputingLayer(column_info));
}
}
return names_and_types;
}

ColumnsWithTypeAndName getColumnWithTypeAndName(const NamesAndTypes & names_and_types)
{
std::vector<DB::ColumnWithTypeAndName> column_with_type_and_names;
column_with_type_and_names.reserve(names_and_types.size());
for (const auto & col : names_and_types)
{
column_with_type_and_names.push_back(DB::ColumnWithTypeAndName(col.type, col.name));
}
return column_with_type_and_names;
}
} // namespace DB
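The helper names ordinary columns `mock_table_scan_<index>` and maps the handle and extra-physical-table-id columns to their MutableSupport names. A hedged example of the expected output for a three-column mock scan (the column order, ids, and types are illustrative):

```cpp
// Illustrative only: naming produced by genNamesAndTypes when the scan's columns are
// an ordinary column, the TiDB pk column, and the extra physical-table-id column.
auto names_and_types = genNamesAndTypes(table_scan);      // table_scan built by the test
// names_and_types[0].name == "mock_table_scan_0"
// names_and_types[1].name == MutableSupport::tidb_pk_column_name
// names_and_types[2].name == MutableSupport::extra_table_id_column_name
auto columns = getColumnWithTypeAndName(names_and_types); // names and types only, no data
```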
26 changes: 26 additions & 0 deletions dbms/src/Flash/Coprocessor/GenSchemaAndColumn.h
@@ -0,0 +1,26 @@
// Copyright 2022 PingCAP, Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <Core/ColumnsWithTypeAndName.h>
#include <Core/NamesAndTypes.h>
#include <Flash/Coprocessor/TiDBTableScan.h>
#include <Storages/Transaction/TiDB.h>

namespace DB
{
NamesAndTypes genNamesAndTypes(const TiDBTableScan & table_scan);
ColumnsWithTypeAndName getColumnWithTypeAndName(const NamesAndTypes & names_and_types);
} // namespace DB
5 changes: 3 additions & 2 deletions dbms/src/Flash/Coprocessor/InterpreterDAG.cpp
@@ -28,8 +28,11 @@ InterpreterDAG::InterpreterDAG(Context & context_, const DAGQuerySource & dag_)
const Settings & settings = context.getSettingsRef();
if (dagContext().isBatchCop() || dagContext().isMPPTask())
max_streams = settings.max_threads;
else if (dagContext().isTest())
max_streams = dagContext().initialize_concurrency;
else
max_streams = 1;

if (max_streams > 1)
{
max_streams *= settings.max_streams_to_max_threads_ratio;
@@ -79,7 +82,6 @@ BlockIO InterpreterDAG::execute()
BlockInputStreams streams = executeQueryBlock(*dag.getRootQueryBlock());
DAGPipeline pipeline;
pipeline.streams = streams;

/// add union to run in parallel if needed
if (dagContext().isMPPTask())
/// MPPTask do not need the returned blocks.
@@ -95,7 +97,6 @@
SizeLimits(settings.max_rows_to_transfer, settings.max_bytes_to_transfer, settings.transfer_overflow_mode),
dagContext().log->identifier());
}

BlockIO res;
res.in = pipeline.firstStream();
return res;
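A worked example of the max_streams computation in the constructor above, for the test branch, assuming a DAGContext built with concurrency = 4 and a max_streams_to_max_threads_ratio of 1 (both values are illustrative):

```cpp
// Illustrative values only.
size_t max_streams = dag_context.initialize_concurrency;      // test branch: 4
if (max_streams > 1)
    max_streams *= settings.max_streams_to_max_threads_ratio; // 4 * 1 = 4
// The interpreter will then create 4 mock table scan streams in handleMockTableScan.
```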
2 changes: 0 additions & 2 deletions dbms/src/Flash/Coprocessor/TiDBTableScan.h
@@ -16,8 +16,6 @@

#include <Flash/Coprocessor/DAGContext.h>

#include <vector>

namespace DB
{
/// TiDBTableScan is a wrap to hide the difference of `TableScan` and `PartitionTableScan`