From a14b34e227d3c3fb474f12dff855df1b07a57437 Mon Sep 17 00:00:00 2001
From: Denis Hirn
Date: Wed, 17 Jul 2024 10:00:51 +0200
Subject: [PATCH] Revert "Improve EXPLAIN output of Delim Joins and Delim Gets"

This reverts commit 23ccd79b3b88109097bdc8dd66207ed6da7b650b.
---
 src/execution/physical_plan/plan_delim_get.cpp            | 1 -
 src/execution/physical_plan/plan_delim_join.cpp           | 8 +++-----
 src/include/duckdb/execution/physical_plan_generator.hpp  | 2 ++
 src/include/duckdb/planner/operator/logical_delim_get.hpp | 2 --
 .../duckdb/planner/subquery/flatten_dependent_join.hpp    | 2 --
 .../duckdb/storage/serialization/logical_operator.json    | 6 ------
 src/planner/binder/query_node/plan_subquery.cpp           | 8 --------
 src/planner/subquery/flatten_dependent_join.cpp           | 2 --
 src/storage/serialization/serialize_logical_operator.cpp  | 2 --
 9 files changed, 5 insertions(+), 28 deletions(-)

diff --git a/src/execution/physical_plan/plan_delim_get.cpp b/src/execution/physical_plan/plan_delim_get.cpp
index 122c9f9fa2f7..1b45efe21b4e 100644
--- a/src/execution/physical_plan/plan_delim_get.cpp
+++ b/src/execution/physical_plan/plan_delim_get.cpp
@@ -10,7 +10,6 @@ unique_ptr<PhysicalOperator> PhysicalPlanGenerator::CreatePlan(LogicalDelimGet &
 	// create a PhysicalChunkScan without an owned_collection, the collection will be added later
 	auto chunk_scan = make_uniq<PhysicalColumnDataScan>(op.types, PhysicalOperatorType::DELIM_SCAN,
 	                                                    op.estimated_cardinality, nullptr);
-	chunk_scan->delim_index = op.delim_idx;
 	return std::move(chunk_scan);
 }
 
diff --git a/src/execution/physical_plan/plan_delim_join.cpp b/src/execution/physical_plan/plan_delim_join.cpp
index 0c2bd366ed6e..60d0b8f11f7b 100644
--- a/src/execution/physical_plan/plan_delim_join.cpp
+++ b/src/execution/physical_plan/plan_delim_join.cpp
@@ -45,16 +45,14 @@ unique_ptr<PhysicalOperator> PhysicalPlanGenerator::PlanDelimJoin(LogicalCompari
 	// now create the duplicate eliminated join
 	unique_ptr<PhysicalDelimJoin> delim_join;
 	if (op.delim_flipped) {
-		delim_join = make_uniq<PhysicalRightDelimJoin>(op.types, std::move(plan), delim_scans, op.estimated_cardinality,
-		                                               op.mark_index);
+		delim_join =
+		    make_uniq<PhysicalRightDelimJoin>(op.types, std::move(plan), delim_scans, op.estimated_cardinality, optional_idx(++this->delim_index));
 	} else {
-		delim_join = make_uniq<PhysicalLeftDelimJoin>(op.types, std::move(plan), delim_scans, op.estimated_cardinality,
-		                                              op.mark_index);
+		delim_join = make_uniq<PhysicalLeftDelimJoin>(op.types, std::move(plan), delim_scans, op.estimated_cardinality, optional_idx(++this->delim_index));
 	}
 	// we still have to create the DISTINCT clause that is used to generate the duplicate eliminated chunk
 	delim_join->distinct = make_uniq<PhysicalHashAggregate>(context, delim_types, std::move(distinct_expressions),
 	                                                        std::move(distinct_groups), op.estimated_cardinality);
-
 	return std::move(delim_join);
 }
 
diff --git a/src/include/duckdb/execution/physical_plan_generator.hpp b/src/include/duckdb/execution/physical_plan_generator.hpp
index 50d04dfc1339..b759e1cbe60b 100644
--- a/src/include/duckdb/execution/physical_plan_generator.hpp
+++ b/src/include/duckdb/execution/physical_plan_generator.hpp
@@ -105,6 +105,8 @@ class PhysicalPlanGenerator {
 	bool PreserveInsertionOrder(PhysicalOperator &plan);
 	bool UseBatchIndex(PhysicalOperator &plan);
 
+	idx_t delim_index = 0;
+
 private:
 	ClientContext &context;
 };
diff --git a/src/include/duckdb/planner/operator/logical_delim_get.hpp b/src/include/duckdb/planner/operator/logical_delim_get.hpp
index 44e7d5d6daf9..895883b16cc9 100644
--- a/src/include/duckdb/planner/operator/logical_delim_get.hpp
+++ b/src/include/duckdb/planner/operator/logical_delim_get.hpp
@@ -28,8 +28,6 @@ class LogicalDelimGet : public LogicalOperator {
 	idx_t table_index;
 	//! The types of the chunk
 	vector<LogicalType> chunk_types;
-	//! Delim Join Index
-	optional_idx delim_idx;
 
 public:
 	vector<ColumnBinding> GetColumnBindings() override {
diff --git a/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp b/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp
index d54e1778fcbf..991e084c42ab 100644
--- a/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp
+++ b/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp
@@ -44,8 +44,6 @@ struct FlattenDependentJoins {
 	bool perform_delim;
 	bool any_join;
 
-	idx_t delim_root_idx;
-
private:
 	unique_ptr<LogicalOperator> PushDownDependentJoinInternal(unique_ptr<LogicalOperator> plan,
 	                                                          bool &parent_propagate_null_values, idx_t lateral_depth);
diff --git a/src/include/duckdb/storage/serialization/logical_operator.json b/src/include/duckdb/storage/serialization/logical_operator.json
index 5cda48b74511..28eefe9261fa 100644
--- a/src/include/duckdb/storage/serialization/logical_operator.json
+++ b/src/include/duckdb/storage/serialization/logical_operator.json
@@ -266,12 +266,6 @@
         "id": 201,
         "name": "chunk_types",
         "type": "vector<LogicalType>"
-      },
-      {
-        "id": 202,
-        "name": "delim_idx",
-        "type": "optional_idx",
-        "default": "optional_idx()"
       }
     ],
     "constructor": ["table_index", "chunk_types"]
diff --git a/src/planner/binder/query_node/plan_subquery.cpp b/src/planner/binder/query_node/plan_subquery.cpp
index cb2afc733a4c..af70d7e033e0 100644
--- a/src/planner/binder/query_node/plan_subquery.cpp
+++ b/src/planner/binder/query_node/plan_subquery.cpp
@@ -243,17 +243,14 @@ static unique_ptr<LogicalOperator> PlanCorrelatedSubquery(Binder &binder, BoundSubque
 		// the input value NULL will generate the value 42, and we need to join NULL on the LHS with NULL on the RHS
 		// the left side is the original plan
 		// this is the side that will be duplicate eliminated and pushed into the RHS
-		idx_t mark_index = binder.GenerateTableIndex();
 		auto delim_join =
 		    CreateDuplicateEliminatedJoin(correlated_columns, JoinType::SINGLE, std::move(root), perform_delim);
-		delim_join->mark_index = mark_index;
 
 		// the right side initially is a DEPENDENT join between the duplicate eliminated scan and the subquery
 		// HOWEVER: we do not explicitly create the dependent join
 		// instead, we eliminate the dependent join by pushing it down into the right side of the plan
 
 		FlattenDependentJoins flatten(binder, correlated_columns, perform_delim);
-		flatten.delim_root_idx = mark_index;
 		// first we check which logical operators have correlated expressions in the first place
 		flatten.DetectCorrelatedExpressions(*plan);
 		// now we push the dependent join down
@@ -280,7 +277,6 @@ static unique_ptr<LogicalOperator> PlanCorrelatedSubquery(Binder &binder, BoundSubque
 		delim_join->mark_index = mark_index;
 		// RHS
 		FlattenDependentJoins flatten(binder, correlated_columns, perform_delim, true);
-		flatten.delim_root_idx = mark_index;
 		flatten.DetectCorrelatedExpressions(*plan);
 		auto dependent_join = flatten.PushDownDependentJoin(std::move(plan));
 
@@ -309,7 +305,6 @@ static unique_ptr<LogicalOperator> PlanCorrelatedSubquery(Binder &binder, BoundSubque
 		delim_join->mark_index = mark_index;
 		// RHS
 		FlattenDependentJoins flatten(binder, correlated_columns, true, true);
-		flatten.delim_root_idx = mark_index;
 		flatten.DetectCorrelatedExpressions(*plan);
 		auto dependent_join = flatten.PushDownDependentJoin(std::move(plan));
 
@@ -427,12 +422,9 @@ unique_ptr<LogicalOperator> Binder::PlanLateralJoin(unique_ptr<LogicalOperator>
 	}
 
 	auto perform_delim = PerformDuplicateElimination(*this, correlated);
-	idx_t delim_idx = GenerateTableIndex();
 	auto delim_join = CreateDuplicateEliminatedJoin(correlated, join_type, std::move(left), perform_delim);
-	delim_join->mark_index = delim_idx;
 
 	FlattenDependentJoins flatten(*this, correlated, perform_delim);
-	flatten.delim_root_idx = delim_idx;
 
 	// first we check which logical operators have correlated expressions in the first place
 	flatten.DetectCorrelatedExpressions(*right, true);
diff --git a/src/planner/subquery/flatten_dependent_join.cpp b/src/planner/subquery/flatten_dependent_join.cpp
index 2b2c7d8c0896..4a8a128fc39a 100644
--- a/src/planner/subquery/flatten_dependent_join.cpp
+++ b/src/planner/subquery/flatten_dependent_join.cpp
@@ -146,7 +146,6 @@ unique_ptr<LogicalOperator> FlattenDependentJoins::PushDownDependentJoinInternal
 			delim_offset = left_columns;
 			data_offset = 0;
 			delim_scan = make_uniq<LogicalDelimGet>(delim_index, delim_types);
-			delim_scan->delim_idx = optional_idx(delim_root_idx);
 			if (plan->type == LogicalOperatorType::LOGICAL_PROJECTION) {
 				// we want to keep the logical projection for positionality.
 				exit_projection = true;
@@ -270,7 +269,6 @@ unique_ptr<LogicalOperator> FlattenDependentJoins::PushDownDependentJoinInternal
 		}
 		auto left_index = binder.GenerateTableIndex();
 		delim_scan = make_uniq<LogicalDelimGet>(left_index, delim_types);
-		delim_scan->delim_idx = optional_idx(delim_root_idx);
 		join->children.push_back(std::move(delim_scan));
 		join->children.push_back(std::move(plan));
 		for (idx_t i = 0; i < new_group_count; i++) {
diff --git a/src/storage/serialization/serialize_logical_operator.cpp b/src/storage/serialization/serialize_logical_operator.cpp
index 5dc3beba346a..055c4e773abd 100644
--- a/src/storage/serialization/serialize_logical_operator.cpp
+++ b/src/storage/serialization/serialize_logical_operator.cpp
@@ -377,14 +377,12 @@ void LogicalDelimGet::Serialize(Serializer &serializer) const {
 	LogicalOperator::Serialize(serializer);
 	serializer.WritePropertyWithDefault<idx_t>(200, "table_index", table_index);
 	serializer.WritePropertyWithDefault<vector<LogicalType>>(201, "chunk_types", chunk_types);
-	serializer.WritePropertyWithDefault<optional_idx>(202, "delim_idx", delim_idx, optional_idx());
 }
 
 unique_ptr<LogicalOperator> LogicalDelimGet::Deserialize(Deserializer &deserializer) {
 	auto table_index = deserializer.ReadPropertyWithDefault<idx_t>(200, "table_index");
 	auto chunk_types = deserializer.ReadPropertyWithDefault<vector<LogicalType>>(201, "chunk_types");
 	auto result = duckdb::unique_ptr<LogicalDelimGet>(new LogicalDelimGet(table_index, std::move(chunk_types)));
-	deserializer.ReadPropertyWithDefault<optional_idx>(202, "delim_idx", result->delim_idx, optional_idx());
 	return std::move(result);
 }
 