
Commit 22e44de

resolve comment
Signed-off-by: stephen <[email protected]>
stephen-shelby committed Oct 14, 2024
1 parent 9721ede commit 22e44de
Showing 2 changed files with 0 additions and 31 deletions.
23 changes: 0 additions & 23 deletions be/src/exec/hdfs_scanner.cpp
@@ -594,29 +594,6 @@ void HdfsScannerContext::append_or_update_column_to_chunk(ChunkPtr* chunk, size_
    ck->set_num_rows(row_count);
}

void HdfsScannerContext::append_or_update_extended_column_to_chunk(ChunkPtr* chunk, size_t row_count) {
    if (extended_columns.size() == 0) return;
    ChunkPtr& ck = (*chunk);
    for (size_t i = 0; i < extended_columns.size(); i++) {
        SlotDescriptor* slot_desc = extended_columns[i].slot_desc;
        DCHECK(extended_values[i]->is_constant());
        auto* const_column = ColumnHelper::as_raw_column<ConstColumn>(extended_values[i]);
        ColumnPtr data_column = const_column->data_column();
        auto chunk_extended_column = ColumnHelper::create_column(slot_desc->type(), slot_desc->is_nullable());

        if (row_count > 0) {
            if (data_column->is_nullable()) {
                chunk_extended_column->append_nulls(1);
            } else {
                chunk_extended_column->append(*data_column, 0, 1);
            }
            chunk_extended_column->assign(row_count, 0);
        }
        ck->append_or_update_column(std::move(chunk_extended_column), slot_desc->id());
    }
    ck->set_num_rows(row_count);
}

bool HdfsScannerContext::can_use_dict_filter_on_slot(SlotDescriptor* slot) const {
    if (!slot->type().is_string_type()) {
        return false;
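For context on the first file: the 23 deleted lines drop HdfsScannerContext::append_or_update_extended_column_to_chunk, a helper that took each extended column's constant value and materialized it into a full column of row_count rows before attaching it to the chunk. Below is a hedged, standalone sketch of that expand-a-constant pattern only; the names NullableColumn and expand_const_value are simplified stand-ins, not the actual StarRocks Column/ConstColumn API used in the deleted code.

// Standalone illustration of replicating one constant (possibly null) value into
// row_count rows, roughly what the deleted helper did via ConstColumn::data_column()
// and Column::assign(). Stand-in types only; not StarRocks code.
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

using NullableColumn = std::vector<std::optional<int>>;  // simplified nullable int column

NullableColumn expand_const_value(const std::optional<int>& const_value, size_t row_count) {
    NullableColumn column;
    column.reserve(row_count);
    // Replicate the single value (or null) until the column has row_count entries,
    // so it lines up with the other columns already in the chunk.
    for (size_t i = 0; i < row_count; ++i) {
        column.push_back(const_value);
    }
    return column;
}

int main() {
    NullableColumn filled = expand_const_value(std::optional<int>{7}, 3);
    for (const auto& value : filled) {
        std::cout << (value.has_value() ? std::to_string(*value) : "NULL") << '\n';
    }
    return 0;
}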
8 changes: 0 additions & 8 deletions in the second changed file (an Iceberg FE test)
@@ -16,10 +16,7 @@

import com.google.common.collect.Lists;
import com.starrocks.catalog.Database;
<<<<<<< HEAD
import com.starrocks.common.ExceptionChecker;
=======
>>>>>>> 162e88d6db (eq-delete)
import com.starrocks.common.Pair;
import com.starrocks.connector.exception.StarRocksConnectorException;
import com.starrocks.connector.iceberg.IcebergMetadata;
@@ -490,7 +487,6 @@ org.apache.iceberg.Table getTable(String dbName, String tableName) throws StarRo
                () -> UtFrameUtils.getFragmentPlan(starRocksAssert.getCtx(), sql));

        starRocksAssert.getCtx().getSessionVariable().setEnableReadIcebergEqDeleteWithPartitionEvolution(true);

        String plan = UtFrameUtils.getFragmentPlan(starRocksAssert.getCtx(), sql);
        assertContains(plan, "4:Project\n" +
                " | <slot 2> : 2: k2\n" +
@@ -553,8 +549,4 @@ org.apache.iceberg.Table getTable(String dbName, String tableName) throws StarRo
}
}
}
<<<<<<< HEAD
=======

>>>>>>> 162e88d6db (eq-delete)
}
