Skip to content

Commit

Permalink
Remove unused updatedColumns from IcebergTableHandle
Browse files Browse the repository at this point in the history
  • Loading branch information
electrum committed Mar 28, 2023
1 parent 38ada59 commit 786a281
Show file tree
Hide file tree
Showing 5 changed files with 19 additions and 53 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -407,7 +407,6 @@ public IcebergTableHandle getTableHandle(
table.location(),
table.properties(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
}
Expand Down Expand Up @@ -2326,7 +2325,6 @@ else if (isMetadataColumnId(columnHandle.getId())) {
table.getTableLocation(),
table.getStorageProperties(),
table.getRetryMode(),
table.getUpdatedColumns(),
table.isRecordScannedFiles(),
table.getMaxScannedFileSize()),
remainingConstraint.transformKeys(ColumnHandle.class::cast),
Expand Down Expand Up @@ -2455,7 +2453,6 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab
IcebergTableHandle originalHandle = (IcebergTableHandle) tableHandle;
// Certain table handle attributes are not applicable to select queries (which need stats).
// If this changes, the caching logic here may need to be revised.
checkArgument(originalHandle.getUpdatedColumns().isEmpty(), "Unexpected updated columns");
checkArgument(!originalHandle.isRecordScannedFiles(), "Unexpected scanned files recording set");
checkArgument(originalHandle.getMaxScannedFileSize().isEmpty(), "Unexpected max scanned file size set");

Expand All @@ -2476,7 +2473,6 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab
originalHandle.getTableLocation(),
originalHandle.getStorageProperties(),
NO_RETRIES, // retry mode doesn't affect stats
originalHandle.getUpdatedColumns(),
originalHandle.isRecordScannedFiles(),
originalHandle.getMaxScannedFileSize()),
handle -> {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,9 +51,6 @@ public class IcebergTableHandle
private final Map<String, String> storageProperties;
private final RetryMode retryMode;

// UPDATE only
private final List<IcebergColumnHandle> updatedColumns;

// Filter used during split generation and table scan, but not required to be strictly enforced by Iceberg Connector
private final TupleDomain<IcebergColumnHandle> unenforcedPredicate;

Expand Down Expand Up @@ -83,8 +80,7 @@ public static IcebergTableHandle fromJsonForDeserializationOnly(
@JsonProperty("nameMappingJson") Optional<String> nameMappingJson,
@JsonProperty("tableLocation") String tableLocation,
@JsonProperty("storageProperties") Map<String, String> storageProperties,
@JsonProperty("retryMode") RetryMode retryMode,
@JsonProperty("updatedColumns") List<IcebergColumnHandle> updatedColumns)
@JsonProperty("retryMode") RetryMode retryMode)
{
return new IcebergTableHandle(
schemaName,
Expand All @@ -102,7 +98,6 @@ public static IcebergTableHandle fromJsonForDeserializationOnly(
tableLocation,
storageProperties,
retryMode,
updatedColumns,
false,
Optional.empty());
}
Expand All @@ -123,7 +118,6 @@ public IcebergTableHandle(
String tableLocation,
Map<String, String> storageProperties,
RetryMode retryMode,
List<IcebergColumnHandle> updatedColumns,
boolean recordScannedFiles,
Optional<DataSize> maxScannedFileSize)
{
Expand All @@ -142,7 +136,6 @@ public IcebergTableHandle(
this.tableLocation = requireNonNull(tableLocation, "tableLocation is null");
this.storageProperties = ImmutableMap.copyOf(requireNonNull(storageProperties, "storageProperties is null"));
this.retryMode = requireNonNull(retryMode, "retryMode is null");
this.updatedColumns = ImmutableList.copyOf(requireNonNull(updatedColumns, "updatedColumns is null"));
this.recordScannedFiles = recordScannedFiles;
this.maxScannedFileSize = requireNonNull(maxScannedFileSize, "maxScannedFileSize is null");
}
Expand Down Expand Up @@ -238,12 +231,6 @@ public RetryMode getRetryMode()
return retryMode;
}

@JsonProperty
public List<IcebergColumnHandle> getUpdatedColumns()
{
return updatedColumns;
}

@JsonIgnore
public boolean isRecordScannedFiles()
{
Expand Down Expand Up @@ -284,7 +271,6 @@ public IcebergTableHandle withProjectedColumns(Set<IcebergColumnHandle> projecte
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
maxScannedFileSize);
}
Expand All @@ -307,30 +293,6 @@ public IcebergTableHandle withRetryMode(RetryMode retryMode)
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
maxScannedFileSize);
}

public IcebergTableHandle withUpdatedColumns(List<IcebergColumnHandle> updatedColumns)
{
return new IcebergTableHandle(
schemaName,
tableName,
tableType,
snapshotId,
tableSchemaJson,
sortOrder,
partitionSpecJson,
formatVersion,
unenforcedPredicate,
enforcedPredicate,
projectedColumns,
nameMappingJson,
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
maxScannedFileSize);
}
Expand All @@ -353,7 +315,6 @@ public IcebergTableHandle forOptimize(boolean recordScannedFiles, DataSize maxSc
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
Optional.of(maxScannedFileSize));
}
Expand Down Expand Up @@ -384,16 +345,31 @@ public boolean equals(Object o)
Objects.equals(nameMappingJson, that.nameMappingJson) &&
Objects.equals(tableLocation, that.tableLocation) &&
Objects.equals(retryMode, that.retryMode) &&
Objects.equals(updatedColumns, that.updatedColumns) &&
Objects.equals(storageProperties, that.storageProperties) &&
Objects.equals(maxScannedFileSize, that.maxScannedFileSize);
}

@Override
public int hashCode()
{
return Objects.hash(schemaName, tableName, tableType, snapshotId, tableSchemaJson, sortOrder, partitionSpecJson, formatVersion, unenforcedPredicate, enforcedPredicate,
projectedColumns, nameMappingJson, tableLocation, storageProperties, retryMode, updatedColumns, recordScannedFiles, maxScannedFileSize);
return Objects.hash(
schemaName,
tableName,
tableType,
snapshotId,
tableSchemaJson,
sortOrder,
partitionSpecJson,
formatVersion,
unenforcedPredicate,
enforcedPredicate,
projectedColumns,
nameMappingJson,
tableLocation,
storageProperties,
retryMode,
recordScannedFiles,
maxScannedFileSize);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,6 @@ private static ConnectorPageSource createTestingPageSource(HiveTransactionHandle
tablePath,
ImmutableMap.of(),
RetryMode.NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty()),
transaction);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,6 @@ public void testIncompleteDynamicFilterTimeout()
nationTable.location(),
nationTable.properties(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,6 @@ public void testProjectionPushdown()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));
Expand Down Expand Up @@ -252,7 +251,6 @@ public void testPredicatePushdown()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));
Expand Down Expand Up @@ -302,7 +300,6 @@ public void testColumnPruningProjectionPushdown()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));
Expand Down Expand Up @@ -363,7 +360,6 @@ public void testPushdownWithDuplicateExpressions()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));
Expand Down

0 comments on commit 786a281

Please sign in to comment.