Remove unused updatedColumns from IcebergTableHandle #16678

Merged
merged 1 commit on Mar 28, 2023
@@ -408,7 +408,6 @@ public IcebergTableHandle getTableHandle(
table.location(),
table.properties(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
}
@@ -2328,7 +2327,6 @@ else if (isMetadataColumnId(columnHandle.getId())) {
table.getTableLocation(),
table.getStorageProperties(),
table.getRetryMode(),
table.getUpdatedColumns(),
table.isRecordScannedFiles(),
table.getMaxScannedFileSize()),
remainingConstraint.transformKeys(ColumnHandle.class::cast),
@@ -2457,7 +2455,6 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab
IcebergTableHandle originalHandle = (IcebergTableHandle) tableHandle;
// Certain table handle attributes are not applicable to select queries (which need stats).
// If this changes, the caching logic here may need to be revised.
checkArgument(originalHandle.getUpdatedColumns().isEmpty(), "Unexpected updated columns");
checkArgument(!originalHandle.isRecordScannedFiles(), "Unexpected scanned files recording set");
checkArgument(originalHandle.getMaxScannedFileSize().isEmpty(), "Unexpected max scanned file size set");

@@ -2478,7 +2475,6 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab
originalHandle.getTableLocation(),
originalHandle.getStorageProperties(),
NO_RETRIES, // retry mode doesn't affect stats
originalHandle.getUpdatedColumns(),
originalHandle.isRecordScannedFiles(),
originalHandle.getMaxScannedFileSize()),
handle -> {
@@ -51,9 +51,6 @@ public class IcebergTableHandle
private final Map<String, String> storageProperties;
private final RetryMode retryMode;

// UPDATE only
private final List<IcebergColumnHandle> updatedColumns;

// Filter used during split generation and table scan, but not required to be strictly enforced by Iceberg Connector
private final TupleDomain<IcebergColumnHandle> unenforcedPredicate;

@@ -83,8 +80,7 @@ public static IcebergTableHandle fromJsonForDeserializationOnly(
@JsonProperty("nameMappingJson") Optional<String> nameMappingJson,
@JsonProperty("tableLocation") String tableLocation,
@JsonProperty("storageProperties") Map<String, String> storageProperties,
@JsonProperty("retryMode") RetryMode retryMode,
@JsonProperty("updatedColumns") List<IcebergColumnHandle> updatedColumns)
@JsonProperty("retryMode") RetryMode retryMode)
{
return new IcebergTableHandle(
schemaName,
@@ -102,7 +98,6 @@ public static IcebergTableHandle fromJsonForDeserializationOnly(
tableLocation,
storageProperties,
retryMode,
updatedColumns,
false,
Optional.empty());
}
@@ -123,7 +118,6 @@ public IcebergTableHandle(
String tableLocation,
Map<String, String> storageProperties,
RetryMode retryMode,
List<IcebergColumnHandle> updatedColumns,
boolean recordScannedFiles,
Optional<DataSize> maxScannedFileSize)
{
@@ -142,7 +136,6 @@ public IcebergTableHandle(
this.tableLocation = requireNonNull(tableLocation, "tableLocation is null");
this.storageProperties = ImmutableMap.copyOf(requireNonNull(storageProperties, "storageProperties is null"));
this.retryMode = requireNonNull(retryMode, "retryMode is null");
this.updatedColumns = ImmutableList.copyOf(requireNonNull(updatedColumns, "updatedColumns is null"));
this.recordScannedFiles = recordScannedFiles;
this.maxScannedFileSize = requireNonNull(maxScannedFileSize, "maxScannedFileSize is null");
}
@@ -238,12 +231,6 @@ public RetryMode getRetryMode()
return retryMode;
}

@JsonProperty
public List<IcebergColumnHandle> getUpdatedColumns()
{
return updatedColumns;
}

@JsonIgnore
public boolean isRecordScannedFiles()
{
@@ -284,7 +271,6 @@ public IcebergTableHandle withProjectedColumns(Set<IcebergColumnHandle> projecte
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
maxScannedFileSize);
}
@@ -307,30 +293,6 @@ public IcebergTableHandle withRetryMode(RetryMode retryMode)
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
maxScannedFileSize);
}

public IcebergTableHandle withUpdatedColumns(List<IcebergColumnHandle> updatedColumns)
{
return new IcebergTableHandle(
schemaName,
tableName,
tableType,
snapshotId,
tableSchemaJson,
sortOrder,
partitionSpecJson,
formatVersion,
unenforcedPredicate,
enforcedPredicate,
projectedColumns,
nameMappingJson,
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
maxScannedFileSize);
}
@@ -353,7 +315,6 @@ public IcebergTableHandle forOptimize(boolean recordScannedFiles, DataSize maxSc
tableLocation,
storageProperties,
retryMode,
updatedColumns,
recordScannedFiles,
Optional.of(maxScannedFileSize));
}
@@ -384,16 +345,31 @@ public boolean equals(Object o)
Objects.equals(nameMappingJson, that.nameMappingJson) &&
Objects.equals(tableLocation, that.tableLocation) &&
Objects.equals(retryMode, that.retryMode) &&
Objects.equals(updatedColumns, that.updatedColumns) &&
Objects.equals(storageProperties, that.storageProperties) &&
Objects.equals(maxScannedFileSize, that.maxScannedFileSize);
}

@Override
public int hashCode()
{
return Objects.hash(schemaName, tableName, tableType, snapshotId, tableSchemaJson, sortOrder, partitionSpecJson, formatVersion, unenforcedPredicate, enforcedPredicate,
projectedColumns, nameMappingJson, tableLocation, storageProperties, retryMode, updatedColumns, recordScannedFiles, maxScannedFileSize);
return Objects.hash(
schemaName,
tableName,
tableType,
snapshotId,
tableSchemaJson,
sortOrder,
partitionSpecJson,
formatVersion,
unenforcedPredicate,
enforcedPredicate,
projectedColumns,
nameMappingJson,
tableLocation,
storageProperties,
retryMode,
recordScannedFiles,
maxScannedFileSize);
}

@Override
@@ -190,7 +190,6 @@ private static ConnectorPageSource createTestingPageSource(HiveTransactionHandle
tablePath,
ImmutableMap.of(),
RetryMode.NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty()),
transaction);
@@ -130,7 +130,6 @@ public void testIncompleteDynamicFilterTimeout()
nationTable.location(),
nationTable.properties(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());

@@ -169,7 +169,6 @@ public void testProjectionPushdown()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));
@@ -252,7 +251,6 @@ public void testPredicatePushdown()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));
@@ -302,7 +300,6 @@ public void testColumnPruningProjectionPushdown()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));
@@ -363,7 +360,6 @@ public void testPushdownWithDuplicateExpressions()
"",
ImmutableMap.of(),
NO_RETRIES,
ImmutableList.of(),
false,
Optional.empty());
TableHandle table = new TableHandle(catalogHandle, icebergTable, new HiveTransactionHandle(false));