Remove deprecated methods from Java Table class #9853

Merged 1 commit on Dec 7, 2021
89 changes: 4 additions & 85 deletions java/src/main/java/ai/rapids/cudf/Table.java
@@ -1091,20 +1091,6 @@ public static void writeColumnViewsToParquet(ParquetWriterOptions options,
}
}

/**
* Writes this table to a Parquet file on the host
*
* @param options parameters for the writer
* @param outputFile file to write the table to
* @deprecated please use writeParquetChunked instead
*/
@Deprecated
public void writeParquet(ParquetWriterOptions options, File outputFile) {
try (TableWriter writer = writeParquetChunked(options, outputFile)) {
writer.write(this);
}
}
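
Callers of the removed writeParquet(ParquetWriterOptions, File) can reproduce it with the chunked writer, exactly as the deleted body above did. A minimal migration sketch, assuming the Table, TableWriter, and ParquetWriterOptions classes all live in the ai.rapids.cudf package and that the caller already has a table, options, and output file:

import java.io.File;

import ai.rapids.cudf.ParquetWriterOptions;
import ai.rapids.cudf.Table;
import ai.rapids.cudf.TableWriter;

class ParquetWriteMigration {
  // Single-shot write via the chunked API: open a writer for the target file
  // and write the whole table as one chunk, mirroring the removed method.
  static void writeParquet(Table table, ParquetWriterOptions options, File outputFile) {
    try (TableWriter writer = Table.writeParquetChunked(options, outputFile)) {
      writer.write(table);
    }
  }
}

The chunked form also accepts repeated write calls when data arrives in batches, which is why it replaces the one-shot helper.
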

private static class ORCTableWriter implements TableWriter {
private long handle;
HostBufferConsumer consumer;
@@ -1179,33 +1165,6 @@ public static TableWriter writeORCChunked(ORCWriterOptions options, HostBufferCo
return new ORCTableWriter(options, consumer);
}

/**
* Writes this table to a file on the host.
* @param outputFile - File to write the table to
* @deprecated please use writeORCChunked instead
*/
@Deprecated
public void writeORC(File outputFile) {
// Need to specify the number of columns but leave all column names undefined
String[] names = new String[getNumberOfColumns()];
Arrays.fill(names, "");
ORCWriterOptions opts = ORCWriterOptions.builder().withColumns(true, names).build();
writeORC(opts, outputFile);
}

/**
* Writes this table to a file on the host.
* @param outputFile - File to write the table to
* @deprecated please use writeORCChunked instead
*/
@Deprecated
public void writeORC(ORCWriterOptions options, File outputFile) {
assert options.getTopLevelChildren() == getNumberOfColumns() : "must specify names for all columns";
try (TableWriter writer = Table.writeORCChunked(options, outputFile)) {
writer.write(this);
}
}
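
The removed ORC helpers follow the same pattern. A sketch covering both deleted overloads, mirroring their bodies above; the empty-column-name fallback is kept only to show what the no-options overload used to do, and real callers should supply meaningful names:

import java.io.File;
import java.util.Arrays;

import ai.rapids.cudf.ORCWriterOptions;
import ai.rapids.cudf.Table;
import ai.rapids.cudf.TableWriter;

class OrcWriteMigration {
  // Replacement for the removed writeORC(ORCWriterOptions, File).
  static void writeORC(Table table, ORCWriterOptions options, File outputFile) {
    try (TableWriter writer = Table.writeORCChunked(options, outputFile)) {
      writer.write(table);
    }
  }

  // Replacement for the removed writeORC(File), which filled in empty names
  // for every column before delegating to the options-based overload.
  static void writeORCWithDefaultNames(Table table, File outputFile) {
    String[] names = new String[table.getNumberOfColumns()];
    Arrays.fill(names, "");
    ORCWriterOptions options = ORCWriterOptions.builder()
        .withColumns(true, names)
        .build();
    writeORC(table, options, outputFile);
  }
}
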

private static class ArrowIPCTableWriter implements TableWriter {
private final ArrowIPCWriterOptions.DoneOnGpu callback;
private long handle;
@@ -2082,26 +2041,6 @@ public Table gather(ColumnView gatherMap) {
return gather(gatherMap, OutOfBoundsPolicy.NULLIFY);
}

/**
* Gathers the rows of this table according to `gatherMap` such that row "i"
* in the resulting table's columns will contain row "gatherMap[i]" from this table.
* The number of rows in the result table will be equal to the number of elements in
* `gatherMap`.
*
* A negative value `i` in the `gatherMap` is interpreted as `i+n`, where
* `n` is the number of rows in this table.
*
* @deprecated Use {@link #gather(ColumnView, OutOfBoundsPolicy)}
* @param gatherMap the map of indexes. Must be non-nullable and integral type.
* @param checkBounds if true bounds checking is performed on the value. Be very careful
* when setting this to false.
* @return the resulting Table.
*/
@Deprecated
public Table gather(ColumnView gatherMap, boolean checkBounds) {
return new Table(gather(nativeHandle, gatherMap.getNativeView(), checkBounds));
}
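
For the removed gather(ColumnView, boolean) overload, the boolean flag maps onto an OutOfBoundsPolicy value. A hedged sketch of one way to translate existing call sites; NULLIFY is the policy used by the remaining no-policy overload, while DONT_CHECK is an assumed enum value for skipping the bounds check:

import ai.rapids.cudf.ColumnView;
import ai.rapids.cudf.OutOfBoundsPolicy;
import ai.rapids.cudf.Table;

class GatherMigration {
  // checkBounds == true previously requested bounds checking; NULLIFY is the
  // safe policy used by gather(ColumnView). DONT_CHECK (assumed name) skips
  // the check and is only safe when every index is known to be in range.
  static Table gather(Table table, ColumnView gatherMap, boolean checkBounds) {
    OutOfBoundsPolicy policy = checkBounds ? OutOfBoundsPolicy.NULLIFY
                                           : OutOfBoundsPolicy.DONT_CHECK;
    return table.gather(gatherMap, policy);
  }
}
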

/**
* Gathers the rows of this table according to `gatherMap` such that row "i"
* in the resulting table's columns will contain row "gatherMap[i]" from this table.
@@ -2256,7 +2195,7 @@ public GatherMap[] conditionalLeftJoinGatherMaps(Table rightTable,
* the left and right tables, respectively, to produce the result of the left join.
* It is the responsibility of the caller to close the resulting gather map instances.
* This interface allows passing an output row count that was previously computed from
* {@link #conditionalLeftJoinRowCount(Table, CompiledExpression, boolean)}.
* {@link #conditionalLeftJoinRowCount(Table, CompiledExpression)}.
* WARNING: Passing a row count that is smaller than the actual row count will result
* in undefined behavior.
@param rightTable the right side table of the join
@@ -2396,7 +2335,7 @@ public GatherMap[] conditionalInnerJoinGatherMaps(Table rightTable,
* the left and right tables, respectively, to produce the result of the inner join.
* It is the responsibility of the caller to close the resulting gather map instances.
* This interface allows passing an output row count that was previously computed from
* {@link #conditionalInnerJoinRowCount(Table, CompiledExpression, boolean)}.
* {@link #conditionalInnerJoinRowCount(Table, CompiledExpression)}.
* WARNING: Passing a row count that is smaller than the actual row count will result
* in undefined behavior.
@param rightTable the right side table of the join
@@ -2588,7 +2527,7 @@ public GatherMap conditionalLeftSemiJoinGatherMap(Table rightTable,
* to produce the result of the left semi join.
* It is the responsibility of the caller to close the resulting gather map instance.
* This interface allows passing an output row count that was previously computed from
* {@link #conditionalLeftSemiJoinRowCount(Table, CompiledExpression, boolean)}.
* {@link #conditionalLeftSemiJoinRowCount(Table, CompiledExpression)}.
* WARNING: Passing a row count that is smaller than the actual row count will result
* in undefined behavior.
* @param rightTable the right side table of the join
@@ -2667,7 +2606,7 @@ public GatherMap conditionalLeftAntiJoinGatherMap(Table rightTable,
* to produce the result of the left anti join.
* It is the responsibility of the caller to close the resulting gather map instance.
* This interface allows passing an output row count that was previously computed from
* {@link #conditionalLeftAntiJoinRowCount(Table, CompiledExpression, boolean)}.
* {@link #conditionalLeftAntiJoinRowCount(Table, CompiledExpression)}.
* WARNING: Passing a row count that is smaller than the actual row count will result
* in undefined behavior.
* @param rightTable the right side table of the join
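
The four javadoc corrections above all describe the same two-step pattern: compute the output row count once with the matching RowCount method, then pass it to the gather-map overload so the join size is not computed twice. A sketch for the left join variant, assuming the row-count method returns a long and that CompiledExpression lives in ai.rapids.cudf.ast:

import ai.rapids.cudf.GatherMap;
import ai.rapids.cudf.Table;
import ai.rapids.cudf.ast.CompiledExpression;

class ConditionalJoinSketch {
  // Size the join once, then reuse the count when materializing gather maps.
  // Per the javadoc above, passing a count smaller than the real one is
  // undefined behavior, so the count must come from the matching RowCount call.
  static GatherMap[] leftJoinGatherMaps(Table left, Table right, CompiledExpression condition) {
    long rowCount = left.conditionalLeftJoinRowCount(right, condition);
    return left.conditionalLeftJoinGatherMaps(right, condition, rowCount);
  }
}
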
@@ -3449,14 +3388,6 @@ public ContiguousTable[] contiguousSplitGroups() {
groupByOptions.getKeysDescending(),
groupByOptions.getKeysNullSmallest());
}

/**
* @deprecated use aggregateWindowsOverRanges
*/
@Deprecated
public Table aggregateWindowsOverTimeRanges(AggregationOverWindow... windowAggregates) {
return aggregateWindowsOverRanges(windowAggregates);
}
}
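
The removed aggregateWindowsOverTimeRanges simply forwarded to aggregateWindowsOverRanges, so the migration is a rename with the same arguments. A small sketch, assuming the enclosing grouping class is Table.GroupByOperation as the surrounding context suggests:

import ai.rapids.cudf.AggregationOverWindow;
import ai.rapids.cudf.Table;

class WindowAggMigration {
  // Same varargs, new method name; behavior is unchanged because the removed
  // method was a one-line delegate to aggregateWindowsOverRanges.
  static Table aggregateOverRanges(Table.GroupByOperation grouped,
                                   AggregationOverWindow... windowAggregates) {
    return grouped.aggregateWindowsOverRanges(windowAggregates);
  }
}
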

public static final class TableOperation {
Expand Down Expand Up @@ -3651,18 +3582,6 @@ public PartitionedTable hashPartition(HashType type, int numberOfPartitions) {
partitionOffsets.length,
partitionOffsets)), partitionOffsets);
}

/**
* Hash partition a table into the specified number of partitions.
* @deprecated Use {@link #hashPartition(int)}
* @param numberOfPartitions - number of partitions to use
* @return - {@link PartitionedTable} - Table that exposes a limited functionality of the
* {@link Table} class
*/
@Deprecated
public PartitionedTable partition(int numberOfPartitions) {
return hashPartition(numberOfPartitions);
}
}
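
Likewise, the removed TableOperation.partition(int) was a direct delegate, so call sites only need to switch to hashPartition(int) on the same TableOperation handle. A minimal sketch:

import ai.rapids.cudf.PartitionedTable;
import ai.rapids.cudf.Table;

class PartitionMigration {
  // The deleted method returned hashPartition(numberOfPartitions) unchanged,
  // so this wrapper is behavior-preserving.
  static PartitionedTable partition(Table.TableOperation op, int numberOfPartitions) {
    return op.hashPartition(numberOfPartitions);
  }
}
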

/////////////////////////////////////////////////////////////////////////////
37 changes: 15 additions & 22 deletions java/src/test/java/ai/rapids/cudf/TableTest.java
@@ -7177,19 +7177,6 @@ void testORCWriteMapChunked() throws IOException {
}
}

@Test
void testORCWriteToFile() throws IOException {
File tempFile = File.createTempFile("test", ".orc");
try (Table table0 = getExpectedFileTable(WriteUtils.getNonNestedColumns(false))) {
table0.writeORC(tempFile.getAbsoluteFile());
try (Table table1 = Table.readORC(tempFile.getAbsoluteFile())) {
assertTablesAreEqual(table0, table1);
}
} finally {
tempFile.delete();
}
}

@Test
void testORCWriteToFileWithColNames() throws IOException {
File tempFile = File.createTempFile("test", ".orc");
@@ -7198,7 +7185,9 @@ void testORCWriteToFileWithColNames() throws IOException {
ORCWriterOptions.Builder optBuilder = ORCWriterOptions.builder();
WriteUtils.buildWriterOptions(optBuilder, colNames);
ORCWriterOptions options = optBuilder.build();
table0.writeORC(options, tempFile.getAbsoluteFile());
try (TableWriter writer = Table.writeORCChunked(options, tempFile.getAbsoluteFile())) {
writer.write(table0);
}
ORCOptions opts = ORCOptions.builder().includeColumn(colNames).build();
try (Table table1 = Table.readORC(opts, tempFile.getAbsoluteFile())) {
assertTablesAreEqual(table0, table1);
@@ -7217,7 +7206,9 @@ void testORCReadAndWriteForDecimal128() throws IOException {
ORCWriterOptions.Builder optBuilder = ORCWriterOptions.builder();
WriteUtils.buildWriterOptions(optBuilder, colNames);
ORCWriterOptions options = optBuilder.build();
table0.writeORC(options, tempFile.getAbsoluteFile());
try (TableWriter writer = Table.writeORCChunked(options, tempFile.getAbsoluteFile())) {
writer.write(table0);
}
ORCOptions opts = ORCOptions.builder()
.includeColumn(colNames)
.decimal128Column(Columns.DECIMAL128.name,
@@ -7236,13 +7227,15 @@ void testORCWriteToFileUncompressed() throws IOException {
void testORCWriteToFileUncompressed() throws IOException {
File tempFileUncompressed = File.createTempFile("test-uncompressed", ".orc");
try (Table table0 = getExpectedFileTable(WriteUtils.getNonNestedColumns(false))) {
String[] colNames = new String[table0.getNumberOfColumns()];
Arrays.fill(colNames, "");
ORCWriterOptions opts = ORCWriterOptions.builder()
.withColumns(true, colNames)
.withCompressionType(CompressionType.NONE)
.build();
table0.writeORC(opts, tempFileUncompressed.getAbsoluteFile());
String[] colNames = WriteUtils.getNonNestedColumns(false);
ORCWriterOptions.Builder optsBuilder = ORCWriterOptions.builder();
WriteUtils.buildWriterOptions(optsBuilder, colNames);
optsBuilder.withCompressionType(CompressionType.NONE);
ORCWriterOptions opts = optsBuilder.build();
try (TableWriter writer =
Table.writeORCChunked(opts, tempFileUncompressed.getAbsoluteFile())) {
writer.write(table0);
}
try (Table table2 = Table.readORC(tempFileUncompressed.getAbsoluteFile())) {
assertTablesAreEqual(table0, table2);
}