diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/TestTimestamp.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/TestTimestamp.java
index faa7da7cfd9f..a99d58060328 100644
--- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/TestTimestamp.java
+++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/TestTimestamp.java
@@ -24,12 +24,14 @@
 import io.trino.spi.connector.ConnectorPageSource;
 import io.trino.spi.connector.ConnectorSession;
 import io.trino.spi.type.SqlTimestamp;
+import io.trino.spi.type.Type;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
 import org.apache.parquet.schema.MessageType;
 import org.testng.annotations.Test;
+import java.io.IOException;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Optional;
@@ -55,9 +57,9 @@ public void testTimestampBackedByInt64()
     {
         MessageType parquetSchema = parseMessageType("message hive_timestamp { optional int64 test (TIMESTAMP_MILLIS); }");
         ContiguousSet<Long> epochMillisValues = ContiguousSet.create(Range.closedOpen((long) -1_000, (long) 1_000), DiscreteDomain.longs());
-        ImmutableList.Builder<SqlTimestamp> timestamps = new ImmutableList.Builder<>();
+        ImmutableList.Builder<SqlTimestamp> timestampsMillis = new ImmutableList.Builder<>();
         for (long value : epochMillisValues) {
-            timestamps.add(SqlTimestamp.fromMillis(3, value));
+            timestampsMillis.add(SqlTimestamp.fromMillis(3, value));
         }

         List<ObjectInspector> objectInspectors = singletonList(javaLongObjectInspector);
@@ -79,38 +81,42 @@ public void testTimestampBackedByInt64()
                     Optional.of(parquetSchema),
                     false);

-            Iterator<?> expectedValues = timestamps.build().iterator();
-            try (ConnectorPageSource pageSource = StandardFileFormats.TRINO_PARQUET.createFileFormatReader(session, HDFS_ENVIRONMENT, tempFile.getFile(), columnNames, ImmutableList.of(TIMESTAMP_MILLIS))) {
-                // skip a page to exercise the decoder's skip() logic
-                Page firstPage = pageSource.getNextPage();
-
-                assertTrue(firstPage.getPositionCount() > 0, "Expected first page to have at least 1 row");
-
-                for (int i = 0; i < firstPage.getPositionCount(); i++) {
-                    expectedValues.next();
-                }
-
-                int pageCount = 1;
-                while (!pageSource.isFinished()) {
-                    Page page = pageSource.getNextPage();
-                    if (page == null) {
-                        continue;
-                    }
-                    pageCount++;
-                    Block block = page.getBlock(0);
-
-                    for (int i = 0; i < block.getPositionCount(); i++) {
-                        assertThat(TIMESTAMP_MILLIS.getObjectValue(session, block, i))
-                                .isEqualTo(expectedValues.next());
-                    }
-                }
-
-                assertThat(pageCount)
-                        .withFailMessage("Expected more than one page but processed %s", pageCount)
-                        .isGreaterThan(1);
-
-                assertFalse(expectedValues.hasNext(), "Read fewer values than expected");
-            }
-        }
+            testReadingAs(TIMESTAMP_MILLIS, session, tempFile, columnNames, timestampsMillis.build());
+        }
+    }
+
+    private void testReadingAs(Type type, ConnectorSession session, ParquetTester.TempFile tempFile, List<String> columnNames, List<?> expectedValues)
+            throws IOException
+    {
+        Iterator<?> expected = expectedValues.iterator();
+        try (ConnectorPageSource pageSource = StandardFileFormats.TRINO_PARQUET.createFileFormatReader(session, HDFS_ENVIRONMENT, tempFile.getFile(), columnNames, ImmutableList.of(type))) {
+            // skip a page to exercise the decoder's skip() logic
+            Page firstPage = pageSource.getNextPage();
+            assertTrue(firstPage.getPositionCount() > 0, "Expected first page to have at least 1 row");
+
+            for (int i = 0; i < firstPage.getPositionCount(); i++) {
+                expected.next();
+            }
+
+            int pageCount = 1;
+            while (!pageSource.isFinished()) {
+                Page page = pageSource.getNextPage();
+                if (page == null) {
+                    continue;
+                }
+                pageCount++;
+                Block block = page.getBlock(0);
+
+                for (int i = 0; i < block.getPositionCount(); i++) {
+                    assertThat(type.getObjectValue(session, block, i)).isEqualTo(expected.next());
+                }
+            }
+
+            assertThat(pageCount)
+                    .withFailMessage("Expected more than one page but processed %s", pageCount)
+                    .isGreaterThan(1);
+
+            assertFalse(expected.hasNext(), "Read fewer values than expected");
+        }
     }
 }
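
Follow-up sketch (illustration only, not part of the diff above): because testReadingAs now receives the Trino Type as a parameter, the same read-and-verify loop could be reused for other int64-backed timestamp precisions. The fragment below is hypothetical; it assumes it lives in the same test class, that a Parquet file with an int64 column annotated as TIMESTAMP_MICROS has already been written into tempFile through the existing ParquetTester harness, and that epochMicrosValues, session, and columnNames are prepared the same way as in testTimestampBackedByInt64.

    // Hypothetical reuse of the extracted helper for microsecond precision (assumes
    // io.trino.spi.type.TimestampType.TIMESTAMP_MICROS is imported and tempFile holds
    // an int64 column annotated as TIMESTAMP_MICROS).
    ImmutableList.Builder<SqlTimestamp> timestampsMicros = new ImmutableList.Builder<>();
    for (long epochMicros : epochMicrosValues) {
        // SqlTimestamp.newInstance(precision, epochMicros, picosOfMicro) builds the expected value
        timestampsMicros.add(SqlTimestamp.newInstance(6, epochMicros, 0));
    }
    testReadingAs(TIMESTAMP_MICROS, session, tempFile, columnNames, timestampsMicros.build());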