From 850f7937c2039121e992523ad71f7cc5df2e03e4 Mon Sep 17 00:00:00 2001 From: "Mateusz \"Serafin\" Gajewski" Date: Sun, 18 Feb 2024 18:36:02 +0100 Subject: [PATCH] Remove Phoenix5 connector support Shaded Phoenix 5.1.3 client brings over 60 Critical and High CVEs into the Trino codebase. 5.1.4-SNAPSHOT version doesn't bring any significant improvement to this situation. --- .github/workflows/ci.yml | 2 - core/trino-server/src/main/provisio/trino.xml | 6 - docs/release-template.md | 2 - docs/src/main/sphinx/connector.md | 1 - docs/src/main/sphinx/connector/phoenix.md | 288 ----- docs/src/main/sphinx/release/release-312.md | 2 +- lib/trino-phoenix5-patched/pom.xml | 92 -- .../zookeeper/client/StaticHostProvider.java | 165 --- .../java/org/apache/phoenix/TestDummy.java | 22 - plugin/trino-phoenix5/pom.xml | 432 ------- .../phoenix5/ConfigurationInstantiator.java | 29 - .../trino/plugin/phoenix5/MetadataUtil.java | 39 - .../trino/plugin/phoenix5/PhoenixClient.java | 1033 ----------------- .../plugin/phoenix5/PhoenixClientModule.java | 217 ---- .../phoenix5/PhoenixColumnProperties.java | 59 - .../trino/plugin/phoenix5/PhoenixConfig.java | 98 -- .../plugin/phoenix5/PhoenixConnector.java | 127 -- .../phoenix5/PhoenixConnectorFactory.java | 75 -- .../plugin/phoenix5/PhoenixErrorCode.java | 44 - .../plugin/phoenix5/PhoenixMergeSink.java | 228 ---- .../phoenix5/PhoenixMergeTableHandle.java | 56 - .../plugin/phoenix5/PhoenixMetadata.java | 350 ------ .../phoenix5/PhoenixOutputTableHandle.java | 51 - .../phoenix5/PhoenixPageSinkProvider.java | 61 - .../plugin/phoenix5/PhoenixPageSource.java | 142 --- .../phoenix5/PhoenixPageSourceProvider.java | 109 -- .../trino/plugin/phoenix5/PhoenixPlugin.java | 28 - .../phoenix5/PhoenixSessionProperties.java | 62 - .../trino/plugin/phoenix5/PhoenixSplit.java | 84 -- .../plugin/phoenix5/PhoenixSplitManager.java | 165 --- .../phoenix5/PhoenixTableProperties.java | 239 ---- .../phoenix5/SerializedPhoenixInputSplit.java | 68 -- .../io/trino/plugin/phoenix5/TypeUtils.java | 249 ---- .../plugin/phoenix5/PhoenixQueryRunner.java | 145 --- .../plugin/phoenix5/PhoenixSqlExecutor.java | 63 - .../plugin/phoenix5/PhoenixTestTable.java | 46 - .../plugin/phoenix5/TestPhoenixConfig.java | 61 - .../phoenix5/TestPhoenixConnectorTest.java | 834 ------------- .../plugin/phoenix5/TestPhoenixPlugin.java | 39 - .../plugin/phoenix5/TestPhoenixSplit.java | 51 - .../phoenix5/TestPhoenixTypeMapping.java | 789 ------------- .../plugin/phoenix5/TestingPhoenixServer.java | 120 -- pom.xml | 10 - .../io/trino/tests/product/TestGroups.java | 1 - .../EnvMultinodeAllConnectors.java | 1 - .../env/environment/EnvMultinodePhoenix5.java | 77 -- .../suite/suites/Suite6NonGeneric.java | 5 - .../multinode-all/phoenix5.properties | 2 - .../multinode-phoenix5/hbase-site.xml | 7 - .../multinode-phoenix5/phoenix.properties | 3 - .../tempto-configuration.yaml | 3 - .../tests/product/phoenix/TestPhoenix.java | 42 - 52 files changed, 1 insertion(+), 6923 deletions(-) delete mode 100644 docs/src/main/sphinx/connector/phoenix.md delete mode 100644 lib/trino-phoenix5-patched/pom.xml delete mode 100644 lib/trino-phoenix5-patched/src/main/java/org/apache/phoenix/shaded/org/apache/zookeeper/client/StaticHostProvider.java delete mode 100644 lib/trino-phoenix5-patched/src/test/java/org/apache/phoenix/TestDummy.java delete mode 100644 plugin/trino-phoenix5/pom.xml delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/ConfigurationInstantiator.java delete mode 100644 
plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/MetadataUtil.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClient.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClientModule.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixColumnProperties.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConfig.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnectorFactory.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixErrorCode.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeSink.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeTableHandle.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMetadata.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixOutputTableHandle.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSinkProvider.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSource.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSourceProvider.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPlugin.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSessionProperties.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplit.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplitManager.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixTableProperties.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/SerializedPhoenixInputSplit.java delete mode 100644 plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/TypeUtils.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixQueryRunner.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixSqlExecutor.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixTestTable.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConfig.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixPlugin.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixSplit.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixTypeMapping.java delete mode 100644 plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestingPhoenixServer.java delete mode 100644 testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodePhoenix5.java delete mode 100644 testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-all/phoenix5.properties delete mode 100644 
testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/hbase-site.xml delete mode 100644 testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/phoenix.properties delete mode 100644 testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/tempto-configuration.yaml delete mode 100644 testing/trino-product-tests/src/main/java/io/trino/tests/product/phoenix/TestPhoenix.java diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 55091d34709e..084d912a4991 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -375,7 +375,6 @@ jobs: !:trino-oracle, !:trino-orc, !:trino-parquet, - !:trino-phoenix5, !:trino-pinot, !:trino-postgresql, !:trino-raptor-legacy, @@ -478,7 +477,6 @@ jobs: - { modules: plugin/trino-mysql } - { modules: plugin/trino-opensearch } - { modules: plugin/trino-oracle } - - { modules: plugin/trino-phoenix5 } - { modules: plugin/trino-pinot } - { modules: plugin/trino-postgresql } - { modules: plugin/trino-raptor-legacy } diff --git a/core/trino-server/src/main/provisio/trino.xml b/core/trino-server/src/main/provisio/trino.xml index 7a2e0a6fe8f2..73913d71354c 100644 --- a/core/trino-server/src/main/provisio/trino.xml +++ b/core/trino-server/src/main/provisio/trino.xml @@ -236,12 +236,6 @@ - - - - - - diff --git a/docs/release-template.md b/docs/release-template.md index f45addc1b051..72fd6ef14e7b 100644 --- a/docs/release-template.md +++ b/docs/release-template.md @@ -64,8 +64,6 @@ ## Oracle connector -## Phoenix connector - ## Pinot connector ## PostgreSQL connector diff --git a/docs/src/main/sphinx/connector.md b/docs/src/main/sphinx/connector.md index 3c8dc201e7f0..78ca870b533f 100644 --- a/docs/src/main/sphinx/connector.md +++ b/docs/src/main/sphinx/connector.md @@ -31,7 +31,6 @@ MongoDB MySQL OpenSearch Oracle -Phoenix Pinot PostgreSQL Prometheus diff --git a/docs/src/main/sphinx/connector/phoenix.md b/docs/src/main/sphinx/connector/phoenix.md deleted file mode 100644 index af26ce92644e..000000000000 --- a/docs/src/main/sphinx/connector/phoenix.md +++ /dev/null @@ -1,288 +0,0 @@ ---- -myst: - substitutions: - default_domain_compaction_threshold: '`5000`' ---- - -# Phoenix connector - -```{raw} html - -``` - -The Phoenix connector allows querying data stored in -[Apache HBase](https://hbase.apache.org/) using -[Apache Phoenix](https://phoenix.apache.org/). - -## Requirements - -To query HBase data through Phoenix, you need: - -- Network access from the Trino coordinator and workers to the ZooKeeper - servers. The default port is 2181. -- A compatible version of Phoenix: all 5.x versions starting from 5.1.0 are supported. - -## Configuration - -To configure the Phoenix connector, create a catalog properties file -`etc/catalog/example.properties` with the following contents, -replacing `host1,host2,host3` with a comma-separated list of the ZooKeeper -nodes used for discovery of the HBase cluster: - -```text -connector.name=phoenix5 -phoenix.connection-url=jdbc:phoenix:host1,host2,host3:2181:/hbase -phoenix.config.resources=/path/to/hbase-site.xml -``` - -The optional paths to Hadoop resource files, such as `hbase-site.xml` are used -to load custom Phoenix client connection properties. 
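A minimal sketch of what such a resource file might contain, using two standard Phoenix/HBase client settings, `phoenix.query.timeoutMs` and `hbase.client.retries.number`, chosen here purely as examples:

```text
<configuration>
  <property>
    <!-- Client-side Phoenix query timeout, in milliseconds -->
    <name>phoenix.query.timeoutMs</name>
    <value>60000</value>
  </property>
  <property>
    <!-- Number of times the HBase client retries a failed operation -->
    <name>hbase.client.retries.number</name>
    <value>10</value>
  </property>
</configuration>
```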
-
-The following Phoenix-specific configuration properties are available:
-
-| Property name                 | Required | Description |
-| ----------------------------- | -------- | ----------- |
-| `phoenix.connection-url`      | Yes      | `jdbc:phoenix[:zk_quorum][:zk_port][:zk_hbase_path]`. The `zk_quorum` is a comma-separated list of ZooKeeper servers. The `zk_port` is the ZooKeeper port. The `zk_hbase_path` is the HBase root znode path, which is configurable using `hbase-site.xml`. By default the location is `/hbase`. |
-| `phoenix.config.resources`    | No       | Comma-separated list of configuration files (e.g. `hbase-site.xml`) to use for connection properties. These files must exist on the machines running Trino. |
-| `phoenix.max-scans-per-split` | No       | Maximum number of HBase scans that will be performed in a single split. Default is 20. Lower values will lead to more splits in Trino. Can also be set via session property `max_scans_per_split`. For details see: [https://phoenix.apache.org/update_statistics.html](https://phoenix.apache.org/update_statistics.html). (This setting has no effect when guideposts are disabled in Phoenix.) |
-
-```{include} jdbc-common-configurations.fragment
-```
-
-```{include} query-comment-format.fragment
-```
-
-```{include} jdbc-domain-compaction-threshold.fragment
-```
-
-```{include} jdbc-procedures.fragment
-```
-
-```{include} jdbc-case-insensitive-matching.fragment
-```
-
-```{include} non-transactional-insert.fragment
-```
-
-## Querying Phoenix tables
-
-The default empty schema in Phoenix maps to a schema named `default` in Trino.
-You can see the available Phoenix schemas by running `SHOW SCHEMAS`:
-
-```
-SHOW SCHEMAS FROM example;
-```
-
-If you have a Phoenix schema named `web`, you can view the tables
-in this schema by running `SHOW TABLES`:
-
-```
-SHOW TABLES FROM example.web;
-```
-
-You can see a list of the columns in the `clicks` table in the `web` schema
-using either of the following:
-
-```
-DESCRIBE example.web.clicks;
-SHOW COLUMNS FROM example.web.clicks;
-```
-
-Finally, you can access the `clicks` table in the `web` schema:
-
-```
-SELECT * FROM example.web.clicks;
-```
-
-If you used a different name for your catalog properties file, use
-that catalog name instead of `example` in the above examples.
-
-(phoenix-type-mapping)=
-
-## Type mapping
-
-Because Trino and Phoenix each support types that the other does not, this
-connector {ref}`modifies some types <type-mapping-overview>` when reading or
-writing data. Data types may not map the same way in both directions between
-Trino and the data source. Refer to the following sections for type mapping in
-each direction.
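To make the direction-dependence concrete before the detailed tables in the next sections: a Phoenix `UNSIGNED_LONG` column reads into Trino as `BIGINT`, but Trino has no unsigned types, so writing the same data back creates a plain Phoenix `BIGINT` column. A sketch, with hypothetical table and column names:

```
-- Reading: Trino sees a Phoenix UNSIGNED_LONG column as BIGINT
SELECT hit_count FROM example.web.counters;

-- Writing back: the copy gets a Phoenix BIGINT column, not UNSIGNED_LONG
CREATE TABLE example.web.counters_copy AS
SELECT hit_count FROM example.web.counters;
```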
- -### Phoenix type to Trino type mapping - -The connector maps Phoenix types to the corresponding Trino types following this -table: - -:::{list-table} Phoenix type to Trino type mapping -:widths: 50, 50 -:header-rows: 1 - -* - Phoenix database type - - Trino type -* - `BOOLEAN` - - `BOOLEAN` -* - `TINYINT` - - `TINYINT` -* - `UNSIGNED_TINYINT` - - `TINYINT` -* - `SMALLINT` - - `SMALLINT` -* - `UNSIGNED_SMALLINT` - - `SMALLINT` -* - `INTEGER` - - `INTEGER` -* - `UNSIGNED_INT` - - `INTEGER` -* - `BIGINT` - - `BIGINT` -* - `UNSIGNED_LONG` - - `BIGINT` -* - `FLOAT` - - `REAL` -* - `UNSIGNED_FLOAT` - - `REAL` -* - `DOUBLE` - - `DOUBLE` -* - `UNSIGNED_DOUBLE` - - `DOUBLE` -* - `DECIMAL(p,s)` - - `DECIMAL(p,s)` -* - `CHAR(n)` - - `CHAR(n)` -* - `VARCHAR(n)` - - `VARCHAR(n)` -* - `BINARY` - - `VARBINARY` -* - `VARBINARY` - - `VARBINARY` -* - `DATE` - - `DATE` -* - `UNSIGNED_DATE` - - `DATE` -* - `ARRAY` - - `ARRAY` -::: - -No other types are supported. - -### Trino type to Phoenix type mapping - -The Phoenix fixed length `BINARY` data type is mapped to the Trino variable -length `VARBINARY` data type. There is no way to create a Phoenix table in -Trino that uses the `BINARY` data type, as Trino does not have an equivalent -type. - -The connector maps Trino types to the corresponding Phoenix types following this -table: - -:::{list-table} Trino type to Phoenix type mapping -:widths: 50, 50 -:header-rows: 1 - -* - Trino database type - - Phoenix type -* - `BOOLEAN` - - `BOOLEAN` -* - `TINYINT` - - `TINYINT` -* - `SMALLINT` - - `SMALLINT` -* - `INTEGER` - - `INTEGER` -* - `BIGINT` - - `BIGINT` -* - `REAL` - - `FLOAT` -* - `DOUBLE` - - `DOUBLE` -* - `DECIMAL(p,s)` - - `DECIMAL(p,s)` -* - `CHAR(n)` - - `CHAR(n)` -* - `VARCHAR(n)` - - `VARCHAR(n)` -* - `VARBINARY` - - `VARBINARY` -* - `TIME` - - `TIME` -* - `DATE` - - `DATE` -* - `ARRAY` - - `ARRAY` -::: - -No other types are supported. - -```{include} decimal-type-handling.fragment -``` - -```{include} jdbc-type-mapping.fragment -``` - -## Table properties - Phoenix - -Table property usage example: - -``` -CREATE TABLE example_schema.scientists ( - recordkey VARCHAR, - birthday DATE, - name VARCHAR, - age BIGINT -) -WITH ( - rowkeys = 'recordkey,birthday', - salt_buckets = 10 -); -``` - -The following are supported Phoenix table properties from [https://phoenix.apache.org/language/index.html#options](https://phoenix.apache.org/language/index.html#options) - -| Property name | Default value | Description | -| ----------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------- | -| `rowkeys` | `ROWKEY` | Comma-separated list of primary key columns. See further description below | -| `split_on` | (none) | List of keys to presplit the table on. See [Split Point](https://phoenix.apache.org/language/index.html#split_point). | -| `salt_buckets` | (none) | Number of salt buckets for this table. | -| `disable_wal` | false | Whether to disable WAL writes in HBase for this table. | -| `immutable_rows` | false | Declares whether this table has rows which are write-once, append-only. | -| `default_column_family` | `0` | Default column family name to use for this table. | - -### `rowkeys` - -This is a comma-separated list of columns to be used as the table's primary key. If not specified, a `BIGINT` primary key column named `ROWKEY` is generated -, as well as a sequence with the same name as the table suffixed with `_seq` (i.e. 
`<schema>.<table>_seq`)
-, which is used to automatically populate the `ROWKEY` for each row during insertion.
-
-## Table properties - HBase
-
-The following are the supported HBase table properties that are passed through by Phoenix during table creation.
-Use them in the same way as above: in the `WITH` clause of the `CREATE TABLE` statement.
-
-| Property name         | Default value | Description |
-| --------------------- | ------------- | ----------- |
-| `versions`            | `1`           | The maximum number of versions of each cell to keep. |
-| `min_versions`        | `0`           | The minimum number of cell versions to keep. |
-| `compression`         | `NONE`        | Compression algorithm to use. Valid values are `NONE` (default), `SNAPPY`, `LZO`, `LZ4`, or `GZ`. |
-| `data_block_encoding` | `FAST_DIFF`   | Block encoding algorithm to use. Valid values are: `NONE`, `PREFIX`, `DIFF`, `FAST_DIFF` (default), or `ROW_INDEX_V1`. |
-| `ttl`                 | `FOREVER`     | Time To Live for each cell. |
-| `bloomfilter`         | `NONE`        | Bloomfilter to use. Valid values are `NONE` (default), `ROW`, or `ROWCOL`. |
-
-(phoenix-sql-support)=
-
-## SQL support
-
-The connector provides read and write access to data and metadata in
-Phoenix. In addition to the {ref}`globally available
-<sql-globally-available>` and {ref}`read operation <sql-read-operations>`
-statements, the connector supports the following features:
-
-- {doc}`/sql/insert`
-- {doc}`/sql/delete`
-- {doc}`/sql/merge`
-- {doc}`/sql/create-table`
-- {doc}`/sql/create-table-as`
-- {doc}`/sql/drop-table`
-- {doc}`/sql/create-schema`
-- {doc}`/sql/drop-schema`
-
-```{include} sql-delete-limitation.fragment
-```
diff --git a/docs/src/main/sphinx/release/release-312.md b/docs/src/main/sphinx/release/release-312.md
index 006c0d4b8bda..2f228d2c5c2f 100644
--- a/docs/src/main/sphinx/release/release-312.md
+++ b/docs/src/main/sphinx/release/release-312.md
@@ -20,7 +20,7 @@
 - Improve support for correlated subqueries containing redundant `LIMIT` clauses. ({issue}`441`)
 - Add a new {ref}`uuid-type` type to represent UUIDs. ({issue}`755`)
 - Add {func}`uuid` function to generate random UUIDs. ({issue}`786`)
-- Add {doc}`/connector/phoenix`. ({issue}`672`)
+- Add Phoenix connector. ({issue}`672`)
 - Make semantic error name available in client protocol. ({issue}`790`)
 - Report operator statistics when `experimental.work-processor-pipelines` is enabled.
({issue}`788`) diff --git a/lib/trino-phoenix5-patched/pom.xml b/lib/trino-phoenix5-patched/pom.xml deleted file mode 100644 index 00ab4c38b02d..000000000000 --- a/lib/trino-phoenix5-patched/pom.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - - 4.0.0 - - - io.trino - trino-root - 444-SNAPSHOT - ../../pom.xml - - - trino-phoenix5-patched - Trino - patched Phoenix5 client to work with JDK17 - - - ${project.parent.basedir} - - - - - org.apache.phoenix - phoenix-client-embedded-hbase-2.2 - 5.1.3 - - - - io.airlift - junit-extensions - test - - - - org.junit.jupiter - junit-jupiter-api - test - - - - - - - org.apache.maven.plugins - maven-shade-plugin - - - - shade - - package - - false - false - false - - - org.apache.zookeeper - org.apache.phoenix.shaded.org.apache.zookeeper - - - - - org.apache.phoenix:phoenix-client-embedded-hbase-2.2 - - org/apache/phoenix/shaded/org/apache/zookeeper/client/StaticHostProvider.class - org/apache/phoenix/shaded/org/apache/zookeeper/client/StaticHostProvider$*.class - - javax/xml/bind/** - META-INF/services/javax.xml.bind.JAXBContext - org/apache/phoenix/shaded/com/sun/xml/bind/** - - - - - - - - - org.basepom.maven - duplicate-finder-maven-plugin - - - - org.apache.phoenix - phoenix-client-embedded-hbase-2.2 - - - - - - - diff --git a/lib/trino-phoenix5-patched/src/main/java/org/apache/phoenix/shaded/org/apache/zookeeper/client/StaticHostProvider.java b/lib/trino-phoenix5-patched/src/main/java/org/apache/phoenix/shaded/org/apache/zookeeper/client/StaticHostProvider.java deleted file mode 100644 index 3b475118fa30..000000000000 --- a/lib/trino-phoenix5-patched/src/main/java/org/apache/phoenix/shaded/org/apache/zookeeper/client/StaticHostProvider.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.shaded.org.apache.zookeeper.client; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -// TODO(https://github.com/trinodb/trino/issues/13051): Remove when Phoenix 5.2 is release -public final class StaticHostProvider - implements HostProvider -{ - public interface Resolver - { - InetAddress[] getAllByName(String name) - throws UnknownHostException; - } - - private final List serverAddresses = new ArrayList(5); - private final Resolver resolver; - - private int lastIndex = -1; - private int currentIndex = -1; - - /** - * Constructs a SimpleHostSet. - * - * @param serverAddresses - * possibly unresolved ZooKeeper server addresses - * @throws IllegalArgumentException - * if serverAddresses is empty or resolves to an empty list - */ - public StaticHostProvider(Collection serverAddresses) - { - this.resolver = InetAddress::getAllByName; - init(serverAddresses); - } - - /** - * Introduced for testing purposes. getAllByName() is a static method of InetAddress, therefore cannot be easily mocked. 
- * By abstraction of Resolver interface we can easily inject a mocked implementation in tests. - * - * @param serverAddresses - * possibly unresolved ZooKeeper server addresses - * @param resolver - * custom resolver implementation - * @throws IllegalArgumentException - * if serverAddresses is empty or resolves to an empty list - */ - public StaticHostProvider(Collection serverAddresses, Resolver resolver) - { - this.resolver = resolver; - init(serverAddresses); - } - - /** - * Common init method for all constructors. - * Resolve all unresolved server addresses, put them in a list and shuffle. - */ - private void init(Collection serverAddresses) - { - if (serverAddresses.isEmpty()) { - throw new IllegalArgumentException( - "A HostProvider may not be empty!"); - } - - this.serverAddresses.addAll(serverAddresses); - Collections.shuffle(this.serverAddresses); - } - - /** - * Evaluate to a hostname if one is available and otherwise it returns the - * string representation of the IP address. - * - * In Java 7, we have a method getHostString, but earlier versions do not support it. - * This method is to provide a replacement for InetSocketAddress.getHostString(). - * - * @return Hostname string of address parameter - */ - private String getHostString(InetSocketAddress addr) - { - String hostString = ""; - - if (addr == null) { - return hostString; - } - if (!addr.isUnresolved()) { - InetAddress ia = addr.getAddress(); - - // If the string starts with '/', then it has no hostname - // and we want to avoid the reverse lookup, so we return - // the string representation of the address. - if (ia.toString().startsWith("/")) { - hostString = ia.getHostAddress(); - } - else { - hostString = addr.getHostName(); - } - } - else { - hostString = addr.getHostString(); - } - - return hostString; - } - - @Override - public int size() - { - return serverAddresses.size(); - } - - @Override - public InetSocketAddress next(long spinDelay) - { - currentIndex = ++currentIndex % serverAddresses.size(); - if (currentIndex == lastIndex && spinDelay > 0) { - try { - Thread.sleep(spinDelay); - } - catch (InterruptedException e) { - } - } - else if (lastIndex == -1) { - // We don't want to sleep on the first ever connect attempt. - lastIndex = 0; - } - - InetSocketAddress curAddr = serverAddresses.get(currentIndex); - try { - String curHostString = getHostString(curAddr); - List resolvedAddresses = new ArrayList(Arrays.asList(this.resolver.getAllByName(curHostString))); - if (resolvedAddresses.isEmpty()) { - return curAddr; - } - Collections.shuffle(resolvedAddresses); - return new InetSocketAddress(resolvedAddresses.get(0), curAddr.getPort()); - } - catch (UnknownHostException e) { - return curAddr; - } - } - - @Override - public void onConnected() - { - lastIndex = currentIndex; - } -} diff --git a/lib/trino-phoenix5-patched/src/test/java/org/apache/phoenix/TestDummy.java b/lib/trino-phoenix5-patched/src/test/java/org/apache/phoenix/TestDummy.java deleted file mode 100644 index 5a6de3fbedc3..000000000000 --- a/lib/trino-phoenix5-patched/src/test/java/org/apache/phoenix/TestDummy.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix; - -import org.junit.jupiter.api.Test; - -public class TestDummy -{ - @Test - public void buildRequiresTestToExist() {} -} diff --git a/plugin/trino-phoenix5/pom.xml b/plugin/trino-phoenix5/pom.xml deleted file mode 100644 index abb631f55b1f..000000000000 --- a/plugin/trino-phoenix5/pom.xml +++ /dev/null @@ -1,432 +0,0 @@ - - - 4.0.0 - - - io.trino - trino-root - 444-SNAPSHOT - ../../pom.xml - - - trino-phoenix5 - trino-plugin - Trino - Phoenix 5 connector - - - ${project.parent.basedir} - 2.2.7 - - - ${air.test.jvm.additional-arguments.default} --add-opens=java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED - - - - - com.google.guava - guava - - - - com.google.inject - guice - - - - io.airlift - bootstrap - - - - io.airlift - configuration - - - - io.airlift - json - - - - io.airlift - log - - - - io.trino - trino-base-jdbc - - - - - io.trino - trino-phoenix5-patched - - - org.apache.phoenix - * - - - - - - io.trino - trino-plugin-toolkit - - - - jakarta.annotation - jakarta.annotation-api - - - - jakarta.validation - jakarta.validation-api - - - - joda-time - joda-time - - - - org.weakref - jmxutils - - - - com.fasterxml.jackson.core - jackson-annotations - provided - - - - io.airlift - slice - provided - - - - io.opentelemetry - opentelemetry-api - provided - - - - io.opentelemetry - opentelemetry-context - provided - - - - io.trino - trino-spi - provided - - - - org.openjdk.jol - jol-core - provided - - - - - - ch.qos.reload4j - reload4j - 1.2.25 - runtime - - - - com.fasterxml.jackson.core - jackson-databind - runtime - - - - com.google.errorprone - error_prone_annotations - runtime - true - - - - io.airlift - log-manager - runtime - - - org.slf4j - jcl-over-slf4j - - - org.slf4j - log4j-over-slf4j - - - org.slf4j - slf4j-api - - - - - - io.airlift - junit-extensions - test - - - - io.airlift - testing - test - - - - io.trino - trino-base-jdbc - test-jar - test - - - - io.trino - trino-main - test - - - org.slf4j - slf4j-api - - - - - - io.trino - trino-main - test-jar - test - - - - io.trino - trino-parser - test - - - - io.trino - trino-testing - test - - - junit - junit - - - org.slf4j - slf4j-api - - - - - - io.trino - trino-testing-services - test - - - - io.trino - trino-tpch - test - - - - io.trino.tpch - tpch - test - - - - org.apache.hadoop - hadoop-hdfs - 3.1.4 - test-jar - test - - - * - * - - - - - - org.apache.hbase - hbase-common - ${dep.hbase.version} - test-jar - test - - - * - * - - - - - - org.apache.hbase - hbase-hadoop-compat - ${dep.hbase.version} - test-jar - test - - - * - * - - - - - - org.apache.hbase - hbase-hadoop2-compat - ${dep.hbase.version} - test-jar - test - - - * - * - - - - - - org.apache.hbase - hbase-server - ${dep.hbase.version} - test-jar - test - - - * - * - - - - - - org.apache.hbase - hbase-zookeeper - ${dep.hbase.version} - test-jar - test - - - * - * - - - - - - org.apache.zookeeper - zookeeper - test - - - com.github.spotbugs - spotbugs-annotations - - - org.apache.yetus - audience-annotations - - - org.slf4j - slf4j-api - - - - - - org.assertj - assertj-core - test - 
- - - org.jetbrains - annotations - test - - - - org.junit.jupiter - junit-jupiter-api - test - - - - org.junit.jupiter - junit-jupiter-engine - test - - - - - - - org.basepom.maven - duplicate-finder-maven-plugin - - - mrapp-generated-classpath - - assets/org/apache/commons/math3/exception/util/LocalizedFormats_fr.properties - - tables/.*\.bin - jetty-dir.css - - - - com.clearspring.analytics - stream - - - - - - - - - - - lib/tools.jar - - - idea.maven.embedder.version - - - - - org.apache.phoenix - phoenix-client-embedded-hbase-2.2 - 5.1.3 - provided - true - - - - - diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/ConfigurationInstantiator.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/ConfigurationInstantiator.java deleted file mode 100644 index 30a39be1c608..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/ConfigurationInstantiator.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import io.trino.spi.classloader.ThreadContextClassLoader; -import org.apache.hadoop.conf.Configuration; - -final class ConfigurationInstantiator -{ - private ConfigurationInstantiator() {} - - public static Configuration newEmptyConfiguration() - { - try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(ConfigurationInstantiator.class.getClassLoader())) { - return new Configuration(false); - } - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/MetadataUtil.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/MetadataUtil.java deleted file mode 100644 index 072b14605149..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/MetadataUtil.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import jakarta.annotation.Nullable; -import org.apache.phoenix.util.SchemaUtil; - -import static io.trino.plugin.phoenix5.PhoenixMetadata.DEFAULT_SCHEMA; - -public final class MetadataUtil -{ - private MetadataUtil() {} - - public static String getEscapedTableName(@Nullable String schema, String table) - { - return SchemaUtil.getEscapedTableName(toPhoenixSchemaName(schema), table); - } - - public static @Nullable String toPhoenixSchemaName(@Nullable String trinoSchemaName) - { - return DEFAULT_SCHEMA.equalsIgnoreCase(trinoSchemaName) ? 
"" : trinoSchemaName; - } - - public static @Nullable String toTrinoSchemaName(@Nullable String phoenixSchemaName) - { - return "".equals(phoenixSchemaName) ? DEFAULT_SCHEMA : phoenixSchemaName; - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClient.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClient.java deleted file mode 100644 index 96b337b47730..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClient.java +++ /dev/null @@ -1,1033 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.inject.Inject; -import io.trino.plugin.base.expression.ConnectorExpressionRewriter; -import io.trino.plugin.base.mapping.IdentifierMapping; -import io.trino.plugin.base.mapping.RemoteIdentifiers; -import io.trino.plugin.jdbc.BaseJdbcClient; -import io.trino.plugin.jdbc.ColumnMapping; -import io.trino.plugin.jdbc.ConnectionFactory; -import io.trino.plugin.jdbc.JdbcColumnHandle; -import io.trino.plugin.jdbc.JdbcOutputTableHandle; -import io.trino.plugin.jdbc.JdbcSortItem; -import io.trino.plugin.jdbc.JdbcSplit; -import io.trino.plugin.jdbc.JdbcTableHandle; -import io.trino.plugin.jdbc.JdbcTypeHandle; -import io.trino.plugin.jdbc.LongReadFunction; -import io.trino.plugin.jdbc.LongWriteFunction; -import io.trino.plugin.jdbc.ObjectReadFunction; -import io.trino.plugin.jdbc.ObjectWriteFunction; -import io.trino.plugin.jdbc.PredicatePushdownController; -import io.trino.plugin.jdbc.PreparedQuery; -import io.trino.plugin.jdbc.QueryBuilder; -import io.trino.plugin.jdbc.RemoteTableName; -import io.trino.plugin.jdbc.WriteFunction; -import io.trino.plugin.jdbc.WriteMapping; -import io.trino.plugin.jdbc.expression.ComparisonOperator; -import io.trino.plugin.jdbc.expression.JdbcConnectorExpressionRewriterBuilder; -import io.trino.plugin.jdbc.expression.ParameterizedExpression; -import io.trino.plugin.jdbc.expression.RewriteComparison; -import io.trino.plugin.jdbc.logging.RemoteQueryModifier; -import io.trino.spi.TrinoException; -import io.trino.spi.block.Block; -import io.trino.spi.connector.ColumnHandle; -import io.trino.spi.connector.ColumnMetadata; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.connector.ConnectorTableHandle; -import io.trino.spi.connector.ConnectorTableMetadata; -import io.trino.spi.connector.JoinStatistics; -import io.trino.spi.connector.JoinType; -import io.trino.spi.connector.SchemaNotFoundException; -import io.trino.spi.connector.SchemaTableName; -import io.trino.spi.expression.ConnectorExpression; -import io.trino.spi.security.ConnectorIdentity; -import io.trino.spi.type.ArrayType; -import io.trino.spi.type.CharType; -import io.trino.spi.type.DecimalType; -import io.trino.spi.type.Decimals; -import io.trino.spi.type.RowType; -import io.trino.spi.type.Type; 
-import io.trino.spi.type.VarbinaryType; -import io.trino.spi.type.VarcharType; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.compile.QueryPlan; -import org.apache.phoenix.compile.StatementContext; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.iterate.ConcatResultIterator; -import org.apache.phoenix.iterate.LookAheadResultIterator; -import org.apache.phoenix.iterate.MapReduceParallelScanGrouper; -import org.apache.phoenix.iterate.PeekingResultIterator; -import org.apache.phoenix.iterate.ResultIterator; -import org.apache.phoenix.iterate.SequenceResultIterator; -import org.apache.phoenix.iterate.TableResultIterator; -import org.apache.phoenix.jdbc.DelegatePreparedStatement; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.jdbc.PhoenixPreparedStatement; -import org.apache.phoenix.jdbc.PhoenixResultSet; -import org.apache.phoenix.mapreduce.PhoenixInputSplit; -import org.apache.phoenix.monitoring.ScanMetricsHolder; -import org.apache.phoenix.query.ConnectionQueryServices; -import org.apache.phoenix.query.HBaseFactoryProvider; -import org.apache.phoenix.query.QueryConstants; -import org.apache.phoenix.schema.PColumn; -import org.apache.phoenix.schema.PName; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.TableProperty; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PhoenixArray; -import org.apache.phoenix.util.SchemaUtil; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.JDBCType; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Types; -import java.text.SimpleDateFormat; -import java.time.LocalDate; -import java.time.format.DateTimeFormatter; -import java.util.ArrayList; -import java.util.Collection; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.OptionalLong; -import java.util.Set; -import java.util.StringJoiner; -import java.util.function.BiFunction; - -import static com.google.common.base.MoreObjects.firstNonNull; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Verify.verify; -import static com.google.common.collect.ImmutableList.toImmutableList; -import static com.google.common.collect.ImmutableSet.toImmutableSet; -import static com.google.common.collect.Iterators.tryFind; -import static io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.ALLOW_OVERFLOW; -import static io.trino.plugin.jdbc.DecimalSessionSessionProperties.getDecimalDefaultScale; -import static io.trino.plugin.jdbc.DecimalSessionSessionProperties.getDecimalRounding; -import static io.trino.plugin.jdbc.DecimalSessionSessionProperties.getDecimalRoundingMode; -import static io.trino.plugin.jdbc.PredicatePushdownController.DISABLE_PUSHDOWN; 
-import static io.trino.plugin.jdbc.PredicatePushdownController.FULL_PUSHDOWN; -import static io.trino.plugin.jdbc.StandardColumnMappings.bigintColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.bigintWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.booleanColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.booleanWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.charWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.decimalColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.defaultCharColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.defaultVarcharColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.doubleColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.doubleWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.integerColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.integerWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.longDecimalWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.realColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.realWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.shortDecimalWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.smallintColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.smallintWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.timeWriteFunctionUsingSqlTime; -import static io.trino.plugin.jdbc.StandardColumnMappings.tinyintColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.tinyintWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.varbinaryColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.varbinaryWriteFunction; -import static io.trino.plugin.jdbc.StandardColumnMappings.varcharColumnMapping; -import static io.trino.plugin.jdbc.StandardColumnMappings.varcharWriteFunction; -import static io.trino.plugin.jdbc.TypeHandlingJdbcSessionProperties.getUnsupportedTypeHandling; -import static io.trino.plugin.jdbc.UnsupportedTypeHandling.CONVERT_TO_VARCHAR; -import static io.trino.plugin.phoenix5.ConfigurationInstantiator.newEmptyConfiguration; -import static io.trino.plugin.phoenix5.MetadataUtil.getEscapedTableName; -import static io.trino.plugin.phoenix5.MetadataUtil.toPhoenixSchemaName; -import static io.trino.plugin.phoenix5.PhoenixClientModule.getConnectionProperties; -import static io.trino.plugin.phoenix5.PhoenixColumnProperties.isPrimaryKey; -import static io.trino.plugin.phoenix5.PhoenixErrorCode.PHOENIX_METADATA_ERROR; -import static io.trino.plugin.phoenix5.PhoenixErrorCode.PHOENIX_QUERY_ERROR; -import static io.trino.plugin.phoenix5.PhoenixMetadata.DEFAULT_SCHEMA; -import static io.trino.plugin.phoenix5.TypeUtils.getArrayElementPhoenixTypeName; -import static io.trino.plugin.phoenix5.TypeUtils.getJdbcObjectArray; -import static io.trino.plugin.phoenix5.TypeUtils.jdbcObjectArrayToBlock; -import static io.trino.plugin.phoenix5.TypeUtils.toBoxedArray; -import static io.trino.spi.StandardErrorCode.ALREADY_EXISTS; -import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.trino.spi.connector.ConnectorMetadata.MODIFYING_ROWS_MESSAGE; -import static io.trino.spi.type.BigintType.BIGINT; -import static io.trino.spi.type.BooleanType.BOOLEAN; -import static 
io.trino.spi.type.DateType.DATE; -import static io.trino.spi.type.DecimalType.DEFAULT_PRECISION; -import static io.trino.spi.type.DecimalType.DEFAULT_SCALE; -import static io.trino.spi.type.DecimalType.createDecimalType; -import static io.trino.spi.type.DoubleType.DOUBLE; -import static io.trino.spi.type.IntegerType.INTEGER; -import static io.trino.spi.type.RealType.REAL; -import static io.trino.spi.type.SmallintType.SMALLINT; -import static io.trino.spi.type.TimeType.TIME_MILLIS; -import static io.trino.spi.type.TinyintType.TINYINT; -import static io.trino.spi.type.VarcharType.createUnboundedVarcharType; -import static java.lang.Math.max; -import static java.lang.String.format; -import static java.lang.String.join; -import static java.math.RoundingMode.UNNECESSARY; -import static java.sql.Types.ARRAY; -import static java.sql.Types.LONGNVARCHAR; -import static java.sql.Types.LONGVARCHAR; -import static java.sql.Types.NVARCHAR; -import static java.sql.Types.TIMESTAMP; -import static java.sql.Types.TIMESTAMP_WITH_TIMEZONE; -import static java.sql.Types.TIME_WITH_TIMEZONE; -import static java.sql.Types.VARCHAR; -import static java.util.Locale.ENGLISH; -import static java.util.stream.Collectors.joining; -import static java.util.stream.Collectors.toSet; -import static org.apache.hadoop.hbase.HConstants.FOREVER; -import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SKIP_REGION_BOUNDARY_CHECK; -import static org.apache.phoenix.util.PhoenixRuntime.getTable; -import static org.apache.phoenix.util.SchemaUtil.ESCAPE_CHARACTER; -import static org.apache.phoenix.util.SchemaUtil.getEscapedArgument; - -public class PhoenixClient - extends BaseJdbcClient -{ - public static final String MERGE_ROW_ID_COLUMN_NAME = "$merge_row_id"; - public static final String ROWKEY = "ROWKEY"; - public static final JdbcColumnHandle ROWKEY_COLUMN_HANDLE = new JdbcColumnHandle( - ROWKEY, - new JdbcTypeHandle(Types.BIGINT, Optional.of("BIGINT"), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()), - BIGINT); - - private static final String DATE_FORMAT = "y-MM-dd G"; - private static final DateTimeFormatter LOCAL_DATE_FORMATTER = DateTimeFormatter.ofPattern(DATE_FORMAT); - - // Phoenix threshold for simplifying big IN predicates is 50k https://issues.apache.org/jira/browse/PHOENIX-6751 - public static final int DEFAULT_DOMAIN_COMPACTION_THRESHOLD = 5_000; - - private final Configuration configuration; - - private final ConnectorExpressionRewriter connectorExpressionRewriter; - - @Inject - public PhoenixClient(PhoenixConfig config, ConnectionFactory connectionFactory, QueryBuilder queryBuilder, IdentifierMapping identifierMapping, RemoteQueryModifier queryModifier) - throws SQLException - { - super( - ESCAPE_CHARACTER, - connectionFactory, - queryBuilder, - ImmutableSet.of(), - identifierMapping, - queryModifier, - false); - this.configuration = newEmptyConfiguration(); - getConnectionProperties(config).forEach((k, v) -> configuration.set((String) k, (String) v)); - this.connectorExpressionRewriter = JdbcConnectorExpressionRewriterBuilder.newBuilder() - .addStandardRules(this::quoted) - .add(new RewriteComparison(ImmutableSet.of(ComparisonOperator.EQUAL, ComparisonOperator.NOT_EQUAL))) - .withTypeClass("integer_type", ImmutableSet.of("tinyint", "smallint", "integer", "bigint")) - .map("$add(left: integer_type, right: integer_type)").to("left + right") - .map("$subtract(left: integer_type, right: integer_type)").to("left - right") - .map("$multiply(left: integer_type, right: 
integer_type)").to("left * right") - .map("$divide(left: integer_type, right: integer_type)").to("left / right") - .map("$modulus(left: integer_type, right: integer_type)").to("left % right") - .map("$negate(value: integer_type)").to("-value") - .build(); - } - - @Override - public Optional convertPredicate(ConnectorSession session, ConnectorExpression expression, Map assignments) - { - return connectorExpressionRewriter.rewrite(session, expression, assignments); - } - - @Override - public Optional implementJoin( - ConnectorSession session, - JoinType joinType, - PreparedQuery leftSource, - Map leftProjections, - PreparedQuery rightSource, - Map rightProjections, - List joinConditions, - JoinStatistics statistics) - { - // Joins are currently not supported - return Optional.empty(); - } - - public Connection getConnection(ConnectorSession session) - throws SQLException - { - return connectionFactory.openConnection(session); - } - - public org.apache.hadoop.hbase.client.Connection getHConnection() - throws IOException - { - return HBaseFactoryProvider.getHConnectionFactory().createConnection(configuration); - } - - @Override - public void execute(ConnectorSession session, String statement) - { - super.execute(session, statement); - } - - @Override - public Collection listSchemas(Connection connection) - { - try (ResultSet resultSet = connection.getMetaData().getSchemas()) { - ImmutableSet.Builder schemaNames = ImmutableSet.builder(); - schemaNames.add(DEFAULT_SCHEMA); - while (resultSet.next()) { - String schemaName = getTableSchemaName(resultSet); - // skip internal schemas - if (filterSchema(schemaName)) { - schemaNames.add(schemaName); - } - } - return schemaNames.build(); - } - catch (SQLException e) { - throw new TrinoException(PHOENIX_METADATA_ERROR, e); - } - } - - @Override - public PreparedStatement buildSql(ConnectorSession session, Connection connection, JdbcSplit split, JdbcTableHandle table, List columnHandles) - throws SQLException - { - PreparedStatement query = prepareStatement( - session, - connection, - table, - columnHandles, - Optional.of(split)); - QueryPlan queryPlan = getQueryPlan(query.unwrap(PhoenixPreparedStatement.class)); - ResultSet resultSet = getResultSet(((PhoenixSplit) split).getPhoenixInputSplit(), queryPlan); - return new DelegatePreparedStatement(query) - { - @Override - public ResultSet executeQuery() - { - return resultSet; - } - }; - } - - public PreparedStatement prepareStatement( - ConnectorSession session, - Connection connection, - JdbcTableHandle table, - List columns, - Optional split) - throws SQLException - { - PreparedQuery preparedQuery = prepareQuery( - session, - connection, - table, - Optional.empty(), - columns, - ImmutableMap.of(), - split); - return queryBuilder.prepareStatement(this, session, connection, preparedQuery, Optional.of(columns.size())); - } - - @Override - public boolean supportsTopN(ConnectorSession session, JdbcTableHandle handle, List sortOrder) - { - return true; - } - - @Override - protected Optional topNFunction() - { - return Optional.of(TopNFunction.sqlStandard(this::quoted)); - } - - @Override - public boolean isTopNGuaranteed(ConnectorSession session) - { - // There are multiple splits and TopN is not guaranteed across them. 
- return false; - } - - @Override - public OptionalLong update(ConnectorSession session, JdbcTableHandle handle) - { - throw new TrinoException(NOT_SUPPORTED, MODIFYING_ROWS_MESSAGE); - } - - @Override - protected Optional> limitFunction() - { - return Optional.of((sql, limit) -> sql + " LIMIT " + limit); - } - - @Override - public boolean isLimitGuaranteed(ConnectorSession session) - { - return false; - } - - @Override - public String buildInsertSql(JdbcOutputTableHandle handle, List columnWriters) - { - PhoenixOutputTableHandle outputHandle = (PhoenixOutputTableHandle) handle; - String params = columnWriters.stream() - .map(WriteFunction::getBindExpression) - .collect(joining(",")); - String columns = handle.getColumnNames().stream() - .map(SchemaUtil::getEscapedArgument) - .collect(joining(",")); - if (outputHandle.rowkeyColumn().isPresent()) { - String nextId = format( - "NEXT VALUE FOR %s, ", - quoted(null, handle.getSchemaName(), handle.getTableName() + "_sequence")); - params = nextId + params; - columns = outputHandle.rowkeyColumn().get() + ", " + columns; - } - return format( - "UPSERT INTO %s (%s) VALUES (%s)", - quoted(null, handle.getSchemaName(), handle.getTableName()), - columns, - params); - } - - @Override - public ResultSet getTables(Connection connection, Optional schemaName, Optional tableName) - throws SQLException - { - return super.getTables(connection, schemaName.map(MetadataUtil::toPhoenixSchemaName), tableName); - } - - @Override - protected String getTableSchemaName(ResultSet resultSet) - throws SQLException - { - return firstNonNull(resultSet.getString("TABLE_SCHEM"), DEFAULT_SCHEMA); - } - - @Override - protected ResultSet getColumns(JdbcTableHandle handle, DatabaseMetaData metadata) - throws SQLException - { - try { - return super.getColumns(handle, metadata); - } - catch (org.apache.phoenix.schema.TableNotFoundException e) { - // Most JDBC driver return an empty result when DatabaseMetaData.getColumns can't find objects, but Phoenix driver throws an exception - // Rethrow as Trino TableNotFoundException to suppress the exception during listing information_schema - RemoteTableName remoteTableName = handle.getRequiredNamedRelation().getRemoteTableName(); - throw new io.trino.spi.connector.TableNotFoundException(new SchemaTableName(remoteTableName.getSchemaName().orElse(null), remoteTableName.getTableName())); - } - } - - @Override - public Optional toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) - { - Optional mapping = getForcedMappingToVarchar(typeHandle); - if (mapping.isPresent()) { - return mapping; - } - - switch (typeHandle.getJdbcType()) { - case Types.BOOLEAN: - return Optional.of(booleanColumnMapping()); - - case Types.TINYINT: - return Optional.of(tinyintColumnMapping()); - - case Types.SMALLINT: - return Optional.of(smallintColumnMapping()); - - case Types.INTEGER: - return Optional.of(integerColumnMapping()); - - case Types.BIGINT: - return Optional.of(bigintColumnMapping()); - - case Types.FLOAT: - return Optional.of(realColumnMapping()); - - case Types.DOUBLE: - return Optional.of(doubleColumnMapping()); - - case Types.DECIMAL: - Optional columnSize = typeHandle.getColumnSize(); - int precision = columnSize.orElse(DEFAULT_PRECISION); - int decimalDigits = typeHandle.getDecimalDigits().orElse(DEFAULT_SCALE); - if (getDecimalRounding(session) == ALLOW_OVERFLOW) { - if (columnSize.isEmpty()) { - return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, 
getDecimalDefaultScale(session)), getDecimalRoundingMode(session))); - } - } - // TODO does phoenix support negative scale? - precision = precision + max(-decimalDigits, 0); // Map decimal(p, -s) (negative scale) to decimal(p+s, 0). - if (precision > Decimals.MAX_PRECISION) { - break; - } - return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)), UNNECESSARY)); - - case Types.CHAR: - return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), true)); - - case VARCHAR: - case NVARCHAR: - case LONGVARCHAR: - case LONGNVARCHAR: - if (typeHandle.getColumnSize().isEmpty()) { - return Optional.of(varcharColumnMapping(createUnboundedVarcharType(), true)); - } - return Optional.of(defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), true)); - - case Types.BINARY: - case Types.VARBINARY: - return Optional.of(varbinaryColumnMapping()); - - case Types.DATE: - return Optional.of(ColumnMapping.longMapping( - DATE, - dateReadFunction(), - dateWriteFunctionUsingString())); - - // TODO add support for TIMESTAMP after Phoenix adds support for LocalDateTime - case TIMESTAMP: - case TIME_WITH_TIMEZONE: - case TIMESTAMP_WITH_TIMEZONE: - if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) { - return mapToUnboundedVarchar(typeHandle); - } - return Optional.empty(); - - case ARRAY: - JdbcTypeHandle elementTypeHandle = getArrayElementTypeHandle(typeHandle); - if (elementTypeHandle.getJdbcType() == Types.VARBINARY) { - return Optional.empty(); - } - return toColumnMapping(session, connection, elementTypeHandle) - .map(elementMapping -> { - ArrayType trinoArrayType = new ArrayType(elementMapping.getType()); - String jdbcTypeName = elementTypeHandle.getJdbcTypeName() - .orElseThrow(() -> new TrinoException( - PHOENIX_METADATA_ERROR, - "Type name is missing for jdbc type: " + JDBCType.valueOf(elementTypeHandle.getJdbcType()))); - // TODO (https://github.com/trinodb/trino/issues/11132) Enable predicate pushdown on ARRAY(CHAR) type in Phoenix - PredicatePushdownController pushdownController = elementTypeHandle.getJdbcType() == Types.CHAR ? 
DISABLE_PUSHDOWN : FULL_PUSHDOWN; - return arrayColumnMapping(session, trinoArrayType, jdbcTypeName, pushdownController); - }); - } - if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) { - return mapToUnboundedVarchar(typeHandle); - } - return Optional.empty(); - } - - @Override - public WriteMapping toWriteMapping(ConnectorSession session, Type type) - { - if (type == BOOLEAN) { - return WriteMapping.booleanMapping("boolean", booleanWriteFunction()); - } - - if (type == TINYINT) { - return WriteMapping.longMapping("tinyint", tinyintWriteFunction()); - } - if (type == SMALLINT) { - return WriteMapping.longMapping("smallint", smallintWriteFunction()); - } - if (type == INTEGER) { - return WriteMapping.longMapping("integer", integerWriteFunction()); - } - if (type == BIGINT) { - return WriteMapping.longMapping("bigint", bigintWriteFunction()); - } - if (type == REAL) { - return WriteMapping.longMapping("float", realWriteFunction()); - } - if (type == DOUBLE) { - return WriteMapping.doubleMapping("double", doubleWriteFunction()); - } - - if (type instanceof DecimalType decimalType) { - String dataType = format("decimal(%s, %s)", decimalType.getPrecision(), decimalType.getScale()); - if (decimalType.isShort()) { - return WriteMapping.longMapping(dataType, shortDecimalWriteFunction(decimalType)); - } - return WriteMapping.objectMapping(dataType, longDecimalWriteFunction(decimalType)); - } - - if (type instanceof CharType charType) { - return WriteMapping.sliceMapping("char(" + charType.getLength() + ")", charWriteFunction()); - } - if (type instanceof VarcharType varcharType) { - String dataType; - if (varcharType.isUnbounded()) { - dataType = "varchar"; - } - else { - dataType = "varchar(" + varcharType.getBoundedLength() + ")"; - } - return WriteMapping.sliceMapping(dataType, varcharWriteFunction()); - } - if (type instanceof VarbinaryType) { - return WriteMapping.sliceMapping("varbinary", varbinaryWriteFunction()); - } - - if (type == DATE) { - return WriteMapping.longMapping("date", dateWriteFunctionUsingString()); - } - if (TIME_MILLIS.equals(type)) { - return WriteMapping.longMapping("time", timeWriteFunctionUsingSqlTime()); - } - if (type instanceof ArrayType arrayType) { - Type elementType = arrayType.getElementType(); - String elementDataType = toWriteMapping(session, elementType).getDataType().toUpperCase(ENGLISH); - String elementWriteName = getArrayElementPhoenixTypeName(session, this, elementType); - return WriteMapping.objectMapping(elementDataType + " ARRAY", arrayWriteFunction(session, elementType, elementWriteName)); - } - throw new TrinoException(NOT_SUPPORTED, "Unsupported column type: " + type.getDisplayName()); - } - - @Override - public Optional getTableComment(ResultSet resultSet) - { - // Don't return a comment until the connector supports creating tables with comment - return Optional.empty(); - } - - @Override - public JdbcOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) - { - if (tableMetadata.getComment().isPresent()) { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support creating tables with table comment"); - } - SchemaTableName schemaTableName = tableMetadata.getTable(); - String schema = schemaTableName.getSchemaName(); - String table = schemaTableName.getTableName(); - - if (!getSchemaNames(session).contains(schema)) { - throw new SchemaNotFoundException(schema); - } - - try (Connection connection = connectionFactory.openConnection(session)) { - ConnectorIdentity identity = 
session.getIdentity(); - RemoteIdentifiers remoteIdentifiers = getRemoteIdentifiers(connection); - schema = getIdentifierMapping().toRemoteSchemaName(remoteIdentifiers, identity, schema); - table = getIdentifierMapping().toRemoteTableName(remoteIdentifiers, identity, schema, table); - schema = toPhoenixSchemaName(schema); - LinkedList tableColumns = new LinkedList<>(tableMetadata.getColumns()); - Map tableProperties = tableMetadata.getProperties(); - Optional immutableRows = PhoenixTableProperties.getImmutableRows(tableProperties); - String immutable = immutableRows.isPresent() && immutableRows.get() ? "IMMUTABLE" : ""; - - ImmutableList.Builder columnNames = ImmutableList.builder(); - ImmutableList.Builder columnTypes = ImmutableList.builder(); - ImmutableList.Builder columnList = ImmutableList.builder(); - Set rowkeyColumns = tableColumns.stream().filter(col -> isPrimaryKey(col, tableProperties)).collect(toSet()); - ImmutableList.Builder pkNames = ImmutableList.builder(); - Optional rowkeyColumn = Optional.empty(); - if (rowkeyColumns.isEmpty()) { - // Add a rowkey when not specified in DDL - columnList.add(ROWKEY + " bigint not null"); - pkNames.add(ROWKEY); - execute(session, format("CREATE SEQUENCE %s", getEscapedTableName(schema, table + "_sequence"))); - rowkeyColumn = Optional.of(ROWKEY); - } - for (ColumnMetadata column : tableColumns) { - if (column.getComment() != null) { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support creating tables with column comment"); - } - String columnName = getIdentifierMapping().toRemoteColumnName(remoteIdentifiers, column.getName()); - columnNames.add(columnName); - columnTypes.add(column.getType()); - String typeStatement = toWriteMapping(session, column.getType()).getDataType(); - if (rowkeyColumns.contains(column)) { - typeStatement += " not null"; - pkNames.add(columnName); - } - columnList.add(format("%s %s", getEscapedArgument(columnName), typeStatement)); - } - - ImmutableList.Builder tableOptions = ImmutableList.builder(); - PhoenixTableProperties.getSaltBuckets(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.SALT_BUCKETS + "=" + value)); - PhoenixTableProperties.getSplitOn(tableProperties).ifPresent(value -> tableOptions.add("SPLIT ON (" + value.replace('"', '\'') + ")")); - PhoenixTableProperties.getDisableWal(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DISABLE_WAL + "=" + value)); - PhoenixTableProperties.getDefaultColumnFamily(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DEFAULT_COLUMN_FAMILY + "=" + value)); - PhoenixTableProperties.getBloomfilter(tableProperties).ifPresent(value -> tableOptions.add(ColumnFamilyDescriptorBuilder.BLOOMFILTER + "='" + value + "'")); - PhoenixTableProperties.getVersions(tableProperties).ifPresent(value -> tableOptions.add(HConstants.VERSIONS + "=" + value)); - PhoenixTableProperties.getMinVersions(tableProperties).ifPresent(value -> tableOptions.add(ColumnFamilyDescriptorBuilder.MIN_VERSIONS + "=" + value)); - PhoenixTableProperties.getCompression(tableProperties).ifPresent(value -> tableOptions.add(ColumnFamilyDescriptorBuilder.COMPRESSION + "='" + value + "'")); - PhoenixTableProperties.getTimeToLive(tableProperties).ifPresent(value -> tableOptions.add(ColumnFamilyDescriptorBuilder.TTL + "=" + value)); - PhoenixTableProperties.getDataBlockEncoding(tableProperties).ifPresent(value -> tableOptions.add(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING + "='" + value + "'")); - - String sql = format( - "CREATE %s TABLE 
%s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s", - immutable, - getEscapedTableName(schema, table), - join(", ", columnList.build()), - join(", ", pkNames.build()), - join(", ", tableOptions.build())); - - execute(session, sql); - - return new PhoenixOutputTableHandle( - schema, - table, - columnNames.build(), - columnTypes.build(), - Optional.empty(), - rowkeyColumn); - } - catch (SQLException e) { - if (e.getErrorCode() == SQLExceptionCode.TABLE_ALREADY_EXIST.getErrorCode()) { - throw new TrinoException(ALREADY_EXISTS, "Phoenix table already exists", e); - } - throw new TrinoException(PHOENIX_METADATA_ERROR, "Error creating Phoenix table", e); - } - } - - @Override - public void renameColumn(ConnectorSession session, JdbcTableHandle handle, JdbcColumnHandle jdbcColumn, String newColumnName) - { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support renaming columns"); - } - - @Override - protected void renameTable(ConnectorSession session, String catalogName, String schemaName, String tableName, SchemaTableName newTable) - { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support renaming tables"); - } - - @Override - public void renameSchema(ConnectorSession session, String schemaName, String newSchemaName) - { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support renaming schemas"); - } - - @Override - public Map getTableProperties(ConnectorSession session, JdbcTableHandle handle) - { - ImmutableMap.Builder properties = ImmutableMap.builder(); - RemoteTableName remoteTableName = handle.getRequiredNamedRelation().getRemoteTableName(); - - try (Connection connection = connectionFactory.openConnection(session); - Admin admin = connection.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) { - String schemaName = toPhoenixSchemaName(remoteTableName.getSchemaName().orElse(null)); - PTable table = getTable(connection, SchemaUtil.getTableName(schemaName, remoteTableName.getTableName())); - - boolean salted = table.getBucketNum() != null; - StringJoiner joiner = new StringJoiner(","); - List pkColumns = table.getPKColumns(); - for (PColumn pkColumn : pkColumns.subList(salted ? 
1 : 0, pkColumns.size())) { - joiner.add(pkColumn.getName().getString()); - } - properties.put(PhoenixTableProperties.ROWKEYS, joiner.toString()); - - if (table.getBucketNum() != null) { - properties.put(PhoenixTableProperties.SALT_BUCKETS, table.getBucketNum()); - } - if (table.isWALDisabled()) { - properties.put(PhoenixTableProperties.DISABLE_WAL, table.isWALDisabled()); - } - if (table.isImmutableRows()) { - properties.put(PhoenixTableProperties.IMMUTABLE_ROWS, table.isImmutableRows()); - } - - String defaultFamilyName = QueryConstants.DEFAULT_COLUMN_FAMILY; - if (table.getDefaultFamilyName() != null) { - defaultFamilyName = table.getDefaultFamilyName().getString(); - properties.put(PhoenixTableProperties.DEFAULT_COLUMN_FAMILY, defaultFamilyName); - } - - TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(table.getPhysicalName().getBytes())); - - ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies(); - for (ColumnFamilyDescriptor columnFamily : columnFamilies) { - if (columnFamily.getNameAsString().equals(defaultFamilyName)) { - if (columnFamily.getBloomFilterType() != BloomType.NONE) { - properties.put(PhoenixTableProperties.BLOOMFILTER, columnFamily.getBloomFilterType()); - } - if (columnFamily.getMaxVersions() != 1) { - properties.put(PhoenixTableProperties.VERSIONS, columnFamily.getMaxVersions()); - } - if (columnFamily.getMinVersions() > 0) { - properties.put(PhoenixTableProperties.MIN_VERSIONS, columnFamily.getMinVersions()); - } - if (columnFamily.getCompressionType() != Compression.Algorithm.NONE) { - properties.put(PhoenixTableProperties.COMPRESSION, columnFamily.getCompressionType()); - } - if (columnFamily.getTimeToLive() < FOREVER) { - properties.put(PhoenixTableProperties.TTL, columnFamily.getTimeToLive()); - } - if (columnFamily.getDataBlockEncoding() != DataBlockEncoding.NONE) { - properties.put(PhoenixTableProperties.DATA_BLOCK_ENCODING, columnFamily.getDataBlockEncoding()); - } - break; - } - } - } - catch (org.apache.phoenix.schema.TableNotFoundException e) { - // Rethrow as Trino TableNotFoundException to suppress the exception during listing information_schema - throw new io.trino.spi.connector.TableNotFoundException(new SchemaTableName(remoteTableName.getSchemaName().orElse(null), remoteTableName.getTableName())); - } - catch (IOException | SQLException e) { - throw new TrinoException(PHOENIX_METADATA_ERROR, "Couldn't get Phoenix table properties", e); - } - return properties.buildOrThrow(); - } - - @Override - public void setColumnType(ConnectorSession session, JdbcTableHandle handle, JdbcColumnHandle column, Type type) - { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support setting column types"); - } - - private static LongReadFunction dateReadFunction() - { - return (resultSet, index) -> { - // Convert to LocalDate from java.sql.Date via String because java.sql.Date#toLocalDate() returns wrong results in B.C. dates. -5881579-07-11 -> +5881580-07-11 - // Phoenix JDBC driver supports getObject(index, LocalDate.class), but it leads to incorrect results.
-5877641-06-23 -> 7642-06-23 & 5881580-07-11 -> 1580-07-11 - // The current implementation still returns +10 days during julian -> gregorian switch - return LocalDate.parse(new SimpleDateFormat(DATE_FORMAT).format(resultSet.getDate(index)), LOCAL_DATE_FORMATTER).toEpochDay(); - }; - } - - private static LongWriteFunction dateWriteFunctionUsingString() - { - return new LongWriteFunction() { - @Override - public String getBindExpression() - { - return "TO_DATE(?, 'y-MM-dd G', 'local')"; - } - - @Override - public void set(PreparedStatement statement, int index, long value) - throws SQLException - { - statement.setString(index, LOCAL_DATE_FORMATTER.format(LocalDate.ofEpochDay(value))); - } - }; - } - - private static ColumnMapping arrayColumnMapping(ConnectorSession session, ArrayType arrayType, String elementJdbcTypeName, PredicatePushdownController pushdownController) - { - return ColumnMapping.objectMapping( - arrayType, - arrayReadFunction(session, arrayType.getElementType()), - arrayWriteFunction(session, arrayType.getElementType(), elementJdbcTypeName), - pushdownController); - } - - private static ObjectReadFunction arrayReadFunction(ConnectorSession session, Type elementType) - { - return ObjectReadFunction.of(Block.class, (resultSet, columnIndex) -> { - Object[] objectArray = toBoxedArray(resultSet.getArray(columnIndex).getArray()); - return jdbcObjectArrayToBlock(session, elementType, objectArray); - }); - } - - private static ObjectWriteFunction arrayWriteFunction(ConnectorSession session, Type elementType, String elementJdbcTypeName) - { - return ObjectWriteFunction.of(Block.class, (statement, index, block) -> { - Object[] jdbcObjectArray = getJdbcObjectArray(session, elementType, block); - PhoenixArray phoenixArray = (PhoenixArray) statement.getConnection().createArrayOf(elementJdbcTypeName, jdbcObjectArray); - for (int i = 0; i < jdbcObjectArray.length; i++) { - if (jdbcObjectArray[i] == null && phoenixArray.getElement(i) != null) { - // TODO (https://github.com/trinodb/trino/issues/6421) Prevent writing incorrect results due to Phoenix JDBC driver bug - throw new TrinoException(PHOENIX_QUERY_ERROR, format("Phoenix JDBC driver replaced 'null' with '%s' at index %s in %s", phoenixArray.getElement(i), i + 1, phoenixArray)); - } - } - statement.setArray(index, phoenixArray); - }); - } - - private JdbcTypeHandle getArrayElementTypeHandle(JdbcTypeHandle arrayTypeHandle) - { - String arrayTypeName = arrayTypeHandle.getJdbcTypeName() - .orElseThrow(() -> new TrinoException(PHOENIX_METADATA_ERROR, "Type name is missing for jdbc type: " + JDBCType.valueOf(arrayTypeHandle.getJdbcType()))); - checkArgument(arrayTypeName.endsWith(" ARRAY"), "array type must end with ' ARRAY'"); - arrayTypeName = arrayTypeName.substring(0, arrayTypeName.length() - " ARRAY".length()); - verify(arrayTypeHandle.getCaseSensitivity().isEmpty(), "Case sensitivity not supported"); - return new JdbcTypeHandle( - PDataType.fromSqlTypeName(arrayTypeName).getSqlType(), - Optional.of(arrayTypeName), - arrayTypeHandle.getColumnSize(), - arrayTypeHandle.getDecimalDigits(), - arrayTypeHandle.getArrayDimensions(), - Optional.empty()); - } - - public QueryPlan getQueryPlan(PhoenixPreparedStatement inputQuery) - { - try { - // Optimize the query plan so that we potentially use secondary indexes - QueryPlan queryPlan = inputQuery.optimizeQuery(); - // Initialize the query plan so it sets up the parallel scans - queryPlan.iterator(MapReduceParallelScanGrouper.getInstance()); - return queryPlan; - } - catch (SQLException 
e) { - throw new TrinoException(PHOENIX_QUERY_ERROR, "Failed to get the Phoenix query plan", e); - } - } - - private static ResultSet getResultSet(PhoenixInputSplit split, QueryPlan queryPlan) - { - List scans = split.getScans(); - try { - List iterators = new ArrayList<>(scans.size()); - StatementContext context = queryPlan.getContext(); - // Clear the table region boundary cache to make sure long running jobs stay up to date - PName physicalTableName = queryPlan.getTableRef().getTable().getPhysicalName(); - PhoenixConnection phoenixConnection = context.getConnection(); - ConnectionQueryServices services = phoenixConnection.getQueryServices(); - services.clearTableRegionCache(TableName.valueOf(physicalTableName.getBytes())); - - for (Scan scan : scans) { - scan = new Scan(scan); - // For MR, skip the region boundary check exception if we encounter a split. ref: PHOENIX-2599 - scan.setAttribute(SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true)); - - ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance( - context.getReadMetricsQueue(), - physicalTableName.getString(), - scan, - phoenixConnection.getLogLevel()); - - TableResultIterator tableResultIterator = new TableResultIterator( - phoenixConnection.getMutationState(), - scan, - scanMetricsHolder, - services.getRenewLeaseThresholdMilliSeconds(), - queryPlan, - MapReduceParallelScanGrouper.getInstance()); - iterators.add(LookAheadResultIterator.wrap(tableResultIterator)); - } - ResultIterator iterator = ConcatResultIterator.newIterator(iterators); - if (context.getSequenceManager().getSequenceCount() > 0) { - iterator = new SequenceResultIterator(iterator, context.getSequenceManager()); - } - // Clone the row projector as it's not thread safe and would be used simultaneously by - // multiple threads otherwise. 
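- // At this point the iterator stack is, conceptually: one TableResultIterator per Scan, each wrapped in a - // LookAheadResultIterator, all concatenated by ConcatResultIterator (plus a SequenceResultIterator when - // sequences are in play), and finally exposed to the page source as a plain JDBC ResultSet.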
- return new PhoenixResultSet(iterator, queryPlan.getProjector().cloneIfNecessary(), context); - } - catch (SQLException e) { - throw new TrinoException(PHOENIX_QUERY_ERROR, "Error while setting up Phoenix ResultSet", e); - } - catch (IOException e) { - throw new TrinoException(PhoenixErrorCode.PHOENIX_INTERNAL_ERROR, "Error while copying scan", e); - } - } - - public JdbcTableHandle buildPlainTable(JdbcTableHandle handle) - { - checkArgument(handle.isNamedRelation(), "Can only build a plain table handle from a named relation"); - - SchemaTableName schemaTableName = handle.getRequiredNamedRelation().getSchemaTableName(); - RemoteTableName remoteTableName = handle.getRequiredNamedRelation().getRemoteTableName(); - return new JdbcTableHandle(schemaTableName, remoteTableName, Optional.empty()); - } - - public JdbcTableHandle updatedScanColumnTable(ConnectorSession session, ConnectorTableHandle table, Optional<List<JdbcColumnHandle>> originalColumns, JdbcColumnHandle mergeRowIdColumnHandle) - { - JdbcTableHandle tableHandle = (JdbcTableHandle) table; - if (originalColumns.isEmpty()) { - return tableHandle; - } - List<JdbcColumnHandle> scanColumnHandles = originalColumns.get(); - checkArgument(!scanColumnHandles.isEmpty(), "Scan columns should not be empty"); - checkArgument(tryFind(scanColumnHandles.iterator(), column -> MERGE_ROW_ID_COLUMN_NAME.equalsIgnoreCase(column.getColumnName())).isPresent(), "Merge row id column must exist in original columns"); - - return new JdbcTableHandle( - tableHandle.getRelationHandle(), - tableHandle.getConstraint(), - tableHandle.getConstraintExpressions(), - tableHandle.getSortOrder(), - tableHandle.getLimit(), - Optional.of(getUpdatedScanColumnHandles(session, tableHandle, scanColumnHandles, mergeRowIdColumnHandle)), - tableHandle.getOtherReferencedTables(), - tableHandle.getNextSyntheticColumnId(), - tableHandle.getAuthorization(), - tableHandle.getUpdateAssignments()); - } - - private List<JdbcColumnHandle> getUpdatedScanColumnHandles(ConnectorSession session, JdbcTableHandle tableHandle, List<JdbcColumnHandle> scanColumnHandles, JdbcColumnHandle mergeRowIdColumnHandle) - { - RowType columnType = (RowType) mergeRowIdColumnHandle.getColumnType(); - List<JdbcColumnHandle> primaryKeyColumnHandles = getPrimaryKeyColumnHandles(session, tableHandle); - Set<String> mergeRowIdFieldNames = columnType.getFields().stream() - .map(RowType.Field::getName) - .filter(Optional::isPresent) - .map(Optional::get) - .collect(toImmutableSet()); - Set<String> primaryKeyColumnNames = primaryKeyColumnHandles.stream() - .map(JdbcColumnHandle::getColumnName) - .collect(toImmutableSet()); - checkArgument(mergeRowIdFieldNames.containsAll(primaryKeyColumnNames), "Merge row id fields should contain all primary keys"); - - ImmutableList.Builder<JdbcColumnHandle> columnHandleBuilder = ImmutableList.builder(); - scanColumnHandles.stream() - .filter(jdbcColumnHandle -> !MERGE_ROW_ID_COLUMN_NAME.equalsIgnoreCase(jdbcColumnHandle.getColumnName())) - .forEach(columnHandleBuilder::add); - // Add merge row id fields - for (JdbcColumnHandle columnHandle : primaryKeyColumnHandles) { - String columnName = columnHandle.getColumnName(); - if (ROWKEY.equalsIgnoreCase(columnName)) { - checkArgument(primaryKeyColumnHandles.size() == 1, "ROWKEY must be the only primary key column"); - columnHandleBuilder.add(ROWKEY_COLUMN_HANDLE); - break; - } - - if (!tryFind(scanColumnHandles.iterator(), column -> column.getColumnName().equalsIgnoreCase(columnName)).isPresent()) { - columnHandleBuilder.add(columnHandle); - } - } - - return columnHandleBuilder.build(); - } - - public List<JdbcColumnHandle> getPrimaryKeyColumnHandles(ConnectorSession session, JdbcTableHandle tableHandle) - {
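- // A handle carrying a projected column list cannot be used for metadata lookups, so fall back to a - // plain named-relation handle before reading table properties and columns.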
- if (tableHandle.getColumns().isPresent()) { - tableHandle = buildPlainTable(tableHandle); - } - - Map tableProperties = getTableProperties(session, tableHandle); - List primaryKeyColumnHandles = getColumns(session, tableHandle) - .stream() - .filter(columnHandle -> PhoenixColumnProperties.isPrimaryKey(columnHandle.getColumnMetadata(), tableProperties)) - .collect(toImmutableList()); - verify(!primaryKeyColumnHandles.isEmpty(), "Phoenix primary key is empty"); - return primaryKeyColumnHandles; - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClientModule.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClientModule.java deleted file mode 100644 index 52bac20a9f28..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixClientModule.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.inject.Binder; -import com.google.inject.Key; -import com.google.inject.Provides; -import com.google.inject.Scopes; -import com.google.inject.Singleton; -import io.airlift.configuration.AbstractConfigurationAwareModule; -import io.opentelemetry.api.OpenTelemetry; -import io.trino.plugin.base.classloader.ClassLoaderSafeConnectorMetadata; -import io.trino.plugin.base.classloader.ClassLoaderSafeConnectorPageSinkProvider; -import io.trino.plugin.base.classloader.ClassLoaderSafeConnectorPageSourceProvider; -import io.trino.plugin.base.classloader.ClassLoaderSafeConnectorSplitManager; -import io.trino.plugin.base.classloader.ForClassLoaderSafe; -import io.trino.plugin.base.mapping.IdentifierMappingModule; -import io.trino.plugin.jdbc.ConfiguringConnectionFactory; -import io.trino.plugin.jdbc.ConnectionFactory; -import io.trino.plugin.jdbc.DecimalModule; -import io.trino.plugin.jdbc.DefaultQueryBuilder; -import io.trino.plugin.jdbc.DriverConnectionFactory; -import io.trino.plugin.jdbc.DynamicFilteringStats; -import io.trino.plugin.jdbc.ForBaseJdbc; -import io.trino.plugin.jdbc.ForJdbcDynamicFiltering; -import io.trino.plugin.jdbc.ForRecordCursor; -import io.trino.plugin.jdbc.JdbcClient; -import io.trino.plugin.jdbc.JdbcDiagnosticModule; -import io.trino.plugin.jdbc.JdbcDynamicFilteringConfig; -import io.trino.plugin.jdbc.JdbcDynamicFilteringSessionProperties; -import io.trino.plugin.jdbc.JdbcDynamicFilteringSplitManager; -import io.trino.plugin.jdbc.JdbcMetadataConfig; -import io.trino.plugin.jdbc.JdbcMetadataSessionProperties; -import io.trino.plugin.jdbc.JdbcWriteConfig; -import io.trino.plugin.jdbc.JdbcWriteSessionProperties; -import io.trino.plugin.jdbc.LazyConnectionFactory; -import io.trino.plugin.jdbc.MaxDomainCompactionThreshold; -import io.trino.plugin.jdbc.QueryBuilder; -import io.trino.plugin.jdbc.RetryingConnectionFactoryModule; -import io.trino.plugin.jdbc.ReusableConnectionFactoryModule; -import io.trino.plugin.jdbc.StatsCollecting; -import io.trino.plugin.jdbc.TypeHandlingJdbcConfig; -import 
io.trino.plugin.jdbc.TypeHandlingJdbcSessionProperties; -import io.trino.plugin.jdbc.credential.EmptyCredentialProvider; -import io.trino.plugin.jdbc.logging.RemoteQueryModifierModule; -import io.trino.spi.TrinoException; -import io.trino.spi.connector.ConnectorMetadata; -import io.trino.spi.connector.ConnectorPageSinkProvider; -import io.trino.spi.connector.ConnectorPageSourceProvider; -import io.trino.spi.connector.ConnectorSplitManager; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.phoenix.jdbc.PhoenixDriver; -import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver; - -import java.sql.SQLException; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.ExecutorService; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService; -import static com.google.inject.multibindings.OptionalBinder.newOptionalBinder; -import static io.airlift.configuration.ConditionalModule.conditionalModule; -import static io.airlift.configuration.ConfigBinder.configBinder; -import static io.trino.plugin.base.ClosingBinder.closingBinder; -import static io.trino.plugin.jdbc.JdbcModule.bindSessionPropertiesProvider; -import static io.trino.plugin.jdbc.JdbcModule.bindTablePropertiesProvider; -import static io.trino.plugin.phoenix5.ConfigurationInstantiator.newEmptyConfiguration; -import static io.trino.plugin.phoenix5.PhoenixClient.DEFAULT_DOMAIN_COMPACTION_THRESHOLD; -import static io.trino.plugin.phoenix5.PhoenixErrorCode.PHOENIX_CONFIG_ERROR; -import static java.util.Objects.requireNonNull; -import static org.weakref.jmx.guice.ExportBinder.newExporter; - -public class PhoenixClientModule - extends AbstractConfigurationAwareModule -{ - private final String catalogName; - - public PhoenixClientModule(String catalogName) - { - this.catalogName = requireNonNull(catalogName, "catalogName is null"); - } - - @Override - protected void setup(Binder binder) - { - install(new RemoteQueryModifierModule()); - install(new RetryingConnectionFactoryModule()); - binder.bind(ConnectorSplitManager.class).annotatedWith(ForJdbcDynamicFiltering.class).to(PhoenixSplitManager.class).in(Scopes.SINGLETON); - binder.bind(ConnectorSplitManager.class).annotatedWith(ForClassLoaderSafe.class).to(JdbcDynamicFilteringSplitManager.class).in(Scopes.SINGLETON); - binder.bind(ConnectorSplitManager.class).to(ClassLoaderSafeConnectorSplitManager.class).in(Scopes.SINGLETON); - binder.bind(ConnectorPageSinkProvider.class).annotatedWith(ForClassLoaderSafe.class).to(PhoenixPageSinkProvider.class).in(Scopes.SINGLETON); - binder.bind(ConnectorPageSinkProvider.class).to(ClassLoaderSafeConnectorPageSinkProvider.class).in(Scopes.SINGLETON); - binder.bind(ConnectorPageSourceProvider.class).annotatedWith(ForClassLoaderSafe.class).to(PhoenixPageSourceProvider.class).in(Scopes.SINGLETON); - binder.bind(ConnectorPageSourceProvider.class).to(ClassLoaderSafeConnectorPageSourceProvider.class).in(Scopes.SINGLETON); - - binder.bind(QueryBuilder.class).to(DefaultQueryBuilder.class).in(Scopes.SINGLETON); - newOptionalBinder(binder, Key.get(int.class, MaxDomainCompactionThreshold.class)); - configBinder(binder).bindConfigDefaults(JdbcMetadataConfig.class, config -> config.setDomainCompactionThreshold(DEFAULT_DOMAIN_COMPACTION_THRESHOLD)); - - configBinder(binder).bindConfig(TypeHandlingJdbcConfig.class); - bindSessionPropertiesProvider(binder, TypeHandlingJdbcSessionProperties.class); - 
bindSessionPropertiesProvider(binder, JdbcMetadataSessionProperties.class); - bindSessionPropertiesProvider(binder, JdbcWriteSessionProperties.class); - bindSessionPropertiesProvider(binder, PhoenixSessionProperties.class); - bindSessionPropertiesProvider(binder, JdbcDynamicFilteringSessionProperties.class); - - binder.bind(DynamicFilteringStats.class).in(Scopes.SINGLETON); - newExporter(binder).export(DynamicFilteringStats.class) - .as(generator -> generator.generatedNameOf(DynamicFilteringStats.class, catalogName)); - - configBinder(binder).bindConfig(JdbcMetadataConfig.class); - configBinder(binder).bindConfig(JdbcWriteConfig.class); - configBinder(binder).bindConfig(JdbcDynamicFilteringConfig.class); - - binder.bind(PhoenixClient.class).in(Scopes.SINGLETON); - binder.bind(JdbcClient.class).annotatedWith(ForBaseJdbc.class).to(Key.get(PhoenixClient.class)).in(Scopes.SINGLETON); - binder.bind(JdbcClient.class).to(Key.get(JdbcClient.class, StatsCollecting.class)).in(Scopes.SINGLETON); - binder.bind(ConnectorMetadata.class).annotatedWith(ForClassLoaderSafe.class).to(PhoenixMetadata.class).in(Scopes.SINGLETON); - binder.bind(ConnectorMetadata.class).to(ClassLoaderSafeConnectorMetadata.class).in(Scopes.SINGLETON); - - install(conditionalModule( - PhoenixConfig.class, - PhoenixConfig::isReuseConnection, - new ReusableConnectionFactoryModule(), - innerBinder -> innerBinder.bind(ConnectionFactory.class).to(LazyConnectionFactory.class).in(Scopes.SINGLETON))); - - bindTablePropertiesProvider(binder, PhoenixTableProperties.class); - binder.bind(PhoenixColumnProperties.class).in(Scopes.SINGLETON); - - binder.bind(PhoenixConnector.class).in(Scopes.SINGLETON); - - checkConfiguration(buildConfigObject(PhoenixConfig.class).getConnectionUrl()); - - install(new JdbcDiagnosticModule()); - install(new IdentifierMappingModule()); - install(new DecimalModule()); - - closingBinder(binder) - .registerExecutor(ExecutorService.class, ForRecordCursor.class); - } - - private void checkConfiguration(String connectionUrl) - { - try { - PhoenixDriver driver = PhoenixDriver.INSTANCE; - checkArgument(driver.acceptsURL(connectionUrl), "Invalid JDBC URL for Phoenix connector"); - } - catch (SQLException e) { - throw new TrinoException(PHOENIX_CONFIG_ERROR, e); - } - } - - @Provides - @Singleton - @ForBaseJdbc - public ConnectionFactory getConnectionFactory(PhoenixConfig config, OpenTelemetry openTelemetry) - throws SQLException - { - return new ConfiguringConnectionFactory( - DriverConnectionFactory.builder( - PhoenixDriver.INSTANCE, // Note: for some reason new PhoenixDriver won't work. - config.getConnectionUrl(), - new EmptyCredentialProvider()) - .setConnectionProperties(getConnectionProperties(config)) - .setOpenTelemetry(openTelemetry) - .build(), - connection -> { - // Per JDBC spec, a Driver is expected to have new connections in auto-commit mode. - // This seems not to be true for PhoenixDriver, so we need to be explicit here. 
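- // Without auto-commit, Phoenix buffers upserts in its client-side MutationState until an explicit - // commit(), so writes issued through connections from this factory could otherwise be silently dropped.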
- connection.setAutoCommit(true); - }); - } - - public static Properties getConnectionProperties(PhoenixConfig config) - throws SQLException - { - Configuration resourcesConfig = readConfig(config); - Properties connectionProperties = new Properties(); - for (Map.Entry entry : resourcesConfig) { - connectionProperties.setProperty(entry.getKey(), entry.getValue()); - } - - PhoenixEmbeddedDriver.ConnectionInfo connectionInfo = PhoenixEmbeddedDriver.ConnectionInfo.create(config.getConnectionUrl()); - connectionInfo.asProps().asMap().forEach(connectionProperties::setProperty); - return connectionProperties; - } - - private static Configuration readConfig(PhoenixConfig config) - { - Configuration result = newEmptyConfiguration(); - for (String resourcePath : config.getResourceConfigFiles()) { - result.addResource(new Path(resourcePath)); - } - return result; - } - - @Singleton - @ForRecordCursor - @Provides - public ExecutorService createRecordCursorExecutor() - { - return newDirectExecutorService(); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixColumnProperties.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixColumnProperties.java deleted file mode 100644 index 11e6c4c058d8..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixColumnProperties.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.inject.Inject; -import io.trino.spi.connector.ColumnMetadata; -import io.trino.spi.session.PropertyMetadata; - -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static io.trino.plugin.phoenix5.PhoenixTableProperties.getRowkeys; -import static io.trino.spi.session.PropertyMetadata.booleanProperty; - -public class PhoenixColumnProperties -{ - public static final String PRIMARY_KEY = "primary_key"; - - private final List<PropertyMetadata<?>> columnProperties; - - @Inject - public PhoenixColumnProperties() - { - columnProperties = ImmutableList.of( - booleanProperty( - PRIMARY_KEY, - "True if the column is part of the primary key", - false, - false)); - } - - public List<PropertyMetadata<?>> getColumnProperties() - { - return columnProperties; - } - - public static boolean isPrimaryKey(ColumnMetadata col, Map<String, Object> tableProperties) - { - Optional<List<String>> rowkeysTableProp = getRowkeys(tableProperties); - if (rowkeysTableProp.isPresent()) { - return rowkeysTableProp.get().stream().anyMatch(col.getName()::equalsIgnoreCase); - } - Boolean isPk = (Boolean) col.getProperties().get(PRIMARY_KEY); - return isPk != null && isPk; - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConfig.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConfig.java deleted file mode 100644 index 92b64baf2558..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConfig.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.common.base.Splitter; -import com.google.common.collect.ImmutableList; -import io.airlift.configuration.Config; -import io.airlift.configuration.ConfigDescription; -import io.airlift.configuration.validation.FileExists; -import jakarta.validation.constraints.Max; -import jakarta.validation.constraints.Min; -import jakarta.validation.constraints.NotNull; - -import java.util.List; - -public class PhoenixConfig -{ - static final int MAX_ALLOWED_SCANS_PER_SPLIT = 1000; - - private String connectionUrl; - private List<String> resourceConfigFiles = ImmutableList.of(); - - /* - * By default, group at most 20 HBase scans into a single Split. - * There is at least one Split per HBase region; HBase's default region size is 20GB - * and Phoenix's default scan chunk size is 300MB. - * Any value between 10 and perhaps 30 is a good default. 20 is a good compromise allowing - * 3-4 parallel scans per region with all default settings. - * A large value here makes sense when the guidepost width in Phoenix has been reduced.
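- * For example, with the defaults above: 20GB per region / 300MB per scan chunk is roughly 68 scans - * per region, and grouping them 20 per Split yields ceil(68 / 20) = 4 Splits that can run in parallel.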
- */ - private int maxScansPerSplit = 20; - private boolean reuseConnection = true; - - @NotNull - public String getConnectionUrl() - { - return connectionUrl; - } - - @Config("phoenix.connection-url") - public PhoenixConfig setConnectionUrl(String connectionUrl) - { - this.connectionUrl = connectionUrl; - return this; - } - - @NotNull - public List<@FileExists String> getResourceConfigFiles() - { - return resourceConfigFiles; - } - - @Config("phoenix.config.resources") - public PhoenixConfig setResourceConfigFiles(String files) - { - this.resourceConfigFiles = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(files); - return this; - } - - @Min(1) - @Max(MAX_ALLOWED_SCANS_PER_SPLIT) - public int getMaxScansPerSplit() - { - return maxScansPerSplit; - } - - @Config("phoenix.max-scans-per-split") - @ConfigDescription("Maximum number of HBase scans that will be performed in a single split.") - public PhoenixConfig setMaxScansPerSplit(int scansPerSplit) - { - this.maxScansPerSplit = scansPerSplit; - return this; - } - - public boolean isReuseConnection() - { - return reuseConnection; - } - - @Config("query.reuse-connection") - @ConfigDescription("Enables reusing JDBC connection within single Trino query to run metadata queries from Coordinator to remote service") - public PhoenixConfig setReuseConnection(boolean reuseConnection) - { - this.reuseConnection = reuseConnection; - return this; - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java deleted file mode 100644 index ab951f7924a9..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.inject.Inject; -import io.airlift.bootstrap.LifeCycleManager; -import io.trino.plugin.base.session.SessionPropertiesProvider; -import io.trino.plugin.jdbc.JdbcTransactionHandle; -import io.trino.plugin.jdbc.TablePropertiesProvider; -import io.trino.spi.connector.Connector; -import io.trino.spi.connector.ConnectorMetadata; -import io.trino.spi.connector.ConnectorPageSinkProvider; -import io.trino.spi.connector.ConnectorPageSourceProvider; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.connector.ConnectorSplitManager; -import io.trino.spi.connector.ConnectorTransactionHandle; -import io.trino.spi.session.PropertyMetadata; -import io.trino.spi.transaction.IsolationLevel; - -import java.util.List; -import java.util.Set; - -import static com.google.common.collect.ImmutableList.toImmutableList; -import static java.util.Objects.requireNonNull; - -public class PhoenixConnector - implements Connector -{ - private final LifeCycleManager lifeCycleManager; - private final ConnectorMetadata metadata; - private final ConnectorSplitManager splitManager; - private final ConnectorPageSinkProvider pageSinkProvider; - private final ConnectorPageSourceProvider pageSourceProvider; - private final List> tableProperties; - private final PhoenixColumnProperties columnProperties; - private final List> sessionProperties; - - @Inject - public PhoenixConnector( - LifeCycleManager lifeCycleManager, - ConnectorMetadata metadata, - ConnectorSplitManager splitManager, - ConnectorPageSinkProvider pageSinkProvider, - ConnectorPageSourceProvider pageSourceProvider, - Set tableProperties, - PhoenixColumnProperties columnProperties, - Set sessionProperties) - { - this.lifeCycleManager = requireNonNull(lifeCycleManager, "lifeCycleManager is null"); - this.metadata = requireNonNull(metadata, "metadata is null"); - this.splitManager = requireNonNull(splitManager, "splitManager is null"); - this.pageSinkProvider = requireNonNull(pageSinkProvider, "pageSinkProvider is null"); - this.pageSourceProvider = requireNonNull(pageSourceProvider, "pageSourceProvider is null"); - this.tableProperties = tableProperties.stream() - .flatMap(tablePropertiesProvider -> tablePropertiesProvider.getTableProperties().stream()) - .collect(toImmutableList()); - this.columnProperties = requireNonNull(columnProperties, "columnProperties is null"); - this.sessionProperties = sessionProperties.stream() - .flatMap(sessionPropertiesProvider -> sessionPropertiesProvider.getSessionProperties().stream()) - .collect(toImmutableList()); - } - - @Override - public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit) - { - return new JdbcTransactionHandle(); - } - - @Override - public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) - { - return metadata; - } - - @Override - public ConnectorSplitManager getSplitManager() - { - return splitManager; - } - - @Override - public ConnectorPageSinkProvider getPageSinkProvider() - { - return pageSinkProvider; - } - - @Override - public ConnectorPageSourceProvider getPageSourceProvider() - { - return pageSourceProvider; - } - - @Override - public List> getTableProperties() - { - return tableProperties; - } - - @Override - public List> getColumnProperties() - { - return columnProperties.getColumnProperties(); - } - - @Override - public List> getSessionProperties() - { - return sessionProperties; - } - - @Override - public final void 
shutdown() - { - lifeCycleManager.stop(); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnectorFactory.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnectorFactory.java deleted file mode 100644 index 28c04aab640f..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnectorFactory.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.inject.Injector; -import io.airlift.bootstrap.Bootstrap; -import io.airlift.json.JsonModule; -import io.opentelemetry.api.OpenTelemetry; -import io.trino.spi.NodeManager; -import io.trino.spi.catalog.CatalogName; -import io.trino.spi.classloader.ThreadContextClassLoader; -import io.trino.spi.connector.Connector; -import io.trino.spi.connector.ConnectorContext; -import io.trino.spi.connector.ConnectorFactory; -import io.trino.spi.type.TypeManager; - -import java.util.Map; - -import static io.trino.plugin.base.Versions.checkStrictSpiVersionMatch; -import static java.util.Objects.requireNonNull; - -public class PhoenixConnectorFactory - implements ConnectorFactory -{ - private final ClassLoader classLoader; - - public PhoenixConnectorFactory(ClassLoader classLoader) - { - this.classLoader = requireNonNull(classLoader, "classLoader is null"); - } - - @Override - public String getName() - { - return "phoenix5"; - } - - @Override - public Connector create(String catalogName, Map requiredConfig, ConnectorContext context) - { - requireNonNull(requiredConfig, "requiredConfig is null"); - checkStrictSpiVersionMatch(context, this); - - try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { - Bootstrap app = new Bootstrap( - new JsonModule(), - new PhoenixClientModule(catalogName), - binder -> { - binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName)); - binder.bind(ClassLoader.class).toInstance(PhoenixConnectorFactory.class.getClassLoader()); - binder.bind(TypeManager.class).toInstance(context.getTypeManager()); - binder.bind(NodeManager.class).toInstance(context.getNodeManager()); - binder.bind(OpenTelemetry.class).toInstance(context.getOpenTelemetry()); - }); - - Injector injector = app - .doNotInitializeLogging() - .setRequiredConfigurationProperties(requiredConfig) - .initialize(); - - return injector.getInstance(PhoenixConnector.class); - } - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixErrorCode.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixErrorCode.java deleted file mode 100644 index 8372a71775d6..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixErrorCode.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import io.trino.spi.ErrorCode; -import io.trino.spi.ErrorCodeSupplier; -import io.trino.spi.ErrorType; - -import static io.trino.spi.ErrorType.EXTERNAL; -import static io.trino.spi.ErrorType.INTERNAL_ERROR; - -public enum PhoenixErrorCode - implements ErrorCodeSupplier -{ - PHOENIX_INTERNAL_ERROR(0, INTERNAL_ERROR), - PHOENIX_QUERY_ERROR(1, EXTERNAL), - PHOENIX_CONFIG_ERROR(2, INTERNAL_ERROR), - PHOENIX_METADATA_ERROR(3, EXTERNAL), - PHOENIX_SPLIT_ERROR(4, EXTERNAL); - - private final ErrorCode errorCode; - - PhoenixErrorCode(int code, ErrorType type) - { - errorCode = new ErrorCode(code + 0x0106_0000, name(), type); - } - - @Override - public ErrorCode toErrorCode() - { - return errorCode; - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeSink.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeSink.java deleted file mode 100644 index ad440238408d..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeSink.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableList; -import io.airlift.slice.Slice; -import io.trino.plugin.jdbc.JdbcClient; -import io.trino.plugin.jdbc.JdbcOutputTableHandle; -import io.trino.plugin.jdbc.JdbcPageSink; -import io.trino.plugin.jdbc.WriteFunction; -import io.trino.plugin.jdbc.logging.RemoteQueryModifier; -import io.trino.spi.Page; -import io.trino.spi.block.Block; -import io.trino.spi.block.RowBlock; -import io.trino.spi.connector.ConnectorMergeSink; -import io.trino.spi.connector.ConnectorMergeTableHandle; -import io.trino.spi.connector.ConnectorPageSink; -import io.trino.spi.connector.ConnectorPageSinkId; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.type.RowType; -import io.trino.spi.type.Type; - -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.stream.IntStream; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.collect.ImmutableList.toImmutableList; -import static io.trino.plugin.phoenix5.PhoenixClient.ROWKEY; -import static io.trino.plugin.phoenix5.PhoenixClient.ROWKEY_COLUMN_HANDLE; -import static io.trino.spi.type.TinyintType.TINYINT; -import static java.lang.String.format; -import static java.util.concurrent.CompletableFuture.completedFuture; -import static org.apache.phoenix.util.SchemaUtil.getEscapedArgument; - -public class PhoenixMergeSink - implements ConnectorMergeSink -{ - private final String schemaName; - private final String tableName; - private final boolean hasRowKey; - private final int columnCount; - private final List mergeRowIdFieldNames; - - private final ConnectorPageSink insertSink; - private final ConnectorPageSink updateSink; - private final ConnectorPageSink deleteSink; - - public PhoenixMergeSink(PhoenixClient phoenixClient, RemoteQueryModifier remoteQueryModifier, ConnectorSession session, ConnectorMergeTableHandle mergeHandle, ConnectorPageSinkId pageSinkId) - { - PhoenixMergeTableHandle phoenixMergeTableHandle = (PhoenixMergeTableHandle) mergeHandle; - PhoenixOutputTableHandle phoenixOutputTableHandle = phoenixMergeTableHandle.phoenixOutputTableHandle(); - this.schemaName = phoenixOutputTableHandle.getSchemaName(); - this.tableName = phoenixOutputTableHandle.getTableName(); - this.hasRowKey = phoenixOutputTableHandle.rowkeyColumn().isPresent(); - this.columnCount = phoenixOutputTableHandle.getColumnNames().size(); - - this.insertSink = new JdbcPageSink(session, phoenixOutputTableHandle, phoenixClient, pageSinkId, remoteQueryModifier); - this.updateSink = createUpdateSink(session, phoenixOutputTableHandle, phoenixClient, pageSinkId, remoteQueryModifier); - - ImmutableList.Builder mergeRowIdFieldNamesBuilder = ImmutableList.builder(); - ImmutableList.Builder mergeRowIdFieldTypesBuilder = ImmutableList.builder(); - RowType mergeRowIdColumnType = (RowType) phoenixMergeTableHandle.mergeRowIdColumnHandle().getColumnType(); - for (RowType.Field field : mergeRowIdColumnType.getFields()) { - checkArgument(field.getName().isPresent(), "Merge row id column field must have name"); - mergeRowIdFieldNamesBuilder.add(getEscapedArgument(field.getName().get())); - mergeRowIdFieldTypesBuilder.add(field.getType()); - } - this.mergeRowIdFieldNames = mergeRowIdFieldNamesBuilder.build(); - this.deleteSink = createDeleteSink(session, mergeRowIdFieldTypesBuilder.build(), phoenixClient, pageSinkId, 
remoteQueryModifier); - } - - private ConnectorPageSink createUpdateSink( - ConnectorSession session, - PhoenixOutputTableHandle phoenixOutputTableHandle, - PhoenixClient phoenixClient, - ConnectorPageSinkId pageSinkId, - RemoteQueryModifier remoteQueryModifier) - { - ImmutableList.Builder<String> columnNamesBuilder = ImmutableList.builder(); - ImmutableList.Builder<Type> columnTypesBuilder = ImmutableList.builder(); - columnNamesBuilder.addAll(phoenixOutputTableHandle.getColumnNames()); - columnTypesBuilder.addAll(phoenixOutputTableHandle.getColumnTypes()); - if (hasRowKey) { - columnNamesBuilder.add(ROWKEY); - columnTypesBuilder.add(ROWKEY_COLUMN_HANDLE.getColumnType()); - } - - PhoenixOutputTableHandle updateOutputTableHandle = new PhoenixOutputTableHandle( - schemaName, - tableName, - columnNamesBuilder.build(), - columnTypesBuilder.build(), - Optional.empty(), - Optional.empty()); - return new JdbcPageSink(session, updateOutputTableHandle, phoenixClient, pageSinkId, remoteQueryModifier); - } - - private ConnectorPageSink createDeleteSink( - ConnectorSession session, - List<Type> mergeRowIdFieldTypes, - PhoenixClient phoenixClient, - ConnectorPageSinkId pageSinkId, - RemoteQueryModifier remoteQueryModifier) - { - checkArgument(mergeRowIdFieldNames.size() == mergeRowIdFieldTypes.size(), "Merge row id column names and types sizes do not match"); - JdbcOutputTableHandle deleteOutputTableHandle = new PhoenixOutputTableHandle( - schemaName, - tableName, - mergeRowIdFieldNames, - mergeRowIdFieldTypes, - Optional.empty(), - Optional.empty()); - - return new DeleteSink(session, deleteOutputTableHandle, phoenixClient, pageSinkId, remoteQueryModifier); - } - - private class DeleteSink - extends JdbcPageSink - { - public DeleteSink(ConnectorSession session, JdbcOutputTableHandle handle, JdbcClient jdbcClient, ConnectorPageSinkId pageSinkId, RemoteQueryModifier remoteQueryModifier) - { - super(session, handle, jdbcClient, pageSinkId, remoteQueryModifier); - } - - @Override - protected String getSinkSql(JdbcClient jdbcClient, JdbcOutputTableHandle outputTableHandle, List<WriteFunction> columnWriters) - { - List<String> conjuncts = mergeRowIdFieldNames.stream() - .map(name -> name + " = ? 
") - .collect(toImmutableList()); - checkArgument(!conjuncts.isEmpty(), "Merge row id fields should not empty"); - String whereCondition = Joiner.on(" AND ").join(conjuncts); - - return format("DELETE FROM %s.%s WHERE %s", schemaName, tableName, whereCondition); - } - } - - @Override - public void storeMergedRows(Page page) - { - checkArgument(page.getChannelCount() == 2 + columnCount, "The page size should be 2 + columnCount (%s), but is %s", columnCount, page.getChannelCount()); - int positionCount = page.getPositionCount(); - Block operationBlock = page.getBlock(columnCount); - - int[] dataChannel = IntStream.range(0, columnCount).toArray(); - Page dataPage = page.getColumns(dataChannel); - - int[] insertPositions = new int[positionCount]; - int insertPositionCount = 0; - int[] deletePositions = new int[positionCount]; - int deletePositionCount = 0; - int[] updatePositions = new int[positionCount]; - int updatePositionCount = 0; - - for (int position = 0; position < positionCount; position++) { - int operation = TINYINT.getByte(operationBlock, position); - switch (operation) { - case INSERT_OPERATION_NUMBER -> { - insertPositions[insertPositionCount] = position; - insertPositionCount++; - } - case DELETE_OPERATION_NUMBER -> { - deletePositions[deletePositionCount] = position; - deletePositionCount++; - } - case UPDATE_OPERATION_NUMBER -> { - updatePositions[updatePositionCount] = position; - updatePositionCount++; - } - default -> throw new IllegalStateException("Unexpected value: " + operation); - } - } - - if (insertPositionCount > 0) { - insertSink.appendPage(dataPage.getPositions(insertPositions, 0, insertPositionCount)); - } - - List rowIdFields = RowBlock.getRowFieldsFromBlock(page.getBlock(columnCount + 1)); - if (deletePositionCount > 0) { - Block[] deleteBlocks = new Block[rowIdFields.size()]; - for (int field = 0; field < rowIdFields.size(); field++) { - deleteBlocks[field] = rowIdFields.get(field).getPositions(deletePositions, 0, deletePositionCount); - } - deleteSink.appendPage(new Page(deletePositionCount, deleteBlocks)); - } - - if (updatePositionCount > 0) { - Page updatePage = dataPage.getPositions(updatePositions, 0, updatePositionCount); - if (hasRowKey) { - updatePage = updatePage.appendColumn(rowIdFields.get(0).getPositions(updatePositions, 0, updatePositionCount)); - } - - updateSink.appendPage(updatePage); - } - } - - @Override - public CompletableFuture> finish() - { - insertSink.finish(); - deleteSink.finish(); - updateSink.finish(); - return completedFuture(ImmutableList.of()); - } - - @Override - public void abort() - { - insertSink.abort(); - deleteSink.abort(); - updateSink.abort(); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeTableHandle.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeTableHandle.java deleted file mode 100644 index 232df18b0cf8..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMergeTableHandle.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import io.trino.plugin.jdbc.JdbcColumnHandle; -import io.trino.plugin.jdbc.JdbcTableHandle; -import io.trino.spi.connector.ConnectorMergeTableHandle; - -public record PhoenixMergeTableHandle(JdbcTableHandle tableHandle, PhoenixOutputTableHandle phoenixOutputTableHandle, JdbcColumnHandle mergeRowIdColumnHandle) - implements ConnectorMergeTableHandle -{ - @JsonCreator - public PhoenixMergeTableHandle( - @JsonProperty("tableHandle") JdbcTableHandle tableHandle, - @JsonProperty("phoenixOutputTableHandle") PhoenixOutputTableHandle phoenixOutputTableHandle, - @JsonProperty("mergeRowIdColumnHandle") JdbcColumnHandle mergeRowIdColumnHandle) - { - this.tableHandle = tableHandle; - this.phoenixOutputTableHandle = phoenixOutputTableHandle; - this.mergeRowIdColumnHandle = mergeRowIdColumnHandle; - } - - @JsonProperty - @Override - public JdbcTableHandle getTableHandle() - { - return tableHandle; - } - - @Override - @JsonProperty - public PhoenixOutputTableHandle phoenixOutputTableHandle() - { - return phoenixOutputTableHandle; - } - - @Override - @JsonProperty - public JdbcColumnHandle mergeRowIdColumnHandle() - { - return mergeRowIdColumnHandle; - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMetadata.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMetadata.java deleted file mode 100644 index c9ce8c4d44b8..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixMetadata.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.inject.Inject; -import io.airlift.slice.Slice; -import io.trino.plugin.base.mapping.IdentifierMapping; -import io.trino.plugin.jdbc.DefaultJdbcMetadata; -import io.trino.plugin.jdbc.JdbcColumnHandle; -import io.trino.plugin.jdbc.JdbcNamedRelationHandle; -import io.trino.plugin.jdbc.JdbcQueryEventListener; -import io.trino.plugin.jdbc.JdbcTableHandle; -import io.trino.plugin.jdbc.JdbcTypeHandle; -import io.trino.plugin.jdbc.RemoteTableName; -import io.trino.spi.TrinoException; -import io.trino.spi.connector.AggregateFunction; -import io.trino.spi.connector.AggregationApplicationResult; -import io.trino.spi.connector.ColumnHandle; -import io.trino.spi.connector.ColumnMetadata; -import io.trino.spi.connector.ConnectorInsertTableHandle; -import io.trino.spi.connector.ConnectorMergeTableHandle; -import io.trino.spi.connector.ConnectorOutputMetadata; -import io.trino.spi.connector.ConnectorOutputTableHandle; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.connector.ConnectorTableHandle; -import io.trino.spi.connector.ConnectorTableLayout; -import io.trino.spi.connector.ConnectorTableMetadata; -import io.trino.spi.connector.ConnectorTableProperties; -import io.trino.spi.connector.ConnectorTableSchema; -import io.trino.spi.connector.LocalProperty; -import io.trino.spi.connector.RetryMode; -import io.trino.spi.connector.SchemaTableName; -import io.trino.spi.connector.SortingProperty; -import io.trino.spi.expression.Constant; -import io.trino.spi.predicate.TupleDomain; -import io.trino.spi.security.TrinoPrincipal; -import io.trino.spi.statistics.ComputedStatistics; -import io.trino.spi.type.RowType; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Types; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Verify.verify; -import static com.google.common.collect.ImmutableList.toImmutableList; -import static io.trino.plugin.phoenix5.MetadataUtil.getEscapedTableName; -import static io.trino.plugin.phoenix5.MetadataUtil.toTrinoSchemaName; -import static io.trino.plugin.phoenix5.PhoenixClient.MERGE_ROW_ID_COLUMN_NAME; -import static io.trino.plugin.phoenix5.PhoenixErrorCode.PHOENIX_METADATA_ERROR; -import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.trino.spi.connector.RetryMode.NO_RETRIES; -import static java.lang.String.format; -import static java.util.Objects.requireNonNull; -import static org.apache.phoenix.util.SchemaUtil.getEscapedArgument; - -public class PhoenixMetadata - extends DefaultJdbcMetadata -{ - // Maps to Phoenix's default empty schema - public static final String DEFAULT_SCHEMA = "default"; - // col name used for PK if none provided in DDL - private static final String ROWKEY = "ROWKEY"; - - private final PhoenixClient phoenixClient; - private final IdentifierMapping identifierMapping; - - @Inject - public PhoenixMetadata(PhoenixClient phoenixClient, IdentifierMapping identifierMapping, Set jdbcQueryEventListeners) - { - super(phoenixClient, false, jdbcQueryEventListeners); - this.phoenixClient = requireNonNull(phoenixClient, "phoenixClient is null"); - this.identifierMapping = requireNonNull(identifierMapping, "identifierMapping is null"); - } - - @Override - public JdbcTableHandle getTableHandle(ConnectorSession session, 
SchemaTableName schemaTableName) - { - return phoenixClient.getTableHandle(session, schemaTableName) - .map(JdbcTableHandle::asPlainTable) - .map(JdbcNamedRelationHandle::getRemoteTableName) - .map(remoteTableName -> new JdbcTableHandle( - schemaTableName, - new RemoteTableName(remoteTableName.getCatalogName(), Optional.ofNullable(toTrinoSchemaName(remoteTableName.getSchemaName().orElse(null))), remoteTableName.getTableName()), - Optional.empty())) - .orElse(null); - } - - @Override - public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle table) - { - JdbcTableHandle tableHandle = (JdbcTableHandle) table; - List<LocalProperty<ColumnHandle>> sortingProperties = tableHandle.getSortOrder() - .map(properties -> properties - .stream() - .map(item -> (LocalProperty<ColumnHandle>) new SortingProperty<>( - item.getColumn(), - item.getSortOrder())) - .collect(toImmutableList())) - .orElse(ImmutableList.of()); - - return new ConnectorTableProperties(TupleDomain.all(), Optional.empty(), Optional.empty(), sortingProperties); - } - - @Override - public ConnectorTableSchema getTableSchema(ConnectorSession session, ConnectorTableHandle table) - { - JdbcTableHandle handle = (JdbcTableHandle) table; - return new ConnectorTableSchema( - handle.getRequiredNamedRelation().getSchemaTableName(), - getColumnMetadata(session, handle).stream() - .map(ColumnMetadata::getColumnSchema) - .collect(toImmutableList())); - } - - @Override - public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table) - { - JdbcTableHandle handle = (JdbcTableHandle) table; - return new ConnectorTableMetadata( - handle.getRequiredNamedRelation().getSchemaTableName(), - getColumnMetadata(session, handle), - phoenixClient.getTableProperties(session, handle)); - } - - private List<ColumnMetadata> getColumnMetadata(ConnectorSession session, JdbcTableHandle handle) - { - return phoenixClient.getColumns(session, handle).stream() - .filter(column -> !ROWKEY.equalsIgnoreCase(column.getColumnName())) - .map(JdbcColumnHandle::getColumnMetadata) - .collect(toImmutableList()); - } - - @Override - public void createSchema(ConnectorSession session, String schemaName, Map<String, Object> properties, TrinoPrincipal owner) - { - checkArgument(properties.isEmpty(), "Can't have properties for schema creation"); - if (DEFAULT_SCHEMA.equalsIgnoreCase(schemaName)) { - throw new TrinoException(NOT_SUPPORTED, "Can't create 'default' schema which maps to Phoenix empty schema"); - } - phoenixClient.execute(session, format("CREATE SCHEMA %s", getEscapedArgument(toRemoteSchemaName(session, schemaName)))); - } - - @Override - public void dropSchema(ConnectorSession session, String schemaName, boolean cascade) - { - if (cascade) { - // Phoenix doesn't support CASCADE option https://phoenix.apache.org/language/index.html#drop_schema - throw new TrinoException(NOT_SUPPORTED, "This connector does not support dropping schemas with CASCADE option"); - } - if (DEFAULT_SCHEMA.equalsIgnoreCase(schemaName)) { - throw new TrinoException(NOT_SUPPORTED, "Can't drop 'default' schema which maps to Phoenix empty schema"); - } - phoenixClient.execute(session, format("DROP SCHEMA %s", getEscapedArgument(toRemoteSchemaName(session, schemaName)))); - } - - private String toRemoteSchemaName(ConnectorSession session, String schemaName) - { - try (Connection connection = phoenixClient.getConnection(session)) { - return identifierMapping.toRemoteSchemaName(phoenixClient.getRemoteIdentifiers(connection), session.getIdentity(), schemaName); - } - catch (SQLException e) { - throw new
TrinoException(PHOENIX_METADATA_ERROR, "Couldn't get casing for the schema name", e); - } - } - - @Override - public Optional<ConnectorTableHandle> applyUpdate(ConnectorSession session, ConnectorTableHandle handle, Map<ColumnHandle, Constant> assignments) - { - // Phoenix supports row-level updates, so reject this pushdown path earlier than the generic JDBC client would - return Optional.empty(); - } - - @Override - public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting) - { - phoenixClient.beginCreateTable(session, tableMetadata); - } - - @Override - public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorTableLayout> layout, RetryMode retryMode) - { - if (retryMode != NO_RETRIES) { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support query retries"); - } - return phoenixClient.beginCreateTable(session, tableMetadata); - } - - @Override - public Optional<ConnectorOutputMetadata> finishCreateTable(ConnectorSession session, ConnectorOutputTableHandle tableHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics) - { - return Optional.empty(); - } - - @Override - public ConnectorInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle, List<ColumnHandle> columns, RetryMode retryMode) - { - if (retryMode != NO_RETRIES) { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support query retries"); - } - JdbcTableHandle handle = (JdbcTableHandle) tableHandle; - Optional<String> rowkeyColumn = phoenixClient.getColumns(session, handle).stream() - .map(JdbcColumnHandle::getColumnName) - .filter(ROWKEY::equalsIgnoreCase) - .findFirst(); - - List<JdbcColumnHandle> columnHandles = columns.stream() - .map(JdbcColumnHandle.class::cast) - .collect(toImmutableList()); - - RemoteTableName remoteTableName = handle.asPlainTable().getRemoteTableName(); - return new PhoenixOutputTableHandle( - remoteTableName.getSchemaName().orElse(null), - remoteTableName.getTableName(), - columnHandles.stream().map(JdbcColumnHandle::getColumnName).collect(toImmutableList()), - columnHandles.stream().map(JdbcColumnHandle::getColumnType).collect(toImmutableList()), - Optional.of(columnHandles.stream().map(JdbcColumnHandle::getJdbcTypeHandle).collect(toImmutableList())), - rowkeyColumn); - } - - @Override - public Optional<ConnectorOutputMetadata> finishInsert( - ConnectorSession session, - ConnectorInsertTableHandle insertHandle, - List<ConnectorTableHandle> sourceTableHandles, - Collection<Slice> fragments, - Collection<ComputedStatistics> computedStatistics) - { - return Optional.empty(); - } - - @Override - public void addColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnMetadata column) - { - if (column.getComment() != null) { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support adding columns with comments"); - } - - JdbcTableHandle handle = (JdbcTableHandle) tableHandle; - RemoteTableName remoteTableName = handle.asPlainTable().getRemoteTableName(); - phoenixClient.execute(session, format( - "ALTER TABLE %s ADD %s %s", - getEscapedTableName(remoteTableName.getSchemaName().orElse(null), remoteTableName.getTableName()), - phoenixClient.quoted(column.getName()), - phoenixClient.toWriteMapping(session, column.getType()).getDataType())); - } - - @Override - public void dropColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle column) - { - JdbcTableHandle handle = (JdbcTableHandle) tableHandle; - JdbcColumnHandle columnHandle = (JdbcColumnHandle) column; - RemoteTableName remoteTableName = handle.asPlainTable().getRemoteTableName(); - phoenixClient.execute(session,
format( - "ALTER TABLE %s DROP COLUMN %s", - getEscapedTableName(remoteTableName.getSchemaName().orElse(null), remoteTableName.getTableName()), - phoenixClient.quoted(columnHandle.getColumnName()))); - } - - @Override - public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle) - { - // if we autogenerated a ROWKEY for this table, delete the associated sequence as well - boolean hasRowkey = getColumnHandles(session, tableHandle).values().stream() - .map(JdbcColumnHandle.class::cast) - .map(JdbcColumnHandle::getColumnName) - .anyMatch(ROWKEY::equals); - if (hasRowkey) { - JdbcTableHandle jdbcHandle = (JdbcTableHandle) tableHandle; - RemoteTableName remoteTableName = jdbcHandle.asPlainTable().getRemoteTableName(); - phoenixClient.execute(session, format("DROP SEQUENCE %s", getEscapedTableName(remoteTableName.getSchemaName().orElse(null), remoteTableName.getTableName() + "_sequence"))); - } - phoenixClient.dropTable(session, (JdbcTableHandle) tableHandle); - } - - @Override - public JdbcColumnHandle getMergeRowIdColumnHandle(ConnectorSession session, ConnectorTableHandle tableHandle) - { - JdbcTableHandle handle = (JdbcTableHandle) tableHandle; - - List<RowType.Field> fields = phoenixClient.getPrimaryKeyColumnHandles(session, handle).stream() - .map(columnHandle -> new RowType.Field(Optional.of(columnHandle.getColumnName()), columnHandle.getColumnType())) - .collect(toImmutableList()); - verify(!fields.isEmpty(), "Phoenix primary key is empty"); - - return new JdbcColumnHandle( - MERGE_ROW_ID_COLUMN_NAME, - new JdbcTypeHandle(Types.ROWID, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()), - RowType.from(fields)); - } - - @Override - public ConnectorMergeTableHandle beginMerge(ConnectorSession session, ConnectorTableHandle tableHandle, RetryMode retryMode) - { - JdbcTableHandle handle = (JdbcTableHandle) tableHandle; - checkArgument(handle.isNamedRelation(), "Merge target must be named relation table"); - JdbcTableHandle plainTable = phoenixClient.buildPlainTable(handle); - JdbcColumnHandle mergeRowIdColumnHandle = getMergeRowIdColumnHandle(session, plainTable); - - List<JdbcColumnHandle> columns = phoenixClient.getColumns(session, plainTable).stream() - .filter(column -> !ROWKEY.equalsIgnoreCase(column.getColumnName())) - .collect(toImmutableList()); - PhoenixOutputTableHandle phoenixOutputTableHandle = (PhoenixOutputTableHandle) beginInsert(session, plainTable, ImmutableList.copyOf(columns), retryMode); - - return new PhoenixMergeTableHandle( - phoenixClient.updatedScanColumnTable(session, handle, handle.getColumns(), mergeRowIdColumnHandle), - phoenixOutputTableHandle, - mergeRowIdColumnHandle); - } - - @Override - public void finishMerge(ConnectorSession session, ConnectorMergeTableHandle mergeTableHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics) - { - } - - @Override - public void truncateTable(ConnectorSession session, ConnectorTableHandle tableHandle) - { - throw new TrinoException(NOT_SUPPORTED, "This connector does not support truncating tables"); - } - - @Override - public Optional<AggregationApplicationResult<ConnectorTableHandle>> applyAggregation( - ConnectorSession session, - ConnectorTableHandle table, - List<AggregateFunction> aggregates, - Map<String, ColumnHandle> assignments, - List<List<ColumnHandle>> groupingSets) - { - // TODO support aggregation pushdown - return Optional.empty(); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixOutputTableHandle.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixOutputTableHandle.java deleted file mode 100644 index e43f95b9cb1c..000000000000
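For readers skimming this removal: getMergeRowIdColumnHandle above surfaced the Phoenix primary key to Trino's MERGE machinery as a single synthetic ROW-typed column. A minimal sketch of that shape, with hypothetical key names and types that are not taken from this patch:

    // The merge row ID is a ROW whose fields mirror the table's primary-key
    // columns, e.g. a compound (BIGINT, VARCHAR) key in this hypothetical table.
    RowType mergeRowIdType = RowType.from(ImmutableList.of(
            new RowType.Field(Optional.of("order_key"), BigintType.BIGINT),
            new RowType.Field(Optional.of("region"), VarcharType.VARCHAR)));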
--- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixOutputTableHandle.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import io.trino.plugin.jdbc.JdbcOutputTableHandle; -import io.trino.plugin.jdbc.JdbcTypeHandle; -import io.trino.spi.type.Type; -import jakarta.annotation.Nullable; - -import java.util.List; -import java.util.Optional; - -import static java.util.Objects.requireNonNull; - -public class PhoenixOutputTableHandle - extends JdbcOutputTableHandle -{ - private final Optional<String> rowkeyColumn; - - @JsonCreator - public PhoenixOutputTableHandle( - @Nullable @JsonProperty("schemaName") String schemaName, - @JsonProperty("tableName") String tableName, - @JsonProperty("columnNames") List<String> columnNames, - @JsonProperty("columnTypes") List<Type> columnTypes, - @JsonProperty("jdbcColumnTypes") Optional<List<JdbcTypeHandle>> jdbcColumnTypes, - @JsonProperty("rowkeyColumn") Optional<String> rowkeyColumn) - { - super("", schemaName, tableName, columnNames, columnTypes, jdbcColumnTypes, Optional.empty(), Optional.empty()); - this.rowkeyColumn = requireNonNull(rowkeyColumn, "rowkeyColumn is null"); - } - - @JsonProperty - public Optional<String> rowkeyColumn() - { - return rowkeyColumn; - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSinkProvider.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSinkProvider.java deleted file mode 100644 index 56e303330c8c..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSinkProvider.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package io.trino.plugin.phoenix5; - -import com.google.inject.Inject; -import io.trino.plugin.jdbc.JdbcPageSinkProvider; -import io.trino.plugin.jdbc.logging.RemoteQueryModifier; -import io.trino.spi.connector.ConnectorInsertTableHandle; -import io.trino.spi.connector.ConnectorMergeSink; -import io.trino.spi.connector.ConnectorMergeTableHandle; -import io.trino.spi.connector.ConnectorOutputTableHandle; -import io.trino.spi.connector.ConnectorPageSink; -import io.trino.spi.connector.ConnectorPageSinkId; -import io.trino.spi.connector.ConnectorPageSinkProvider; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.connector.ConnectorTransactionHandle; - -public class PhoenixPageSinkProvider - implements ConnectorPageSinkProvider -{ - private final JdbcPageSinkProvider delegate; - private final PhoenixClient jdbcClient; - private final RemoteQueryModifier remoteQueryModifier; - - @Inject - public PhoenixPageSinkProvider(PhoenixClient jdbcClient, RemoteQueryModifier remoteQueryModifier) - { - this.delegate = new JdbcPageSinkProvider(jdbcClient, remoteQueryModifier); - this.jdbcClient = jdbcClient; - this.remoteQueryModifier = remoteQueryModifier; - } - - @Override - public ConnectorPageSink createPageSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorOutputTableHandle outputTableHandle, ConnectorPageSinkId pageSinkId) - { - return delegate.createPageSink(transactionHandle, session, outputTableHandle, pageSinkId); - } - - @Override - public ConnectorPageSink createPageSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorInsertTableHandle insertTableHandle, ConnectorPageSinkId pageSinkId) - { - return delegate.createPageSink(transactionHandle, session, insertTableHandle, pageSinkId); - } - - @Override - public ConnectorMergeSink createMergeSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorMergeTableHandle mergeHandle, ConnectorPageSinkId pageSinkId) - { - return new PhoenixMergeSink(jdbcClient, remoteQueryModifier, session, mergeHandle, pageSinkId); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSource.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSource.java deleted file mode 100644 index 990dd3c3e47a..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSource.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import io.trino.spi.Page; -import io.trino.spi.block.Block; -import io.trino.spi.connector.ConnectorPageSource; - -import java.io.IOException; -import java.util.List; - -import static com.google.common.base.Preconditions.checkArgument; -import static io.trino.spi.block.RowBlock.fromFieldBlocks; -import static java.util.Objects.requireNonNull; - -public class PhoenixPageSource - implements ConnectorPageSource -{ - private final ConnectorPageSource delegate; - private final List columnAdaptations; - - public PhoenixPageSource(ConnectorPageSource delegate, List columnAdaptations) - { - this.delegate = delegate; - this.columnAdaptations = columnAdaptations; - } - - @Override - public long getCompletedBytes() - { - return delegate.getCompletedBytes(); - } - - @Override - public long getReadTimeNanos() - { - return delegate.getReadTimeNanos(); - } - - @Override - public boolean isFinished() - { - return delegate.isFinished(); - } - - @Override - public Page getNextPage() - { - Page page = delegate.getNextPage(); - if (page == null || columnAdaptations.isEmpty()) { - return page; - } - - return getColumnAdaptationsPage(page); - } - - private Page getColumnAdaptationsPage(Page page) - { - Block[] blocks = new Block[columnAdaptations.size()]; - for (int i = 0; i < columnAdaptations.size(); i++) { - blocks[i] = columnAdaptations.get(i).getBlock(page); - } - - return new Page(page.getPositionCount(), blocks); - } - - @Override - public long getMemoryUsage() - { - return delegate.getMemoryUsage(); - } - - @Override - public void close() - throws IOException - { - delegate.close(); - } - - public interface ColumnAdaptation - { - Block getBlock(Page sourcePage); - - static ColumnAdaptation sourceColumn(int index) - { - return new SourceColumn(index); - } - - static ColumnAdaptation mergedRowColumns(List mergeRowIdSourceChannels) - { - return new MergedRowAdaptation(mergeRowIdSourceChannels); - } - } - - private static final class MergedRowAdaptation - implements ColumnAdaptation - { - private final List mergeRowIdSourceChannels; - - public MergedRowAdaptation(List mergeRowIdSourceChannels) - { - this.mergeRowIdSourceChannels = mergeRowIdSourceChannels; - } - - @Override - public Block getBlock(Page page) - { - requireNonNull(page, "page is null"); - Block[] mergeRowIdBlocks = new Block[mergeRowIdSourceChannels.size()]; - for (int i = 0; i < mergeRowIdBlocks.length; i++) { - mergeRowIdBlocks[i] = page.getBlock(mergeRowIdSourceChannels.get(i)); - } - return fromFieldBlocks(page.getPositionCount(), mergeRowIdBlocks); - } - } - - private record SourceColumn(int sourceChannel) - implements ColumnAdaptation - { - private SourceColumn - { - checkArgument(sourceChannel >= 0, "sourceChannel is negative"); - } - - @Override - public Block getBlock(Page sourcePage) - { - return sourcePage.getBlock(sourceChannel); - } - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSourceProvider.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSourceProvider.java deleted file mode 100644 index 55a5f941d71d..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPageSourceProvider.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
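To make the adaptation mechanism above concrete: scan columns pass through unchanged, while the merge row ID is assembled from several scan channels into one ROW block. A sketch with hypothetical channel indices:

    // Channel 0 is emitted as-is; channels 1 and 2 (the primary-key channels in
    // this hypothetical layout) are folded into a single ROW block that carries
    // the merge row ID.
    List<PhoenixPageSource.ColumnAdaptation> adaptations = ImmutableList.of(
            PhoenixPageSource.ColumnAdaptation.sourceColumn(0),
            PhoenixPageSource.ColumnAdaptation.mergedRowColumns(ImmutableList.of(1, 2)));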
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.inject.Inject; -import io.trino.plugin.jdbc.ForRecordCursor; -import io.trino.plugin.jdbc.JdbcColumnHandle; -import io.trino.plugin.jdbc.JdbcRecordSetProvider; -import io.trino.plugin.jdbc.JdbcTableHandle; -import io.trino.spi.connector.ColumnHandle; -import io.trino.spi.connector.ConnectorPageSource; -import io.trino.spi.connector.ConnectorPageSourceProvider; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.connector.ConnectorSplit; -import io.trino.spi.connector.ConnectorTableHandle; -import io.trino.spi.connector.ConnectorTransactionHandle; -import io.trino.spi.connector.DynamicFilter; -import io.trino.spi.connector.RecordPageSource; -import io.trino.spi.type.RowType; - -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ExecutorService; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.collect.ImmutableList.toImmutableList; -import static com.google.common.collect.Iterators.indexOf; -import static io.trino.plugin.phoenix5.PhoenixClient.MERGE_ROW_ID_COLUMN_NAME; -import static io.trino.plugin.phoenix5.PhoenixPageSource.ColumnAdaptation; - -public class PhoenixPageSourceProvider - implements ConnectorPageSourceProvider -{ - private final JdbcRecordSetProvider recordSetProvider; - private final PhoenixClient phoenixClient; - - @Inject - public PhoenixPageSourceProvider(PhoenixClient phoenixClient, @ForRecordCursor ExecutorService executor) - { - this.recordSetProvider = new JdbcRecordSetProvider(phoenixClient, executor); - this.phoenixClient = phoenixClient; - } - - @Override - public ConnectorPageSource createPageSource(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List columns, DynamicFilter dynamicFilter) - { - JdbcTableHandle tableHandle = (JdbcTableHandle) table; - List columnHandles = columns.stream() - .map(JdbcColumnHandle.class::cast) - .collect(toImmutableList()); - int mergeRowIdChannel = indexOf(columnHandles.iterator(), column -> column.getColumnName().equalsIgnoreCase(MERGE_ROW_ID_COLUMN_NAME)); - Optional> scanColumnHandles = Optional.of(columnHandles); - if (mergeRowIdChannel != -1) { - JdbcColumnHandle mergeRowIdColumn = columnHandles.get(mergeRowIdChannel); - tableHandle = phoenixClient.updatedScanColumnTable(session, tableHandle, scanColumnHandles, mergeRowIdColumn); - scanColumnHandles = tableHandle.getColumns(); - } - - return new PhoenixPageSource( - new RecordPageSource(recordSetProvider.getRecordSet(transaction, session, split, tableHandle, scanColumnHandles.orElse(ImmutableList.of()))), - getColumnAdaptations(scanColumnHandles, mergeRowIdChannel, columnHandles)); - } - - private List getColumnAdaptations(Optional> scanColumnHandles, int mergeRowIdChannel, List columnHandles) - { - if (mergeRowIdChannel == -1) { - return ImmutableList.of(); - } - - List scanColumns = scanColumnHandles.get(); - checkArgument(!scanColumns.isEmpty(), "Scan column handles is empty"); - 
JdbcColumnHandle mergeRowIdColumn = columnHandles.get(mergeRowIdChannel); - ImmutableList.Builder columnAdaptationBuilder = ImmutableList.builder(); - for (int index = 0; index < scanColumns.size(); index++) { - if (mergeRowIdChannel == index) { - columnAdaptationBuilder.add(buildMergeIdColumnAdaptation(scanColumns, mergeRowIdColumn)); - } - columnAdaptationBuilder.add(ColumnAdaptation.sourceColumn(index)); - } - if (mergeRowIdChannel == scanColumns.size()) { - columnAdaptationBuilder.add(buildMergeIdColumnAdaptation(scanColumns, mergeRowIdColumn)); - } - return columnAdaptationBuilder.build(); - } - - private ColumnAdaptation buildMergeIdColumnAdaptation(List scanColumns, JdbcColumnHandle mergeRowIdColumn) - { - RowType columnType = (RowType) mergeRowIdColumn.getColumnType(); - List mergeRowIdSourceChannels = columnType.getFields().stream() - .map(RowType.Field::getName) - .map(Optional::get) - .map(fieldName -> indexOf(scanColumns.iterator(), handle -> handle.getColumnName().equals(fieldName))) - .peek(fieldIndex -> checkArgument(fieldIndex != -1, "Merge row id field must exist in scanned columns")) - .collect(toImmutableList()); - return ColumnAdaptation.mergedRowColumns(mergeRowIdSourceChannels); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPlugin.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPlugin.java deleted file mode 100644 index 38950b56eb7b..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixPlugin.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import io.trino.spi.Plugin; -import io.trino.spi.connector.ConnectorFactory; - -public class PhoenixPlugin - implements Plugin -{ - @Override - public Iterable getConnectorFactories() - { - return ImmutableList.of(new PhoenixConnectorFactory(PhoenixPlugin.class.getClassLoader())); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSessionProperties.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSessionProperties.java deleted file mode 100644 index 0c47c7f3a63b..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSessionProperties.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.inject.Inject; -import io.trino.plugin.base.session.SessionPropertiesProvider; -import io.trino.spi.TrinoException; -import io.trino.spi.session.PropertyMetadata; - -import java.util.List; - -import static io.trino.spi.StandardErrorCode.INVALID_SESSION_PROPERTY; -import static io.trino.spi.session.PropertyMetadata.integerProperty; -import static java.lang.String.format; - -public final class PhoenixSessionProperties - implements SessionPropertiesProvider -{ - public static final String MAX_SCANS_PER_SPLIT = "max_scans_per_split"; - - private final List> sessionProperties; - - @Inject - public PhoenixSessionProperties(PhoenixConfig phoenixConfig) - { - sessionProperties = ImmutableList.of( - integerProperty( - MAX_SCANS_PER_SPLIT, - "Maximum number of HBase scans per split", - phoenixConfig.getMaxScansPerSplit(), - PhoenixSessionProperties::validateScansPerSplit, - false)); - } - - @Override - public List> getSessionProperties() - { - return sessionProperties; - } - - private static void validateScansPerSplit(int maxScansPerSplit) - { - if (maxScansPerSplit < 1) { - throw new TrinoException(INVALID_SESSION_PROPERTY, format("%s must be greater than 0: %s", MAX_SCANS_PER_SPLIT, maxScansPerSplit)); - } - if (maxScansPerSplit > PhoenixConfig.MAX_ALLOWED_SCANS_PER_SPLIT) { - throw new TrinoException(INVALID_SESSION_PROPERTY, format("%s cannot exceed %s: %s", MAX_SCANS_PER_SPLIT, PhoenixConfig.MAX_ALLOWED_SCANS_PER_SPLIT, maxScansPerSplit)); - } - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplit.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplit.java deleted file mode 100644 index e665d6534c6d..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplit.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableMap; -import io.airlift.slice.SizeOf; -import io.trino.plugin.jdbc.JdbcSplit; -import io.trino.spi.HostAddress; -import org.apache.phoenix.mapreduce.PhoenixInputSplit; - -import java.util.List; -import java.util.Optional; - -import static io.airlift.slice.SizeOf.estimatedSizeOf; -import static io.airlift.slice.SizeOf.instanceSize; -import static io.airlift.slice.SizeOf.sizeOf; -import static java.util.Objects.requireNonNull; - -public class PhoenixSplit - extends JdbcSplit -{ - private static final int INSTANCE_SIZE = instanceSize(PhoenixSplit.class); - - private final List addresses; - private final SerializedPhoenixInputSplit serializedPhoenixInputSplit; - - @JsonCreator - public PhoenixSplit( - @JsonProperty("addresses") List addresses, - @JsonProperty("serializedPhoenixInputSplit") SerializedPhoenixInputSplit serializedPhoenixInputSplit) - { - super(Optional.empty()); - this.addresses = requireNonNull(addresses, "addresses is null"); - this.serializedPhoenixInputSplit = requireNonNull(serializedPhoenixInputSplit, "serializedPhoenixInputSplit is null"); - } - - @JsonProperty - @Override - public List getAddresses() - { - return addresses; - } - - @JsonProperty - public SerializedPhoenixInputSplit getSerializedPhoenixInputSplit() - { - return serializedPhoenixInputSplit; - } - - @JsonIgnore - public PhoenixInputSplit getPhoenixInputSplit() - { - return serializedPhoenixInputSplit.deserialize(); - } - - @Override - public long getRetainedSizeInBytes() - { - return INSTANCE_SIZE - + sizeOf(getAdditionalPredicate(), SizeOf::estimatedSizeOf) - + estimatedSizeOf(addresses, HostAddress::getRetainedSizeInBytes) - + serializedPhoenixInputSplit.getRetainedSizeInBytes(); - } - - @Override - public Object getInfo() - { - return ImmutableMap.of("addresses", addresses); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplitManager.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplitManager.java deleted file mode 100644 index c4049636b315..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixSplitManager.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.google.inject.Inject; -import io.airlift.log.Logger; -import io.trino.plugin.jdbc.JdbcColumnHandle; -import io.trino.plugin.jdbc.JdbcTableHandle; -import io.trino.spi.HostAddress; -import io.trino.spi.TrinoException; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.connector.ConnectorSplit; -import io.trino.spi.connector.ConnectorSplitManager; -import io.trino.spi.connector.ConnectorSplitSource; -import io.trino.spi.connector.ConnectorTableHandle; -import io.trino.spi.connector.ConnectorTransactionHandle; -import io.trino.spi.connector.Constraint; -import io.trino.spi.connector.DynamicFilter; -import io.trino.spi.connector.FixedSplitSource; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.phoenix.compile.QueryPlan; -import org.apache.phoenix.jdbc.PhoenixPreparedStatement; -import org.apache.phoenix.mapreduce.PhoenixInputSplit; -import org.apache.phoenix.query.KeyRange; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - -import static com.google.common.collect.ImmutableList.toImmutableList; -import static io.trino.plugin.phoenix5.PhoenixErrorCode.PHOENIX_INTERNAL_ERROR; -import static io.trino.plugin.phoenix5.PhoenixErrorCode.PHOENIX_SPLIT_ERROR; -import static java.util.Objects.requireNonNull; -import static java.util.stream.Collectors.toList; -import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY; - -public class PhoenixSplitManager - implements ConnectorSplitManager -{ - private static final Logger log = Logger.get(PhoenixSplitManager.class); - - private final PhoenixClient phoenixClient; - - @Inject - public PhoenixSplitManager(PhoenixClient phoenixClient) - { - this.phoenixClient = requireNonNull(phoenixClient, "phoenixClient is null"); - } - - @Override - public ConnectorSplitSource getSplits( - ConnectorTransactionHandle transaction, - ConnectorSession session, - ConnectorTableHandle table, - DynamicFilter dynamicFilter, - Constraint constraint) - { - JdbcTableHandle tableHandle = (JdbcTableHandle) table; - try (Connection connection = phoenixClient.getConnection(session)) { - List columns = tableHandle.getColumns() - .map(columnSet -> columnSet.stream().map(JdbcColumnHandle.class::cast).collect(toList())) - .orElseGet(() -> phoenixClient.getColumns(session, tableHandle)); - PhoenixPreparedStatement inputQuery = phoenixClient.prepareStatement( - session, - connection, - tableHandle, - columns, - Optional.empty()) - .unwrap(PhoenixPreparedStatement.class); - - int maxScansPerSplit = session.getProperty(PhoenixSessionProperties.MAX_SCANS_PER_SPLIT, Integer.class); - List splits = getSplits(inputQuery, maxScansPerSplit).stream() - .map(PhoenixInputSplit.class::cast) - .map(split -> new PhoenixSplit( - getSplitAddresses(split), - SerializedPhoenixInputSplit.serialize(split))) - .collect(toImmutableList()); - return new FixedSplitSource(splits); - } - catch (IOException | SQLException e) { - throw new TrinoException(PHOENIX_SPLIT_ERROR, "Couldn't get Phoenix splits", e); - } - } - - private List 
getSplitAddresses(PhoenixInputSplit split) - { - try { - return ImmutableList.of(HostAddress.fromString(split.getLocations()[0])); - } - catch (IOException | InterruptedException e) { - if (e instanceof InterruptedException) { - Thread.currentThread().interrupt(); - } - throw new TrinoException(PHOENIX_INTERNAL_ERROR, "Exception when getting split addresses", e); - } - } - - private List getSplits(PhoenixPreparedStatement inputQuery, int maxScansPerSplit) - throws IOException - { - QueryPlan queryPlan = phoenixClient.getQueryPlan(inputQuery); - return generateSplits(queryPlan, queryPlan.getSplits(), maxScansPerSplit); - } - - // mostly copied from PhoenixInputFormat, but without the region size calculations - private List generateSplits(QueryPlan queryPlan, List splits, int maxScansPerSplit) - throws IOException - { - requireNonNull(queryPlan, "queryPlan is null"); - requireNonNull(splits, "splits is null"); - - try (org.apache.hadoop.hbase.client.Connection connection = phoenixClient.getHConnection()) { - RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(queryPlan.getTableRef().getTable().getPhysicalName().toString())); - long regionSize = -1; - List inputSplits = new ArrayList<>(splits.size()); - for (List scans : queryPlan.getScans()) { - HRegionLocation location = regionLocator.getRegionLocation(scans.getFirst().getStartRow(), false); - String regionLocation = location.getHostname(); - - if (log.isDebugEnabled()) { - log.debug( - "Scan count[%d] : %s ~ %s", - scans.size(), - Bytes.toStringBinary(scans.getFirst().getStartRow()), - Bytes.toStringBinary(scans.getLast().getStopRow())); - log.debug("First scan : %swith scanAttribute : %s [scanCache, cacheBlock, scanBatch] : [%d, %s, %d] and regionLocation : %s", - scans.getFirst(), scans.getFirst().getAttributesMap(), scans.getFirst().getCaching(), scans.getFirst().getCacheBlocks(), scans.getFirst().getBatch(), regionLocation); - for (int i = 0, limit = scans.size(); i < limit; i++) { - log.debug("EXPECTED_UPPER_REGION_KEY[%d] : %s", i, Bytes.toStringBinary(scans.get(i).getAttribute(EXPECTED_UPPER_REGION_KEY))); - } - } - /* - * Handle parallel execution explicitly in Trino rather than internally in Phoenix. - * Each split is handled by a single ConcatResultIterator - * (See PhoenixClient.getResultSet(...)) - */ - for (List splitScans : Lists.partition(scans, maxScansPerSplit)) { - inputSplits.add(new PhoenixInputSplit(splitScans, regionSize, regionLocation)); - } - } - return inputSplits; - } - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixTableProperties.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixTableProperties.java deleted file mode 100644 index ac32d8fb67e0..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixTableProperties.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
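The Lists.partition call above is what enforces the max_scans_per_split session property. A worked sketch under hypothetical numbers: a region producing 10 scans with the property set to 4 yields three Trino splits covering 4, 4, and 2 scans.

    // scans.size() == 10 and maxScansPerSplit == 4 (hypothetical values)
    List<List<Scan>> chunks = Lists.partition(scans, maxScansPerSplit);
    // chunks have sizes 4, 4 and 2; each chunk becomes its own PhoenixInputSplit,
    // so one Trino split never runs more than maxScansPerSplit HBase scans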
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.inject.Inject; -import io.trino.plugin.jdbc.TablePropertiesProvider; -import io.trino.spi.session.PropertyMetadata; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.util.StringUtils; - -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static com.google.common.collect.ImmutableList.toImmutableList; -import static io.trino.spi.session.PropertyMetadata.booleanProperty; -import static io.trino.spi.session.PropertyMetadata.enumProperty; -import static io.trino.spi.session.PropertyMetadata.integerProperty; -import static io.trino.spi.session.PropertyMetadata.stringProperty; -import static java.util.Objects.requireNonNull; - -/** - * Class contains all table properties for the Phoenix connector. Used when creating a table: - *

- * <pre>
- * CREATE TABLE foo (a VARCHAR with (primary_key = true), b INT) WITH (SALT_BUCKETS=10, VERSIONS=5, COMPRESSION='lz');
- * </pre>
- */ -public final class PhoenixTableProperties - implements TablePropertiesProvider -{ - public static final String ROWKEYS = "rowkeys"; - public static final String SALT_BUCKETS = "salt_buckets"; - public static final String SPLIT_ON = "split_on"; - public static final String DISABLE_WAL = "disable_wal"; - public static final String IMMUTABLE_ROWS = "immutable_rows"; - public static final String DEFAULT_COLUMN_FAMILY = "default_column_family"; - public static final String BLOOMFILTER = "bloomfilter"; - public static final String VERSIONS = "versions"; - public static final String MIN_VERSIONS = "min_versions"; - public static final String COMPRESSION = "compression"; - public static final String TTL = "ttl"; - public static final String DATA_BLOCK_ENCODING = "data_block_encoding"; - - private final List> tableProperties; - - @Inject - public PhoenixTableProperties() - { - tableProperties = ImmutableList.of( - stringProperty( - ROWKEYS, - "Comma-separated list of columns to be the primary key.", - null, - false), - integerProperty( - SALT_BUCKETS, - "Number of salt buckets. This causes an extra byte to be transparently prepended to every row key to ensure an evenly distributed read and write load across all region servers.", - null, - false), - stringProperty( - SPLIT_ON, - "Comma-separated list of keys to split on during table creation.", - null, - false), - booleanProperty( - DISABLE_WAL, - "If true, causes HBase not to write data to the write-ahead-log, thus making updates faster at the expense of potentially losing data in the event of a region server failure.", - null, - false), - booleanProperty( - IMMUTABLE_ROWS, - "Set to true if the table has rows which are write-once, append-only.", - null, - false), - stringProperty( - DEFAULT_COLUMN_FAMILY, - "The column family name to use by default.", - null, - false), - enumProperty( - BLOOMFILTER, - "NONE, ROW or ROWCOL to enable blooms per column family.", - BloomType.class, - null, - false), - integerProperty( - VERSIONS, - "The maximum number of row versions to store, configured per column family via HColumnDescriptor.", - null, - false), - integerProperty( - MIN_VERSIONS, - "The minimum number of row versions to store, configured per column family via HColumnDescriptor.", - null, - false), - enumProperty( - COMPRESSION, - "Compression algorithm to use for HBase blocks. Options are: SNAPPY, GZIP, LZ, and others.", - Compression.Algorithm.class, - null, - false), - integerProperty( - TTL, - "Number of seconds for cell TTL. HBase will automatically delete rows once the expiration time is reached.", - null, - false), - enumProperty( - DATA_BLOCK_ENCODING, - "The block encoding algorithm to use for Cells in HBase blocks. 
Options are: NONE, PREFIX, DIFF, FAST_DIFF, ROW_INDEX_V1, and others.", - DataBlockEncoding.class, - null, - false)); - } - - @Override - public List<PropertyMetadata<?>> getTableProperties() - { - return tableProperties; - } - - public static Optional<Integer> getSaltBuckets(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - Integer value = (Integer) tableProperties.get(SALT_BUCKETS); - return Optional.ofNullable(value); - } - - public static Optional<String> getSplitOn(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - String value = (String) tableProperties.get(SPLIT_ON); - return Optional.ofNullable(value); - } - - public static Optional<List<String>> getRowkeys(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - String rowkeysCsv = (String) tableProperties.get(ROWKEYS); - if (rowkeysCsv == null) { - return Optional.empty(); - } - - return Optional.of(Arrays.stream(StringUtils.split(rowkeysCsv, ',')) - .map(String::trim) - .collect(toImmutableList())); - } - - public static Optional<Boolean> getDisableWal(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - Boolean value = (Boolean) tableProperties.get(DISABLE_WAL); - return Optional.ofNullable(value); - } - - public static Optional<Boolean> getImmutableRows(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - Boolean value = (Boolean) tableProperties.get(IMMUTABLE_ROWS); - return Optional.ofNullable(value); - } - - public static Optional<String> getDefaultColumnFamily(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - String value = (String) tableProperties.get(DEFAULT_COLUMN_FAMILY); - return Optional.ofNullable(value); - } - - public static Optional<BloomType> getBloomfilter(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - BloomType value = (BloomType) tableProperties.get(BLOOMFILTER); - return Optional.ofNullable(value); - } - - public static Optional<Integer> getVersions(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - Integer value = (Integer) tableProperties.get(VERSIONS); - return Optional.ofNullable(value); - } - - public static Optional<Integer> getMinVersions(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - Integer value = (Integer) tableProperties.get(MIN_VERSIONS); - return Optional.ofNullable(value); - } - - public static Optional<Compression.Algorithm> getCompression(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - Compression.Algorithm value = (Compression.Algorithm) tableProperties.get(COMPRESSION); - return Optional.ofNullable(value); - } - - public static Optional<DataBlockEncoding> getDataBlockEncoding(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - DataBlockEncoding value = (DataBlockEncoding) tableProperties.get(DATA_BLOCK_ENCODING); - return Optional.ofNullable(value); - } - - public static Optional<Integer> getTimeToLive(Map<String, Object> tableProperties) - { - requireNonNull(tableProperties); - - Integer value = (Integer) tableProperties.get(TTL); - if (value == null) { - return Optional.empty(); - } - return Optional.of(value); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/SerializedPhoenixInputSplit.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/SerializedPhoenixInputSplit.java deleted file mode 100644 index 3de85630e877..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/SerializedPhoenixInputSplit.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
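A usage sketch for the accessors above (property values hypothetical, not from this patch): ROWKEYS arrives as one CSV string that is split and trimmed into a column list, while the typed properties are plain casts.

    Map<String, Object> properties = ImmutableMap.of("rowkeys", "recordkey, ts", "salt_buckets", 10);
    Optional<List<String>> rowkeys = PhoenixTableProperties.getRowkeys(properties);    // Optional[[recordkey, ts]]
    Optional<Integer> saltBuckets = PhoenixTableProperties.getSaltBuckets(properties); // Optional[10]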
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.io.ByteStreams; -import org.apache.hadoop.io.WritableUtils; -import org.apache.phoenix.mapreduce.PhoenixInputSplit; - -import java.io.IOException; -import java.io.UncheckedIOException; - -import static io.airlift.slice.SizeOf.instanceSize; -import static io.airlift.slice.SizeOf.sizeOf; -import static java.util.Objects.requireNonNull; - -public class SerializedPhoenixInputSplit -{ - private static final int INSTANCE_SIZE = instanceSize(SerializedPhoenixInputSplit.class); - - private final byte[] bytes; - - public static SerializedPhoenixInputSplit serialize(PhoenixInputSplit split) - { - return new SerializedPhoenixInputSplit(WritableUtils.toByteArray(split)); - } - - @JsonCreator - public SerializedPhoenixInputSplit(@JsonProperty("bytes") byte[] bytes) - { - this.bytes = requireNonNull(bytes, "bytes is null"); - } - - @JsonProperty - public byte[] getBytes() - { - return bytes; - } - - public PhoenixInputSplit deserialize() - { - PhoenixInputSplit split = new PhoenixInputSplit(); - try { - split.readFields(ByteStreams.newDataInput(bytes)); - } - catch (IOException e) { - throw new UncheckedIOException(e); - } - return split; - } - - public long getRetainedSizeInBytes() - { - return INSTANCE_SIZE + sizeOf(bytes); - } -} diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/TypeUtils.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/TypeUtils.java deleted file mode 100644 index 6b299d3036a8..000000000000 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/TypeUtils.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
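A round-trip sketch of the wrapper above, assuming split is an already-built PhoenixInputSplit: the Hadoop Writable is flattened to a byte array so it can travel inside Trino's JSON-serialized PhoenixSplit, then rebuilt on the worker.

    SerializedPhoenixInputSplit wire = SerializedPhoenixInputSplit.serialize(split);
    PhoenixInputSplit copy = wire.deserialize(); // carries the same key ranges as the original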
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.base.CharMatcher; -import com.google.common.primitives.Shorts; -import com.google.common.primitives.SignedBytes; -import io.airlift.slice.Slice; -import io.trino.spi.TrinoException; -import io.trino.spi.block.Block; -import io.trino.spi.block.BlockBuilder; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.type.ArrayType; -import io.trino.spi.type.CharType; -import io.trino.spi.type.DecimalType; -import io.trino.spi.type.Int128; -import io.trino.spi.type.Type; -import io.trino.spi.type.VarcharType; -import org.joda.time.DateTimeZone; -import org.joda.time.chrono.ISOChronology; - -import java.lang.reflect.Array; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.math.MathContext; -import java.sql.Date; - -import static com.google.common.base.Preconditions.checkArgument; -import static io.airlift.slice.Slices.utf8Slice; -import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.trino.spi.type.BigintType.BIGINT; -import static io.trino.spi.type.BooleanType.BOOLEAN; -import static io.trino.spi.type.DateType.DATE; -import static io.trino.spi.type.Decimals.encodeScaledValue; -import static io.trino.spi.type.Decimals.encodeShortScaledValue; -import static io.trino.spi.type.DoubleType.DOUBLE; -import static io.trino.spi.type.IntegerType.INTEGER; -import static io.trino.spi.type.RealType.REAL; -import static io.trino.spi.type.SmallintType.SMALLINT; -import static io.trino.spi.type.TinyintType.TINYINT; -import static io.trino.spi.type.TypeUtils.readNativeValue; -import static io.trino.spi.type.TypeUtils.writeNativeValue; -import static java.lang.Float.floatToRawIntBits; -import static java.lang.Float.intBitsToFloat; -import static java.lang.Math.toIntExact; -import static java.lang.String.format; -import static java.util.Locale.ENGLISH; -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.TimeUnit.DAYS; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.joda.time.DateTimeZone.UTC; - -public final class TypeUtils -{ - private TypeUtils() {} - - public static String getArrayElementPhoenixTypeName(ConnectorSession session, PhoenixClient client, Type elementType) - { - if (elementType instanceof VarcharType) { - return "VARCHAR"; - } - - if (elementType instanceof CharType) { - return "CHAR"; - } - - if (elementType instanceof DecimalType) { - return "DECIMAL"; - } - - return client.toWriteMapping(session, elementType).getDataType().toUpperCase(ENGLISH); - } - - public static Block jdbcObjectArrayToBlock(ConnectorSession session, Type type, Object[] elements) - { - BlockBuilder builder = type.createBlockBuilder(null, elements.length); - for (Object element : elements) { - writeNativeValue(type, builder, jdbcObjectToTrinoNative(session, element, type)); - } - return builder.build(); - } - - public static Object[] getJdbcObjectArray(ConnectorSession session, Type elementType, Block block) - { - int positionCount = block.getPositionCount(); - Object[] valuesArray = new Object[positionCount]; - int subArrayLength = 1; - for (int i = 0; i < positionCount; i++) { - Object objectValue = trinoNativeToJdbcObject(session, elementType, readNativeValue(elementType, block, i)); - valuesArray[i] = objectValue; - if (objectValue != null && objectValue.getClass().isArray()) { - subArrayLength = Math.max(subArrayLength, Array.getLength(objectValue)); - } - } - if (elementType instanceof ArrayType) { - handleArrayNulls(valuesArray, 
subArrayLength); - } - return valuesArray; - } - - public static Object[] toBoxedArray(Object jdbcArray) - { - requireNonNull(jdbcArray, "jdbcArray is null"); - checkArgument(jdbcArray.getClass().isArray(), "object is not an array: %s", jdbcArray.getClass().getName()); - - if (!jdbcArray.getClass().getComponentType().isPrimitive()) { - return (Object[]) jdbcArray; - } - - int elementCount = Array.getLength(jdbcArray); - Object[] elements = new Object[elementCount]; - for (int i = 0; i < elementCount; i++) { - elements[i] = Array.get(jdbcArray, i); - } - return elements; - } - - private static void handleArrayNulls(Object[] valuesArray, int length) - { - for (int i = 0; i < valuesArray.length; i++) { - if (valuesArray[i] == null) { - valuesArray[i] = new Object[length]; - } - } - } - - private static Object jdbcObjectToTrinoNative(ConnectorSession session, Object jdbcObject, Type type) - { - if (jdbcObject == null) { - return null; - } - - if (BOOLEAN.equals(type) - || BIGINT.equals(type) - || DOUBLE.equals(type)) { - return jdbcObject; - } - - if (TINYINT.equals(type)) { - return (long) (byte) jdbcObject; - } - - if (SMALLINT.equals(type)) { - return (long) (short) jdbcObject; - } - - if (INTEGER.equals(type)) { - return (long) (int) jdbcObject; - } - - if (type instanceof ArrayType arrayType) { - return jdbcObjectArrayToBlock(session, arrayType.getElementType(), (Object[]) jdbcObject); - } - - if (type instanceof DecimalType decimalType) { - BigDecimal value = (BigDecimal) jdbcObject; - if (decimalType.isShort()) { - return encodeShortScaledValue(value, decimalType.getScale()); - } - return encodeScaledValue(value, decimalType.getScale()); - } - - if (REAL.equals(type)) { - return (long) floatToRawIntBits((float) jdbcObject); - } - - if (DATE.equals(type)) { - long localMillis = ((Date) jdbcObject).getTime(); - // Convert it to a ~midnight in UTC. 
- long utcMillis = ISOChronology.getInstance().getZone().getMillisKeepLocal(UTC, localMillis); - // convert to days - return MILLISECONDS.toDays(utcMillis); - } - - if (type instanceof VarcharType) { - return utf8Slice((String) jdbcObject); - } - - if (type instanceof CharType) { - return utf8Slice(CharMatcher.is(' ').trimTrailingFrom((String) jdbcObject)); - } - - throw new TrinoException(NOT_SUPPORTED, format("Unsupported type %s and object type %s", type, jdbcObject.getClass())); - } - - private static Object trinoNativeToJdbcObject(ConnectorSession session, Type type, Object object) - { - if (object == null) { - return null; - } - - if (DOUBLE.equals(type) || BOOLEAN.equals(type) || BIGINT.equals(type)) { - return object; - } - - if (type instanceof DecimalType decimalType) { - if (decimalType.isShort()) { - BigInteger unscaledValue = BigInteger.valueOf((long) object); - return new BigDecimal(unscaledValue, decimalType.getScale(), new MathContext(decimalType.getPrecision())); - } - BigInteger unscaledValue = ((Int128) object).toBigInteger(); - return new BigDecimal(unscaledValue, decimalType.getScale(), new MathContext(decimalType.getPrecision())); - } - - if (REAL.equals(type)) { - return intBitsToFloat(toIntExact((long) object)); - } - - if (TINYINT.equals(type)) { - return SignedBytes.checkedCast((long) object); - } - - if (SMALLINT.equals(type)) { - return Shorts.checkedCast((long) object); - } - - if (INTEGER.equals(type)) { - return toIntExact((long) object); - } - - if (DATE.equals(type)) { - // convert to midnight in default time zone - long millis = DAYS.toMillis((long) object); - return new Date(UTC.getMillisKeepLocal(DateTimeZone.getDefault(), millis)); - } - - if (type instanceof VarcharType || type instanceof CharType) { - return ((Slice) object).toStringUtf8(); - } - - if (type instanceof ArrayType arrayType) { - // process subarray of multi-dimensional array - return getJdbcObjectArray(session, arrayType.getElementType(), (Block) object); - } - - throw new TrinoException(NOT_SUPPORTED, "Unsupported type: " + type); - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixQueryRunner.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixQueryRunner.java deleted file mode 100644 index 62cc4a4e2071..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixQueryRunner.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
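A worked sketch of the DATE mapping above, using a hypothetical day count: Trino models DATE as days since the epoch, while Phoenix's java.sql.Date carries a millisecond instant, so the conversion shifts between UTC and the JVM default zone while keeping the local date intact.

    long trinoDate = 19000; // days since 1970-01-01 (hypothetical value)
    long millis = TimeUnit.DAYS.toMillis(trinoDate);
    java.sql.Date phoenixDate = new java.sql.Date(
            DateTimeZone.UTC.getMillisKeepLocal(DateTimeZone.getDefault(), millis));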
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableMap; -import io.airlift.log.Logger; -import io.trino.Session; -import io.trino.metadata.QualifiedObjectName; -import io.trino.plugin.tpch.TpchPlugin; -import io.trino.testing.DistributedQueryRunner; -import io.trino.testing.QueryRunner; -import io.trino.tpch.TpchTable; -import org.intellij.lang.annotations.Language; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import static io.trino.plugin.tpch.TpchMetadata.TINY_SCHEMA_NAME; -import static io.trino.testing.TestingSession.testSessionBuilder; -import static io.trino.tpch.TpchTable.LINE_ITEM; -import static io.trino.tpch.TpchTable.ORDERS; -import static io.trino.tpch.TpchTable.PART_SUPPLIER; -import static java.lang.String.format; - -public final class PhoenixQueryRunner -{ - private static final Logger LOG = Logger.get(PhoenixQueryRunner.class); - private static final String TPCH_SCHEMA = "tpch"; - - private PhoenixQueryRunner() - { - } - - public static QueryRunner createPhoenixQueryRunner(TestingPhoenixServer server, Map<String, String> extraProperties, List<TpchTable<?>> tables) - throws Exception - { - QueryRunner queryRunner = DistributedQueryRunner.builder(createSession()) - .setExtraProperties(extraProperties) - .build(); - - queryRunner.installPlugin(new TpchPlugin()); - queryRunner.createCatalog("tpch", "tpch"); - - Map<String, String> properties = ImmutableMap.<String, String>builder() - .put("phoenix.connection-url", server.getJdbcUrl()) - .put("case-insensitive-name-matching", "true") - .buildOrThrow(); - - queryRunner.installPlugin(new PhoenixPlugin()); - queryRunner.createCatalog("phoenix", "phoenix5", properties); - - createSchema(server, TPCH_SCHEMA); - copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(), tables); - - return queryRunner; - } - - private static void createSchema(TestingPhoenixServer phoenixServer, String schema) - throws SQLException - { - Properties properties = new Properties(); - properties.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true"); - try (Connection connection = DriverManager.getConnection(phoenixServer.getJdbcUrl(), properties); - Statement statement = connection.createStatement()) { - statement.execute(format("CREATE SCHEMA IF NOT EXISTS %s", schema)); - } - } - - private static void copyTpchTables( - QueryRunner queryRunner, - String sourceCatalog, - String sourceSchema, - Session session, - Iterable<TpchTable<?>> tables) - { - LOG.debug("Loading data from %s.%s...", sourceCatalog, sourceSchema); - for (TpchTable<?> table : tables) { - copyTable(queryRunner, sourceCatalog, session, sourceSchema, table); - } - } - - private static void copyTable( - QueryRunner queryRunner, - String catalog, - Session session, - String schema, - TpchTable<?> table) - { - QualifiedObjectName source = new QualifiedObjectName(catalog, schema, table.getTableName()); - String target = table.getTableName(); - String tableProperties = ""; - if (LINE_ITEM.getTableName().equals(target)) { - tableProperties = "WITH (ROWKEYS = 'ORDERKEY,LINENUMBER', SALT_BUCKETS=10)"; - } - else if (ORDERS.getTableName().equals(target)) { - tableProperties = "WITH (SALT_BUCKETS=10)"; - } - else if (PART_SUPPLIER.getTableName().equals(target)) { - tableProperties = "WITH (ROWKEYS = 'PARTKEY,SUPPKEY')"; - } - @Language("SQL") - String sql = format("CREATE TABLE IF NOT EXISTS %s %s AS SELECT * FROM %s", target, tableProperties, source); -
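// [Editor's note, not in the original patch: the ROWKEYS and SALT_BUCKETS entries above are
// Phoenix-specific table properties forwarded by the connector; ROWKEYS selects the primary-key
// columns, and SALT_BUCKETS pre-splits the table into hashed buckets so that writes spread
// across region servers.]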
LOG.debug("Running import for %s %s", target, sql); - long rows = queryRunner.execute(session, sql).getUpdateCount().getAsLong(); - LOG.debug("%s rows loaded into %s", rows, target); - } - - private static Session createSession() - { - return testSessionBuilder() - .setCatalog("phoenix") - .setSchema(TPCH_SCHEMA) - .build(); - } - - public static void main(String[] args) - throws Exception - { - QueryRunner queryRunner = createPhoenixQueryRunner( - TestingPhoenixServer.getInstance().get(), - ImmutableMap.of("http-server.http.port", "8080"), - TpchTable.getTables()); - - Logger log = Logger.get(PhoenixQueryRunner.class); - log.info("======== SERVER STARTED ========"); - log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl()); - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixSqlExecutor.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixSqlExecutor.java deleted file mode 100644 index 349f6700223f..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixSqlExecutor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import io.trino.testing.sql.SqlExecutor; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Properties; - -import static java.util.Objects.requireNonNull; - -public class PhoenixSqlExecutor - implements SqlExecutor -{ - private final String jdbcUrl; - private final Properties jdbcProperties; - - public PhoenixSqlExecutor(String jdbcUrl) - { - this(jdbcUrl, new Properties()); - } - - public PhoenixSqlExecutor(String jdbcUrl, Properties jdbcProperties) - { - this.jdbcUrl = requireNonNull(jdbcUrl, "jdbcUrl is null"); - this.jdbcProperties = new Properties(); - this.jdbcProperties.putAll(requireNonNull(jdbcProperties, "jdbcProperties is null")); - } - - @Override - public boolean supportsMultiRowInsert() - { - return false; - } - - @Override - public void execute(String sql) - { - sql = sql.replaceFirst("INSERT", "UPSERT"); - try (Connection connection = DriverManager.getConnection(jdbcUrl, jdbcProperties); - Statement statement = connection.createStatement()) { - statement.execute(sql); - connection.commit(); - } - catch (SQLException e) { - throw new RuntimeException("Error executing sql:\n" + sql, e); - } - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixTestTable.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixTestTable.java deleted file mode 100644 index 01eb3fa40d5e..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/PhoenixTestTable.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import io.trino.testing.sql.SqlExecutor; -import io.trino.testing.sql.TestTable; - -import java.util.List; - -import static java.lang.String.format; - -public class PhoenixTestTable - extends TestTable -{ - public PhoenixTestTable(SqlExecutor sqlExecutor, String namePrefix, String tableDefinition, List<String> rowsToInsert) - { - super(sqlExecutor, namePrefix, tableDefinition, rowsToInsert); - } - - @Override - protected void createAndInsert(List<String> rowsToInsert) - { - sqlExecutor.execute(format("CREATE TABLE %s %s", name, tableDefinition)); - try { - for (String row : rowsToInsert) { - sqlExecutor.execute(format("UPSERT INTO %s VALUES (%s)", name, row)); - } - } - catch (Exception e) { - try (PhoenixTestTable ignored = this) { - throw e; - } - } - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConfig.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConfig.java deleted file mode 100644 index 0a0400dff163..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConfig.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableMap; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Map; - -import static io.airlift.configuration.testing.ConfigAssertions.assertFullMapping; -import static io.airlift.configuration.testing.ConfigAssertions.assertRecordedDefaults; -import static io.airlift.configuration.testing.ConfigAssertions.recordDefaults; - -public class TestPhoenixConfig -{ - @Test - public void testDefaults() - { - assertRecordedDefaults(recordDefaults(PhoenixConfig.class) - .setConnectionUrl(null) - .setResourceConfigFiles("") - .setMaxScansPerSplit(20) - .setReuseConnection(true)); - } - - @Test - public void testExplicitPropertyMappings() - throws IOException - { - Path configFile = Files.createTempFile(null, null); - - Map<String, String> properties = ImmutableMap.<String, String>builder() - .put("phoenix.connection-url", "jdbc:phoenix:localhost:2181:/hbase") - .put("phoenix.config.resources", configFile.toString()) - .put("phoenix.max-scans-per-split", "1") - .put("query.reuse-connection", "false") - .buildOrThrow(); - - PhoenixConfig expected = new PhoenixConfig() - .setConnectionUrl("jdbc:phoenix:localhost:2181:/hbase") - .setResourceConfigFiles(configFile.toString()) - .setMaxScansPerSplit(1) - .setReuseConnection(false); - - assertFullMapping(properties, expected); - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java deleted file mode 100644 index 3a0b796f96bf..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java +++ /dev/null @@ -1,834 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Streams; -import io.trino.Session; -import io.trino.plugin.jdbc.BaseJdbcConnectorTest; -import io.trino.plugin.jdbc.UnsupportedTypeHandling; -import io.trino.sql.planner.assertions.PlanMatchPattern; -import io.trino.testing.QueryRunner; -import io.trino.testing.TestingConnectorBehavior; -import io.trino.testing.sql.SqlExecutor; -import io.trino.testing.sql.TestTable; -import org.intellij.lang.annotations.Language; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.parallel.Isolated; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.List; -import java.util.Optional; -import java.util.OptionalInt; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - -import static com.google.common.collect.ImmutableList.toImmutableList; -import static io.trino.plugin.jdbc.JdbcMetadataSessionProperties.DOMAIN_COMPACTION_THRESHOLD; -import static io.trino.plugin.jdbc.TypeHandlingJdbcSessionProperties.UNSUPPORTED_TYPE_HANDLING; -import static io.trino.plugin.jdbc.UnsupportedTypeHandling.CONVERT_TO_VARCHAR; -import static io.trino.plugin.phoenix5.PhoenixQueryRunner.createPhoenixQueryRunner; -import static io.trino.sql.planner.assertions.PlanMatchPattern.exchange; -import static io.trino.sql.planner.assertions.PlanMatchPattern.limit; -import static io.trino.sql.planner.assertions.PlanMatchPattern.output; -import static io.trino.sql.planner.assertions.PlanMatchPattern.project; -import static io.trino.sql.planner.assertions.PlanMatchPattern.sort; -import static io.trino.sql.planner.assertions.PlanMatchPattern.tableScan; -import static io.trino.sql.planner.assertions.PlanMatchPattern.topN; -import static io.trino.sql.planner.plan.ExchangeNode.Scope.LOCAL; -import static io.trino.sql.planner.plan.ExchangeNode.Scope.REMOTE; -import static io.trino.sql.planner.plan.ExchangeNode.Type.GATHER; -import static io.trino.sql.planner.plan.TopNNode.Step.FINAL; -import static io.trino.sql.tree.SortItem.NullOrdering.FIRST; -import static io.trino.sql.tree.SortItem.NullOrdering.LAST; -import static io.trino.sql.tree.SortItem.Ordering.ASCENDING; -import static io.trino.sql.tree.SortItem.Ordering.DESCENDING; -import static io.trino.testing.TestingNames.randomNameSuffix; -import static java.lang.String.format; -import static java.util.Locale.ENGLISH; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.junit.jupiter.api.Assumptions.abort; - -@Isolated -public class TestPhoenixConnectorTest - extends BaseJdbcConnectorTest -{ - private TestingPhoenixServer testingPhoenixServer; - - @Override - protected QueryRunner createQueryRunner() - throws Exception - { - testingPhoenixServer = closeAfterClass(TestingPhoenixServer.getInstance()).get(); - return createPhoenixQueryRunner(testingPhoenixServer, ImmutableMap.of(), REQUIRED_TPCH_TABLES); - } - - @Override - protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) - { - return switch (connectorBehavior) { - case SUPPORTS_MERGE, - SUPPORTS_PREDICATE_ARITHMETIC_EXPRESSION_PUSHDOWN, - SUPPORTS_ROW_LEVEL_UPDATE, - SUPPORTS_UPDATE -> true; - case SUPPORTS_ADD_COLUMN_WITH_COMMENT, - SUPPORTS_AGGREGATION_PUSHDOWN, - SUPPORTS_COMMENT_ON_COLUMN, - 
SUPPORTS_COMMENT_ON_TABLE, - SUPPORTS_CREATE_TABLE_WITH_COLUMN_COMMENT, - SUPPORTS_CREATE_TABLE_WITH_TABLE_COMMENT, - SUPPORTS_DROP_SCHEMA_CASCADE, - SUPPORTS_LIMIT_PUSHDOWN, - SUPPORTS_NATIVE_QUERY, - SUPPORTS_NOT_NULL_CONSTRAINT, - SUPPORTS_RENAME_COLUMN, - SUPPORTS_RENAME_SCHEMA, - SUPPORTS_RENAME_TABLE, - SUPPORTS_ROW_TYPE, - SUPPORTS_SET_COLUMN_TYPE, - SUPPORTS_TOPN_PUSHDOWN, - SUPPORTS_TRUNCATE -> false; - default -> super.hasBehavior(connectorBehavior); - }; - } - - // TODO: wait until https://github.com/trinodb/trino/pull/14939 is done and then remove this test - @Test - @Override - public void testArithmeticPredicatePushdown() - { - super.testArithmeticPredicatePushdown(); - // negate - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE -(nationkey) = -7")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE -(nationkey) = 0")) - .isFullyPushedDown(); - - // additive identity - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey + 0 = nationkey")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey - 0 = nationkey")) - .isFullyPushedDown(); - // additive inverse - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey + (-nationkey) = 0")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey - nationkey = 0")) - .isFullyPushedDown(); - - // addition and subtraction of constant - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey + 1 = 7")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey - 1 = 7")) - .isFullyPushedDown(); - - // addition and subtraction of columns - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey + regionkey = 26")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey - regionkey = 23")) - .isFullyPushedDown(); - // addition and subtraction of columns ANDed with another predicate - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE regionkey = 0 AND nationkey + regionkey = 14")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE regionkey = 0 AND nationkey - regionkey = 16")) - .isFullyPushedDown(); - - // multiplication/division/modulo by zero - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey * 0 != 0")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey / 0 = 0")) - .failure().satisfies(this::verifyDivisionByZeroFailure); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey % 0 = 0")) - .failure().satisfies(this::verifyDivisionByZeroFailure); - // Expression that evaluates to 0 for some rows on RHS of modulus - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey > 0 AND (nationkey - regionkey) / (regionkey - 1) = 2")) - .failure().satisfies(this::verifyDivisionByZeroFailure); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey > 0 AND (nationkey - regionkey) % (regionkey - 1) = 2")) - .failure().satisfies(this::verifyDivisionByZeroFailure); - - // multiplicative/divisive identity - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey * 1 = nationkey")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, 
name, regionkey FROM nation WHERE nationkey / 1 = nationkey")) - .isFullyPushedDown(); - // multiplicative/divisive inverse - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey > 0 AND nationkey / nationkey = 1")) - .isFullyPushedDown(); - - // multiplication/division/modulus with a constant - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey * 2 = 40")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey / 2 = 12")) - .isFullyPushedDown(); - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey % 20 = 7")) - .isFullyPushedDown(); - - // multiplication/division/modulus of columns - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey * regionkey = 40")) - .isFullyPushedDown(); - assertThat(query("SELECT orderkey, custkey FROM orders WHERE orderkey / custkey = 243")) - .isFullyPushedDown(); - assertThat(query("SELECT orderkey, custkey FROM orders WHERE orderkey % custkey = 1470")) - .isFullyPushedDown(); - // multiplication/division/modulus of columns ANDed with another predicate - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE regionkey = 2 AND nationkey * regionkey = 16")) - .isFullyPushedDown(); - assertThat(query("SELECT orderkey, custkey FROM orders WHERE custkey = 223 AND orderkey / custkey = 243")) - .isFullyPushedDown(); - assertThat(query("SELECT orderkey, custkey FROM orders WHERE custkey = 1483 AND orderkey % custkey = 1470")) - .isFullyPushedDown(); - - // some databases calculate remainder instead of modulus when one of the values is negative - assertThat(query("SELECT nationkey, name, regionkey FROM nation WHERE nationkey > 0 AND (nationkey - regionkey) % -nationkey = 2")) - .isFullyPushedDown(); - } - - private void verifyDivisionByZeroFailure(Throwable e) - { - assertThat(e.getCause().getCause()).hasMessageContainingAll("by zero"); - } - - @Override - protected TestTable createTableWithDefaultColumns() - { - return abort("Phoenix connector does not support column default values"); - } - - @Override - protected TestTable createTableWithUnsupportedColumn() - { - // Apparently all Phoenix types are supported in the Phoenix connector. - return abort("Cannot find an unsupported data type"); - } - - @Test - @Override - public void testAddAndDropColumnName() - { - for (String columnName : testColumnNameDataProvider()) { - // TODO: Investigate why these two cases fail - if (columnName.equals("an'apostrophe")) { - assertThatThrownBy(() -> testAddAndDropColumnName(columnName, requiresDelimiting(columnName))) - .hasMessageContaining("Syntax error. 
Mismatched input"); - abort("TODO"); - } - if (columnName.equals("a\\backslash`")) { - assertThatThrownBy(() -> testAddAndDropColumnName(columnName, requiresDelimiting(columnName))) - .hasMessageContaining("Undefined column"); - abort("TODO"); - } - - testAddAndDropColumnName(columnName, requiresDelimiting(columnName)); - } - } - - @Test - @Override - public void testInsertArray() - { - assertThatThrownBy(super::testInsertArray) - // TODO (https://github.com/trinodb/trino/issues/6421) array with double null stored as array with 0 - .hasMessage("Phoenix JDBC driver replaced 'null' with '0.0' at index 1 in [0.0]"); - } - - @Test - @Override - public void testCreateSchema() - { - abort("test disabled until issue fixed"); // TODO https://github.com/trinodb/trino/issues/2348 - } - - @Override - protected boolean isColumnNameRejected(Exception exception, String columnName, boolean delimited) - { - // TODO This should produce a reasonable exception message like "Invalid column name". Then, we should verify the actual exception message - return columnName.equals("a\"quote"); - } - - @Override - protected Optional filterDataMappingSmokeTestData(DataMappingTestSetup dataMappingTestSetup) - { - String typeName = dataMappingTestSetup.getTrinoTypeName(); - if (typeName.equals("time(6)") || - typeName.equals("timestamp") || - typeName.equals("timestamp(6)") || - typeName.equals("timestamp(3) with time zone") || - typeName.equals("timestamp(6) with time zone")) { - return Optional.of(dataMappingTestSetup.asUnsupported()); - } - - if (typeName.equals("time")) { - // TODO Enable when adding support reading time column - return Optional.empty(); - } - - if (typeName.equals("date") && dataMappingTestSetup.getSampleValueLiteral().equals("DATE '1582-10-05'")) { - // Phoenix connector returns +10 days during julian->gregorian switch. The test case exists in TestPhoenixTypeMapping.testDate(). 
- return Optional.empty(); - } - - return Optional.of(dataMappingTestSetup); - } - - @Test - @Override - public void testShowCreateTable() - { - assertThat(computeActual("SHOW CREATE TABLE orders").getOnlyValue()) - .isEqualTo("CREATE TABLE phoenix.tpch.orders (\n" + - " orderkey bigint,\n" + - " custkey bigint,\n" + - " orderstatus varchar(1),\n" + - " totalprice double,\n" + - " orderdate date,\n" + - " orderpriority varchar(15),\n" + - " clerk varchar(15),\n" + - " shippriority integer,\n" + - " comment varchar(79)\n" + - ")\n" + - "WITH (\n" + - " bloomfilter = 'ROW',\n" + - " data_block_encoding = 'FAST_DIFF',\n" + - " rowkeys = 'ROWKEY',\n" + - " salt_buckets = 10\n" + - ")"); - } - - @Test - @Override - public void testCharVarcharComparison() - { - // test overridden because super uses all-space char values (' ') that are nulled out by Phoenix - - try (TestTable table = new TestTable( - getQueryRunner()::execute, - "test_char_varchar", - "(k, v) AS VALUES" + - " (-1, CAST(NULL AS char(3))), " + - " (3, CAST('x ' AS char(3)))")) { - assertQuery( - "SELECT k, v FROM " + table.getName() + " WHERE v = CAST('x ' AS varchar(2))", - // The value is included because both sides of the comparison are coerced to char(3) - "VALUES (3, 'x ')"); - - assertQuery( - "SELECT k, v FROM " + table.getName() + " WHERE v = CAST('x ' AS varchar(4))", - // The value is included because both sides of the comparison are coerced to char(4) - "VALUES (3, 'x ')"); - } - } - - @Test - @Override - public void testVarcharCharComparison() - { - // test overridden because Phoenix nulls out the '' varchar value, impacting results - - try (TestTable table = new TestTable( - getQueryRunner()::execute, - "test_varchar_char", - "(k, v) AS VALUES" + - " (-1, CAST(NULL AS varchar(3))), " + - " (0, CAST('' AS varchar(3)))," + // '' gets replaced with null in Phoenix - " (1, CAST(' ' AS varchar(3))), " + - " (2, CAST(' ' AS varchar(3))), " + - " (3, CAST(' ' AS varchar(3)))," + - " (4, CAST('x' AS varchar(3)))," + - " (5, CAST('x ' AS varchar(3)))," + - " (6, CAST('x ' AS varchar(3)))")) { - assertQuery( - "SELECT k, v FROM " + table.getName() + " WHERE v = CAST(' ' AS char(2))", - // The 3-spaces value is included because both sides of the comparison are coerced to char(3) - "VALUES (1, ' '), (2, ' '), (3, ' ')"); - - // value that's not all-spaces - assertQuery( - "SELECT k, v FROM " + table.getName() + " WHERE v = CAST('x ' AS char(2))", - // The 3-spaces value is included because both sides of the comparison are coerced to char(3) - "VALUES (4, 'x'), (5, 'x '), (6, 'x ')"); - } - } - - @Test - @Override - public void testCharTrailingSpace() - { - String schema = getSession().getSchema().orElseThrow(); - try (TestTable table = new PhoenixTestTable(onRemoteDatabase(), schema + ".char_trailing_space", "(x char(10) primary key)", List.of("'test'"))) { - String tableName = table.getName(); - assertQuery("SELECT * FROM " + tableName + " WHERE x = char 'test'", "VALUES 'test '"); - assertQuery("SELECT * FROM " + tableName + " WHERE x = char 'test '", "VALUES 'test '"); - assertQuery("SELECT * FROM " + tableName + " WHERE x = char 'test '", "VALUES 'test '"); - assertQueryReturnsEmptyResult("SELECT * FROM " + tableName + " WHERE x = char ' test'"); - } - } - - // Overridden because Phoenix requires a ROWID column - @Test - @Override - public void testCountDistinctWithStringTypes() - { - assertThatThrownBy(super::testCountDistinctWithStringTypes).hasStackTraceContaining("Illegal data. 
CHAR types may only contain single byte characters"); - // Skipping the ą test case because it is not supported - List<String> rows = Streams.mapWithIndex(Stream.of("a", "b", "A", "B", " a ", "a", "b", " b "), (value, idx) -> String.format("%d, '%2$s', '%2$s'", idx, value)) - .collect(toImmutableList()); - String tableName = "count_distinct_strings" + randomNameSuffix(); - - try (TestTable testTable = new TestTable(getQueryRunner()::execute, tableName, "(id int, t_char CHAR(5), t_varchar VARCHAR(5)) WITH (ROWKEYS='id')", rows)) { - assertQuery("SELECT count(DISTINCT t_varchar) FROM " + testTable.getName(), "VALUES 6"); - assertQuery("SELECT count(DISTINCT t_char) FROM " + testTable.getName(), "VALUES 6"); - assertQuery("SELECT count(DISTINCT t_char), count(DISTINCT t_varchar) FROM " + testTable.getName(), "VALUES (6, 6)"); - } - } - - @Test - @Override - public void testMergeLarge() - { - String tableName = "test_merge_" + randomNameSuffix(); - - assertUpdate(createTableForWrites(format("CREATE TABLE %s (orderkey BIGINT, custkey BIGINT, totalprice DOUBLE)", tableName))); - - assertUpdate( - format("INSERT INTO %s SELECT orderkey, custkey, totalprice FROM tpch.sf1.orders", tableName), - (long) computeScalar("SELECT count(*) FROM tpch.sf1.orders")); - - @Language("SQL") String mergeSql = "" + - "MERGE INTO " + tableName + " t USING (SELECT * FROM tpch.sf1.orders) s ON (t.orderkey = s.orderkey)\n" + - "WHEN MATCHED AND mod(s.orderkey, 3) = 0 THEN UPDATE SET totalprice = t.totalprice + s.totalprice\n" + - "WHEN MATCHED AND mod(s.orderkey, 3) = 1 THEN DELETE"; - - assertUpdate(mergeSql, 1_000_000); - - // verify deleted rows - assertQuery("SELECT count(*) FROM " + tableName + " WHERE mod(orderkey, 3) = 1", "SELECT 0"); - - // verify untouched rows - assertThat(query("SELECT count(*), cast(sum(totalprice) AS decimal(18,2)) FROM " + tableName + " WHERE mod(orderkey, 3) = 2")) - .matches("SELECT count(*), cast(sum(totalprice) AS decimal(18,2)) FROM tpch.sf1.orders WHERE mod(orderkey, 3) = 2"); - - // TODO investigate why sum(DOUBLE) is not correct - // verify updated rows - String sql = format("SELECT count(*) FROM %s t JOIN tpch.sf1.orders s ON t.orderkey = s.orderkey WHERE mod(t.orderkey, 3) = 0 AND t.totalprice != s.totalprice * 2", tableName); - assertQuery(sql, "SELECT 0"); - - assertUpdate("DROP TABLE " + tableName); - } - - @Test - public void testMergeWithSpecifiedRowkeys() - { - testMergeWithSpecifiedRowkeys("customer"); - testMergeWithSpecifiedRowkeys("customer_copy"); - testMergeWithSpecifiedRowkeys("customer,customer_copy"); - } - - // This method is mainly copied from BaseConnectorTest#testMergeMultipleOperations, with an appended 'customer_copy' column that is a copy of the 'customer' column, for - // testing merge with explicitly specified rowkeys in Phoenix - private void testMergeWithSpecifiedRowkeys(String rowkeyDefinition) - { - int targetCustomerCount = 32; - String targetTable = "merge_multiple_rowkeys_specified_" + randomNameSuffix(); - // check the upper case table name also works - targetTable = targetTable.toUpperCase(ENGLISH); - assertUpdate(createTableForWrites(format("CREATE TABLE %s (customer VARCHAR, purchases INT, zipcode INT, spouse VARCHAR, address VARCHAR, customer_copy VARCHAR) WITH (rowkeys = '%s')", targetTable, rowkeyDefinition))); - - String originalInsertFirstHalf = IntStream.range(1, targetCustomerCount / 2) - .mapToObj(intValue -> format("('joe_%s', %s, %s, 'jan_%s', '%s Poe Ct', 'joe_%s')", intValue, 1000, 91000, intValue, intValue, intValue)) - 
.collect(Collectors.joining(", ")); - String originalInsertSecondHalf = IntStream.range(targetCustomerCount / 2, targetCustomerCount) - .mapToObj(intValue -> format("('joe_%s', %s, %s, 'jan_%s', '%s Poe Ct', 'joe_%s')", intValue, 2000, 92000, intValue, intValue, intValue)) - .collect(Collectors.joining(", ")); - - assertUpdate(format("INSERT INTO %s (customer, purchases, zipcode, spouse, address, customer_copy) VALUES %s, %s", targetTable, originalInsertFirstHalf, originalInsertSecondHalf), targetCustomerCount - 1); - - String firstMergeSource = IntStream.range(targetCustomerCount / 2, targetCustomerCount) - .mapToObj(intValue -> format("('joe_%s', %s, %s, 'jill_%s', '%s Eop Ct', 'joe_%s')", intValue, 3000, 83000, intValue, intValue, intValue)) - .collect(Collectors.joining(", ")); - - assertUpdate(format("MERGE INTO %s t USING (VALUES %s) AS s(customer, purchases, zipcode, spouse, address, customer_copy)", targetTable, firstMergeSource) + - " ON t.customer = s.customer" + - " WHEN MATCHED THEN UPDATE SET purchases = s.purchases, zipcode = s.zipcode, spouse = s.spouse, address = s.address", - targetCustomerCount / 2); - - assertQuery( - "SELECT customer, purchases, zipcode, spouse, address, customer_copy FROM " + targetTable, - format("VALUES %s, %s", originalInsertFirstHalf, firstMergeSource)); - - String nextInsert = IntStream.range(targetCustomerCount, targetCustomerCount * 3 / 2) - .mapToObj(intValue -> format("('jack_%s', %s, %s, 'jan_%s', '%s Poe Ct', 'jack_%s')", intValue, 4000, 74000, intValue, intValue, intValue)) - .collect(Collectors.joining(", ")); - - assertUpdate(format("INSERT INTO %s (customer, purchases, zipcode, spouse, address, customer_copy) VALUES %s", targetTable, nextInsert), targetCustomerCount / 2); - - String secondMergeSource = IntStream.range(1, targetCustomerCount * 3 / 2) - .mapToObj(intValue -> format("('joe_%s', %s, %s, 'jen_%s', '%s Poe Ct', 'joe_%s')", intValue, 5000, 85000, intValue, intValue, intValue)) - .collect(Collectors.joining(", ")); - - assertUpdate(format("MERGE INTO %s t USING (VALUES %s) AS s(customer, purchases, zipcode, spouse, address, customer_copy)", targetTable, secondMergeSource) + - " ON t.customer = s.customer" + - " WHEN MATCHED AND t.zipcode = 91000 THEN DELETE" + - " WHEN MATCHED AND s.zipcode = 85000 THEN UPDATE SET zipcode = 60000" + - " WHEN MATCHED THEN UPDATE SET zipcode = s.zipcode, spouse = s.spouse, address = s.address" + - " WHEN NOT MATCHED THEN INSERT (customer, purchases, zipcode, spouse, address, customer_copy) VALUES(s.customer, s.purchases, s.zipcode, s.spouse, s.address, s.customer_copy)", - targetCustomerCount * 3 / 2 - 1); - - String updatedBeginning = IntStream.range(targetCustomerCount / 2, targetCustomerCount) - .mapToObj(intValue -> format("('joe_%s', %s, %s, 'jill_%s', '%s Eop Ct', 'joe_%s')", intValue, 3000, 60000, intValue, intValue, intValue)) - .collect(Collectors.joining(", ")); - String updatedMiddle = IntStream.range(targetCustomerCount, targetCustomerCount * 3 / 2) - .mapToObj(intValue -> format("('joe_%s', %s, %s, 'jen_%s', '%s Poe Ct', 'joe_%s')", intValue, 5000, 85000, intValue, intValue, intValue)) - .collect(Collectors.joining(", ")); - String updatedEnd = IntStream.range(targetCustomerCount, targetCustomerCount * 3 / 2) - .mapToObj(intValue -> format("('jack_%s', %s, %s, 'jan_%s', '%s Poe Ct', 'jack_%s')", intValue, 4000, 74000, intValue, intValue, intValue)) - .collect(Collectors.joining(", ")); - - assertQuery( - "SELECT customer, purchases, zipcode, spouse, address, customer_copy FROM " + 
targetTable, - format("VALUES %s, %s, %s", updatedBeginning, updatedMiddle, updatedEnd)); - - assertUpdate("DROP TABLE " + targetTable); - } - - @Test - @Override - public void testUpdateRowConcurrently() - { - abort("Phoenix doesn't support concurrent update of different columns in a row"); - } - - @Test - public void testSchemaOperations() - { - assertUpdate("CREATE SCHEMA new_schema"); - assertUpdate("CREATE TABLE new_schema.test (x bigint)"); - - assertThatThrownBy(() -> getQueryRunner().execute("DROP SCHEMA new_schema")) - .isInstanceOf(RuntimeException.class) - .hasMessageContaining("Cannot drop non-empty schema 'new_schema'"); - - assertUpdate("DROP TABLE new_schema.test"); - assertUpdate("DROP SCHEMA new_schema"); - } - - @Test - public void testMultipleSomeColumnsRangesPredicate() - { - assertQuery("SELECT orderkey, shippriority, clerk, totalprice, custkey FROM orders WHERE orderkey BETWEEN 10 AND 50 OR orderkey BETWEEN 100 AND 150"); - } - - @Test - public void testUnsupportedType() - { - onRemoteDatabase().execute("CREATE TABLE tpch.test_timestamp (pk bigint primary key, val1 timestamp)"); - onRemoteDatabase().execute("UPSERT INTO tpch.test_timestamp (pk, val1) VALUES (1, null)"); - onRemoteDatabase().execute("UPSERT INTO tpch.test_timestamp (pk, val1) VALUES (2, '2002-05-30T09:30:10.5')"); - assertUpdate("INSERT INTO test_timestamp VALUES (3)", 1); - assertQuery("SELECT * FROM test_timestamp", "VALUES 1, 2, 3"); - assertQuery( - withUnsupportedType(CONVERT_TO_VARCHAR), - "SELECT * FROM test_timestamp", - "VALUES " + - "(1, null), " + - "(2, '2002-05-30 09:30:10.500'), " + - "(3, null)"); - assertQueryFails( - withUnsupportedType(CONVERT_TO_VARCHAR), - "INSERT INTO test_timestamp VALUES (4, '2002-05-30 09:30:10.500')", - "Underlying type that is mapped to VARCHAR is not supported for INSERT: TIMESTAMP"); - assertUpdate("DROP TABLE tpch.test_timestamp"); - } - - @Test - public void testDefaultDecimalTable() - { - onRemoteDatabase().execute("CREATE TABLE tpch.test_null_decimal (pk bigint primary key, val1 decimal)"); - onRemoteDatabase().execute("UPSERT INTO tpch.test_null_decimal (pk, val1) VALUES (1, 2)"); - assertQuery("SELECT * FROM tpch.test_null_decimal", "VALUES (1, 2) "); - } - - private Session withUnsupportedType(UnsupportedTypeHandling unsupportedTypeHandling) - { - return Session.builder(getSession()) - .setCatalogSessionProperty("phoenix", UNSUPPORTED_TYPE_HANDLING, unsupportedTypeHandling.name()) - .build(); - } - - @Test - public void testCreateTableWithProperties() - { - assertUpdate("CREATE TABLE test_create_table_with_properties (created_date date, a bigint, b double, c varchar(10), d varchar(10)) WITH(rowkeys = 'created_date row_timestamp, a,b,c', salt_buckets=10)"); - assertThat(getQueryRunner().tableExists(getSession(), "test_create_table_with_properties")).isTrue(); - assertTableColumnNames("test_create_table_with_properties", "created_date", "a", "b", "c", "d"); - assertThat(computeActual("SHOW CREATE TABLE test_create_table_with_properties").getOnlyValue()) - .isEqualTo("CREATE TABLE phoenix.tpch.test_create_table_with_properties (\n" + - " created_date date,\n" + - " a bigint NOT NULL,\n" + - " b double NOT NULL,\n" + - " c varchar(10) NOT NULL,\n" + - " d varchar(10)\n" + - ")\n" + - "WITH (\n" + - " bloomfilter = 'ROW',\n" + - " data_block_encoding = 'FAST_DIFF',\n" + - " rowkeys = 'A,B,C',\n" + - " salt_buckets = 10\n" + - ")"); - - assertUpdate("DROP TABLE test_create_table_with_properties"); - } - - @Test - public void testCreateTableWithPresplits() - 
{ - assertUpdate("CREATE TABLE test_create_table_with_presplits (rid varchar(10), val1 varchar(10)) with(rowkeys = 'rid', SPLIT_ON='\"1\",\"2\",\"3\"')"); - assertThat(getQueryRunner().tableExists(getSession(), "test_create_table_with_presplits")).isTrue(); - assertTableColumnNames("test_create_table_with_presplits", "rid", "val1"); - assertUpdate("DROP TABLE test_create_table_with_presplits"); - } - - @Test - public void testSecondaryIndex() - { - assertUpdate("CREATE TABLE test_primary_table (pk bigint, val1 double, val2 double, val3 double) with(rowkeys = 'pk')"); - onRemoteDatabase().execute("CREATE LOCAL INDEX test_local_index ON tpch.test_primary_table (val1)"); - onRemoteDatabase().execute("CREATE INDEX test_global_index ON tpch.test_primary_table (val2)"); - assertUpdate("INSERT INTO test_primary_table VALUES (1, 1.1, 1.2, 1.3)", 1); - assertQuery("SELECT val1,val3 FROM test_primary_table where val1 < 1.2", "SELECT 1.1,1.3"); - assertQuery("SELECT val2,val3 FROM test_primary_table where val2 < 1.3", "SELECT 1.2,1.3"); - assertUpdate("DROP TABLE test_primary_table"); - } - - @Test - public void testCaseInsensitiveNameMatching() - { - onRemoteDatabase().execute("CREATE TABLE tpch.\"TestCaseInsensitive\" (\"pK\" bigint primary key, \"Val1\" double)"); - assertUpdate("INSERT INTO testcaseinsensitive VALUES (1, 1.1)", 1); - assertQuery("SELECT Val1 FROM testcaseinsensitive where Val1 < 1.2", "SELECT 1.1"); - } - - @Test - public void testMissingColumnsOnInsert() - { - onRemoteDatabase().execute("CREATE TABLE tpch.test_col_insert(pk VARCHAR NOT NULL PRIMARY KEY, col1 VARCHAR, col2 VARCHAR)"); - assertUpdate("INSERT INTO test_col_insert(pk, col1) VALUES('1', 'val1')", 1); - assertUpdate("INSERT INTO test_col_insert(pk, col2) VALUES('1', 'val2')", 1); - assertQuery("SELECT * FROM test_col_insert", "SELECT 1, 'val1', 'val2'"); - } - - @Test - @Override - public void testTopNPushdown() - { - abort("Phoenix does not support topN push down, but instead replaces partial topN with partial Limit."); - } - - @Test - public void testReplacePartialTopNWithLimit() - { - List orderBy = ImmutableList.of(sort("orderkey", ASCENDING, LAST)); - - assertThat(query("SELECT orderkey FROM orders ORDER BY orderkey LIMIT 10")) - .matches(output( - topN(10, orderBy, FINAL, - exchange(LOCAL, GATHER, ImmutableList.of(), - exchange(REMOTE, GATHER, ImmutableList.of(), - limit( - 10, - ImmutableList.of(), - true, - orderBy.stream() - .map(PlanMatchPattern.Ordering::getField) - .collect(toImmutableList()), - tableScan("orders", ImmutableMap.of("orderkey", "orderkey")))))))); - - orderBy = ImmutableList.of(sort("orderkey", ASCENDING, FIRST)); - - assertThat(query("SELECT orderkey FROM orders ORDER BY orderkey NULLS FIRST LIMIT 10")) - .matches(output( - topN(10, orderBy, FINAL, - exchange(LOCAL, GATHER, ImmutableList.of(), - exchange(REMOTE, GATHER, ImmutableList.of(), - limit( - 10, - ImmutableList.of(), - true, - orderBy.stream() - .map(PlanMatchPattern.Ordering::getField) - .collect(toImmutableList()), - tableScan("orders", ImmutableMap.of("orderkey", "orderkey")))))))); - - orderBy = ImmutableList.of(sort("orderkey", DESCENDING, LAST)); - - assertThat(query("SELECT orderkey FROM orders ORDER BY orderkey DESC LIMIT 10")) - .matches(output( - topN(10, orderBy, FINAL, - exchange(LOCAL, GATHER, ImmutableList.of(), - exchange(REMOTE, GATHER, ImmutableList.of(), - limit( - 10, - ImmutableList.of(), - true, - orderBy.stream() - .map(PlanMatchPattern.Ordering::getField) - .collect(toImmutableList()), - tableScan("orders", 
ImmutableMap.of("orderkey", "orderkey")))))))); - - orderBy = ImmutableList.of(sort("orderkey", ASCENDING, LAST), sort("custkey", ASCENDING, LAST)); - - assertThat(query("SELECT orderkey FROM orders ORDER BY orderkey, custkey LIMIT 10")) - .matches(output( - project( - topN(10, orderBy, FINAL, - exchange(LOCAL, GATHER, ImmutableList.of(), - exchange(REMOTE, GATHER, ImmutableList.of(), - limit( - 10, - ImmutableList.of(), - true, - orderBy.stream() - .map(PlanMatchPattern.Ordering::getField) - .collect(toImmutableList()), - tableScan("orders", ImmutableMap.of("orderkey", "orderkey", "custkey", "custkey"))))))))); - - orderBy = ImmutableList.of(sort("orderkey", ASCENDING, LAST), sort("custkey", DESCENDING, LAST)); - - assertThat(query("SELECT orderkey FROM orders ORDER BY orderkey, custkey DESC LIMIT 10")) - .matches(output( - project( - topN(10, orderBy, FINAL, - exchange(LOCAL, GATHER, ImmutableList.of(), - exchange(REMOTE, GATHER, ImmutableList.of(), - limit( - 10, - ImmutableList.of(), - true, - orderBy.stream() - .map(PlanMatchPattern.Ordering::getField) - .collect(toImmutableList()), - tableScan("orders", ImmutableMap.of("orderkey", "orderkey", "custkey", "custkey"))))))))); - } - - /* - * Make sure that partial topN is replaced with a partial limit when the input is presorted. - */ - @Test - public void testUseSortedPropertiesForPartialTopNElimination() - { - String tableName = "test_propagate_table_scan_sorting_properties"; - // salting ensures multiple splits - String createTableSql = format("" + - "CREATE TABLE %s WITH (salt_buckets = 5) AS " + - "SELECT * FROM tpch.tiny.customer", - tableName); - assertUpdate(createTableSql, 1500L); - - String expected = "SELECT custkey FROM customer ORDER BY 1 NULLS FIRST LIMIT 100"; - String actual = format("SELECT custkey FROM %s ORDER BY 1 NULLS FIRST LIMIT 100", tableName); - assertQuery(getSession(), actual, expected, assertPartialLimitWithPreSortedInputsCount(getSession(), 1)); - assertUpdate("DROP TABLE " + tableName); - } - - @Override - protected TestTable simpleTable() - { - // override because Phoenix requires primary key specification - return new PhoenixTestTable(onRemoteDatabase(), "tpch.simple_table", "(col BIGINT PRIMARY KEY)", ImmutableList.of("1", "2")); - } - - @Override - protected TestTable createTableWithDoubleAndRealColumns(String name, List rows) - { - return new PhoenixTestTable(onRemoteDatabase(), name, "(t_double double primary key, u_double double, v_real float, w_real float)", rows); - } - - @Override - protected void verifyConcurrentAddColumnFailurePermissible(Exception e) - { - assertThat(e) - .hasMessageContaining("Concurrent modification to table"); - } - - @Test - @Override - public void testCreateSchemaWithLongName() - { - // TODO: Find the maximum table schema length in Phoenix and enable this test. - abort("TODO"); - } - - @Test - @Override - public void testCreateTableWithLongTableName() - { - // TODO: Find the maximum table name length in Phoenix and enable this test. - // Table name length with 65536 chars throws "startRow's length must be less than or equal to 32767 to meet the criteria for a row key." - // 32767 chars still causes the same error and shorter names (e.g. 10000) causes timeout. - abort("TODO"); - } - - @Test - @Override - public void testCreateTableWithLongColumnName() - { - // TODO: Find the maximum column name length in Phoenix and enable this test. 
- abort("TODO"); - } - - @Test - @Override - public void testAlterTableAddLongColumnName() - { - // TODO: Find the maximum column name length in Phoenix and enable this test. - abort("TODO"); - } - - @Test - public void testLargeDefaultDomainCompactionThreshold() - { - String catalogName = getSession().getCatalog().orElseThrow(); - String propertyName = catalogName + "." + DOMAIN_COMPACTION_THRESHOLD; - assertQuery( - "SHOW SESSION LIKE '" + propertyName + "'", - "VALUES('" + propertyName + "','5000', '5000', 'integer', 'Maximum ranges to allow in a tuple domain without simplifying it')"); - } - - @Override - protected OptionalInt maxTableNameLength() - { - return OptionalInt.of(32767); - } - - @Override - protected SqlExecutor onRemoteDatabase() - { - return sql -> { - try { - try (Connection connection = DriverManager.getConnection(testingPhoenixServer.getJdbcUrl()); - Statement statement = connection.createStatement()) { - statement.execute(sql); - connection.commit(); - } - } - catch (SQLException e) { - throw new RuntimeException(e); - } - }; - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixPlugin.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixPlugin.java deleted file mode 100644 index 8b3a6479dd2d..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixPlugin.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableMap; -import io.trino.spi.Plugin; -import io.trino.spi.connector.ConnectorFactory; -import io.trino.testing.TestingConnectorContext; -import org.junit.jupiter.api.Test; - -import static com.google.common.collect.Iterables.getOnlyElement; - -public class TestPhoenixPlugin -{ - @Test - public void testCreateConnector() - { - Plugin plugin = new PhoenixPlugin(); - ConnectorFactory factory = getOnlyElement(plugin.getConnectorFactories()); - factory.create( - "test", - ImmutableMap.of( - "phoenix.connection-url", "jdbc:phoenix:test", - "bootstrap.quiet", "true"), - new TestingConnectorContext()) - .shutdown(); - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixSplit.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixSplit.java deleted file mode 100644 index dea8625592ef..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixSplit.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableList; -import io.airlift.json.ObjectMapperProvider; -import io.trino.spi.HostAddress; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.mapreduce.PhoenixInputSplit; -import org.junit.jupiter.api.Test; - -import java.util.List; - -import static org.assertj.core.api.Assertions.assertThat; - -public class TestPhoenixSplit -{ - private final ObjectMapper objectMapper = new ObjectMapperProvider().get(); - - @Test - public void testPhoenixSplitJsonRoundtrip() - throws Exception - { - List<HostAddress> addresses = ImmutableList.of(HostAddress.fromString("host:9000")); - List<Scan> scans = ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("A")).withStopRow(Bytes.toBytes("Z"))); - PhoenixInputSplit phoenixInputSplit = new PhoenixInputSplit(scans); - PhoenixSplit expected = new PhoenixSplit( - addresses, - SerializedPhoenixInputSplit.serialize(phoenixInputSplit)); - - assertThat(objectMapper.canSerialize(PhoenixSplit.class)).isTrue(); - - String json = objectMapper.writeValueAsString(expected); - PhoenixSplit actual = objectMapper.readValue(json, PhoenixSplit.class); - assertThat(actual.getPhoenixInputSplit()).isEqualTo(expected.getPhoenixInputSplit()); - assertThat(actual.getAddresses()).isEqualTo(expected.getAddresses()); - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixTypeMapping.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixTypeMapping.java deleted file mode 100644 index 672b4e75fea4..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixTypeMapping.java +++ /dev/null @@ -1,789 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
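[Editor's note, not part of the original patch: the JSON roundtrip test above exists because HBase Scan objects have no JSON representation; PhoenixSplit therefore carries the PhoenixInputSplit inside the deleted SerializedPhoenixInputSplit wrapper, presumably as Hadoop Writable bytes that Jackson can serialize, and only reconstructs the split on the worker.]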
- */ -package io.trino.plugin.phoenix5; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import io.trino.Session; -import io.trino.plugin.jdbc.UnsupportedTypeHandling; -import io.trino.spi.type.ArrayType; -import io.trino.spi.type.CharType; -import io.trino.testing.AbstractTestQueryFramework; -import io.trino.testing.QueryRunner; -import io.trino.testing.TestingSession; -import io.trino.testing.datatype.CreateAndInsertDataSetup; -import io.trino.testing.datatype.CreateAsSelectDataSetup; -import io.trino.testing.datatype.DataSetup; -import io.trino.testing.datatype.SqlDataTypeTest; -import io.trino.testing.sql.TestTable; -import io.trino.testing.sql.TrinoSqlExecutor; -import org.intellij.lang.annotations.Language; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.parallel.Execution; - -import java.math.RoundingMode; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; - -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.base.Verify.verify; -import static io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.ALLOW_OVERFLOW; -import static io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.STRICT; -import static io.trino.plugin.jdbc.DecimalSessionSessionProperties.DECIMAL_DEFAULT_SCALE; -import static io.trino.plugin.jdbc.DecimalSessionSessionProperties.DECIMAL_MAPPING; -import static io.trino.plugin.jdbc.DecimalSessionSessionProperties.DECIMAL_ROUNDING_MODE; -import static io.trino.plugin.jdbc.TypeHandlingJdbcSessionProperties.UNSUPPORTED_TYPE_HANDLING; -import static io.trino.plugin.jdbc.UnsupportedTypeHandling.CONVERT_TO_VARCHAR; -import static io.trino.plugin.phoenix5.PhoenixQueryRunner.createPhoenixQueryRunner; -import static io.trino.spi.type.BigintType.BIGINT; -import static io.trino.spi.type.BooleanType.BOOLEAN; -import static io.trino.spi.type.CharType.createCharType; -import static io.trino.spi.type.DateType.DATE; -import static io.trino.spi.type.DecimalType.createDecimalType; -import static io.trino.spi.type.DoubleType.DOUBLE; -import static io.trino.spi.type.IntegerType.INTEGER; -import static io.trino.spi.type.RealType.REAL; -import static io.trino.spi.type.SmallintType.SMALLINT; -import static io.trino.spi.type.TimeZoneKey.getTimeZoneKey; -import static io.trino.spi.type.TinyintType.TINYINT; -import static io.trino.spi.type.VarbinaryType.VARBINARY; -import static io.trino.spi.type.VarcharType.VARCHAR; -import static io.trino.spi.type.VarcharType.createVarcharType; -import static java.lang.String.format; -import static java.math.RoundingMode.HALF_UP; -import static java.math.RoundingMode.UNNECESSARY; -import static java.time.ZoneOffset.UTC; -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; -import static org.junit.jupiter.api.parallel.ExecutionMode.CONCURRENT; - -/** - * @see Phoenix data types - */ -@TestInstance(PER_CLASS) -@Execution(CONCURRENT) -public class TestPhoenixTypeMapping - extends AbstractTestQueryFramework -{ - private TestingPhoenixServer phoenixServer; - - private final ZoneId jvmZone = ZoneId.systemDefault(); - // no DST in 1970, but has DST in later years (e.g. 
2018) - private final ZoneId vilnius = ZoneId.of("Europe/Vilnius"); - // minutes offset change since 1970-01-01, no DST - private final ZoneId kathmandu = ZoneId.of("Asia/Kathmandu"); - - @BeforeAll - public void setUp() - { - checkState(jvmZone.getId().equals("America/Bahia_Banderas"), "This test assumes certain JVM time zone"); - LocalDate dateOfLocalTimeChangeForwardAtMidnightInJvmZone = LocalDate.of(1970, 1, 1); - checkIsGap(jvmZone, dateOfLocalTimeChangeForwardAtMidnightInJvmZone.atStartOfDay()); - - LocalDate dateOfLocalTimeChangeForwardAtMidnightInSomeZone = LocalDate.of(1983, 4, 1); - checkIsGap(vilnius, dateOfLocalTimeChangeForwardAtMidnightInSomeZone.atStartOfDay()); - LocalDate dateOfLocalTimeChangeBackwardAtMidnightInSomeZone = LocalDate.of(1983, 10, 1); - checkIsDoubled(vilnius, dateOfLocalTimeChangeBackwardAtMidnightInSomeZone.atStartOfDay().minusMinutes(1)); - - checkIsGap(kathmandu, LocalDate.of(1986, 1, 1).atStartOfDay()); - } - - @Override - protected QueryRunner createQueryRunner() - throws Exception - { - phoenixServer = closeAfterClass(TestingPhoenixServer.getInstance()).get(); - return createPhoenixQueryRunner(phoenixServer, ImmutableMap.of(), ImmutableList.of()); - } - - @Test - public void testBoolean() - { - SqlDataTypeTest.create() - .addRoundTrip("boolean", "true", BOOLEAN, "true") - .addRoundTrip("boolean", "false", BOOLEAN, "false") - .addRoundTrip("boolean", "NULL", BOOLEAN, "CAST(NULL AS BOOLEAN)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_boolean")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_boolean")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_boolean")); - } - - @Test - public void testTinyInt() - { - SqlDataTypeTest.create() - .addRoundTrip("tinyint", "-128", TINYINT, "TINYINT '-128'") // min value in Phoenix and Trino - .addRoundTrip("tinyint", "0", TINYINT, "TINYINT '0'") - .addRoundTrip("tinyint", "127", TINYINT, "TINYINT '127'") // max value in Phoenix and Trino - .addRoundTrip("tinyint", "NULL", TINYINT, "CAST(NULL AS TINYINT)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_tinyint")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_tinyint")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_tinyint")); - } - - @Test - public void testUnsupportedTinyint() - { - try (TestTable table = new TestTable(new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()), "tpch.test_unsupported_tinyint", "(data tinyint, pk tinyint primary key)")) { - assertPhoenixQueryFails( - "INSERT INTO " + table.getName() + " VALUES (-129, 1)", // min - 1 - "ERROR 203 (22005): Type mismatch. BIGINT and TINYINT for expression: -129 in column 0.DATA"); - assertPhoenixQueryFails( - "INSERT INTO " + table.getName() + " VALUES (128, 2)", // max + 1 - "ERROR 203 (22005): Type mismatch. 
TINYINT and INTEGER for 128"); - } - } - - @Test - public void testUnsignedTinyInt() - { - SqlDataTypeTest.create() - .addRoundTrip("unsigned_tinyint", "0", TINYINT, "TINYINT '0'") // min value in Phoenix - .addRoundTrip("unsigned_tinyint", "127", TINYINT, "TINYINT '127'") // max value in Phoenix - .addRoundTrip("unsigned_tinyint", "NULL", TINYINT, "CAST(NULL AS TINYINT)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_unsigned_tinyint")); - } - - @Test - public void testSmallInt() - { - SqlDataTypeTest.create() - .addRoundTrip("smallint", "-32768", SMALLINT, "SMALLINT '-32768'") // min value in Phoenix and Trino - .addRoundTrip("smallint", "0", SMALLINT, "SMALLINT '0'") - .addRoundTrip("smallint", "32767", SMALLINT, "SMALLINT '32767'") // max value in Phoenix and Trino - .addRoundTrip("smallint", "NULL", SMALLINT, "CAST(NULL AS SMALLINT)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_smallint")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_smallint")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_smallint")); - } - - @Test - public void testUnsupportedSmallint() - { - try (TestTable table = new TestTable(new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()), "tpch.test_unsupported_smallint", "(data smallint, pk smallint primary key)")) { - assertPhoenixQueryFails( - "INSERT INTO " + table.getName() + " VALUES (-32769, 1)", // min - 1 - "ERROR 203 (22005): Type mismatch. BIGINT and SMALLINT for expression: -32769 in column 0.DATA"); - assertPhoenixQueryFails( - "INSERT INTO " + table.getName() + " VALUES (32768, 2)", // max + 1 - "ERROR 203 (22005): Type mismatch. SMALLINT and INTEGER for 32768"); - } - } - - @Test - public void testUnsignedSmallInt() - { - SqlDataTypeTest.create() - .addRoundTrip("unsigned_smallint", "0", SMALLINT, "SMALLINT '0'") // min value in Phoenix - .addRoundTrip("unsigned_smallint", "32767", SMALLINT, "SMALLINT '32767'") // max value in Phoenix - .addRoundTrip("unsigned_smallint", "NULL", SMALLINT, "CAST(NULL AS SMALLINT)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_unsigned_smallint")); - } - - @Test - public void testInteger() - { - SqlDataTypeTest.create() - .addRoundTrip("integer", "-2147483648", INTEGER, "-2147483648") // min value in Phoenix and Trino - .addRoundTrip("integer", "0", INTEGER, "0") - .addRoundTrip("integer", "2147483647", INTEGER, "2147483647") // max value in Phoenix and Trino - .addRoundTrip("integer", "NULL", INTEGER, "CAST(NULL AS INTEGER)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_integer")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_integer")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_integer")); - } - - @Test - public void testUnsupportedInteger() - { - try (TestTable table = new TestTable(new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()), "tpch.test_unsupported_integer", "(data integer, pk integer primary key)")) { - assertPhoenixQueryFails( - "INSERT INTO " + table.getName() + " VALUES (-2147483649, 1)", // min - 1 - "ERROR 203 (22005): Type mismatch. BIGINT and INTEGER for expression: -2147483649 in column 0.DATA"); - assertPhoenixQueryFails( - "INSERT INTO " + table.getName() + " VALUES (2147483648, 2)", // max + 1 - "ERROR 203 (22005): Type mismatch. 
INTEGER and BIGINT for 2147483648"); - } - } - - @Test - public void testUnsignedInt() - { - SqlDataTypeTest.create() - .addRoundTrip("unsigned_int", "0", INTEGER, "0") // min value in Phoenix - .addRoundTrip("unsigned_int", "2147483647", INTEGER, "2147483647") // max value in Phoenix - .addRoundTrip("unsigned_int", "NULL", INTEGER, "CAST(NULL AS INTEGER)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_unsigned_int")); - } - - @Test - public void testBigInt() - { - SqlDataTypeTest.create() - .addRoundTrip("bigint", "-9223372036854775808", BIGINT, "-9223372036854775808") // min value in Phoenix and Trino - .addRoundTrip("bigint", "0", BIGINT, "BIGINT '0'") - .addRoundTrip("bigint", "9223372036854775807", BIGINT, "9223372036854775807") // max value in Phoenix and Trino - .addRoundTrip("bigint", "NULL", BIGINT, "CAST(NULL AS BIGINT)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_bigint")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_bigint")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_bigint")); - } - - @Test - public void testUnsupportedBigInt() - { - try (TestTable table = new TestTable(new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()), "tpch.test_unsupported_bigint", "(data bigint, pk bigint primary key)")) { - assertPhoenixQueryFails( - "INSERT INTO " + table.getName() + " VALUES (-9223372036854775809, 1)", // min - 1 - "ERROR 203 (22005): Type mismatch. DECIMAL and BIGINT for expression: -9223372036854775809 in column 0.DATA"); - - // Phoenix JDBC driver throws ArithmeticException instead of SQLException when the value is larger than max of bigint - assertThatThrownBy(() -> new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()).execute(format("INSERT INTO %s VALUES (9223372036854775808, 2)", table.getName()))) // max + 1 - .isInstanceOf(ArithmeticException.class) - .hasMessage("Overflow"); - } - } - - @Test - public void testUnsignedLong() - { - SqlDataTypeTest.create() - .addRoundTrip("unsigned_long", "0", BIGINT, "BIGINT '0'") // min value in Phoenix - .addRoundTrip("unsigned_long", "9223372036854775807", BIGINT, "BIGINT '9223372036854775807'") // max value in Phoenix - .addRoundTrip("unsigned_long", "NULL", BIGINT, "CAST(NULL AS BIGINT)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_unsigned_long")); - } - - @Test - public void testFloat() - { - // Not testing NaN/-Infinity/+Infinity as those are not supported by Phoenix - SqlDataTypeTest.create() - .addRoundTrip("real", "REAL '-3.402823466E38'", REAL, "REAL '-3.402823466E38'") // min value in Phoenix - .addRoundTrip("real", "REAL '0.0'", REAL, "REAL '0.0'") - .addRoundTrip("real", "REAL '123.456E10'", REAL, "REAL '123.456E10'") - .addRoundTrip("real", "REAL '3.402823466E38'", REAL, "REAL '3.402823466E38'") // max value in Phoenix - .addRoundTrip("real", "NULL", REAL, "CAST(NULL AS REAL)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_float")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_float")); - - SqlDataTypeTest.create() - .addRoundTrip("float", "-3.402823466E38", REAL, "REAL '-3.402823466E38'") // min value in Phoenix - .addRoundTrip("float", "0.0", REAL, "REAL '0.0'") - .addRoundTrip("float", "123.456E10", REAL, "REAL '123.456E10'") - .addRoundTrip("float", "3.402823466E38", REAL, "REAL '3.402823466E38'") // max value in Phoenix - .addRoundTrip("float", "NULL",
REAL, "CAST(NULL AS REAL)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_float")); - } - - @Test - public void testUnsignedFloat() - { - // Not testing Nan/-Infinity/+Infinity as those are not supported by Phoenix - SqlDataTypeTest.create() - .addRoundTrip("unsigned_float", "0.0", REAL, "REAL '0.0'") // min value in Phoenix - .addRoundTrip("unsigned_float", "123.456E10", REAL, "REAL '123.456E10'") - .addRoundTrip("unsigned_float", "3.402823466E38", REAL, "REAL '3.402823466E38'") // max value in Phoenix - .addRoundTrip("unsigned_float", "NULL", REAL, "CAST(NULL AS REAL)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_unsigned_float")); - } - - @Test - public void testDouble() - { - // Not testing Nan/-Infinity/+Infinity as those are not supported by Phoenix - SqlDataTypeTest.create() - .addRoundTrip("double", "-1.7976931348623158E308", DOUBLE, "DOUBLE '-1.7976931348623158E308'") // min value in Phoenix - .addRoundTrip("double", "0.0", DOUBLE, "DOUBLE '0.0'") - .addRoundTrip("double", "1.0E100", DOUBLE, "DOUBLE '1.0E100'") - .addRoundTrip("double", "123.456E10", DOUBLE, "DOUBLE '123.456E10'") - .addRoundTrip("double", "1.7976931348623158E308", DOUBLE, "DOUBLE '1.7976931348623158E308'") // max value in Phoenix - .addRoundTrip("double", "NULL", DOUBLE, "CAST(NULL AS DOUBLE)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_double")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_double")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_double")); - } - - @Test - public void testUnsignedDouble() - { - // Not testing Nan/-Infinity/+Infinity as those are not supported by Phoenix - SqlDataTypeTest.create() - .addRoundTrip("unsigned_double", "0.0", DOUBLE, "DOUBLE '0.0'") // min value in Phoenix - .addRoundTrip("unsigned_double", "1.0E100", DOUBLE, "DOUBLE '1.0E100'") - .addRoundTrip("unsigned_double", "123.456E10", DOUBLE, "DOUBLE '123.456E10'") - .addRoundTrip("unsigned_double", "1.7976931348623158E308", DOUBLE, "DOUBLE '1.7976931348623158E308'") // max value in Phoenix - .addRoundTrip("unsigned_double", "NULL", DOUBLE, "CAST(NULL AS DOUBLE)") - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_unsigned_double")); - } - - @Test - public void testVarchar() - { - SqlDataTypeTest.create() - .addRoundTrip("varchar(10)", "'text_a'", createVarcharType(10), "CAST('text_a' AS VARCHAR(10))") - .addRoundTrip("varchar(255)", "'text_b'", createVarcharType(255), "CAST('text_b' AS VARCHAR(255))") - .addRoundTrip("varchar(65535)", "'text_d'", createVarcharType(65535), "CAST('text_d' AS VARCHAR(65535))") - .addRoundTrip("varchar(10485760)", "'text_f'", createVarcharType(10485760), "CAST('text_f' AS VARCHAR(10485760))") - .addRoundTrip("varchar", "'unbounded'", VARCHAR, "VARCHAR 'unbounded'") - .addRoundTrip("varchar(10)", "NULL", createVarcharType(10), "CAST(NULL AS VARCHAR(10))") - .addRoundTrip("varchar", "NULL", VARCHAR, "CAST(NULL AS VARCHAR)") - .execute(getQueryRunner(), trinoCreateAsSelect("test_varchar")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_varchar")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_varchar")); - } - - @Test - public void testChar() - { - SqlDataTypeTest.create() - 
.addRoundTrip("char(10)", "'text_a'", createCharType(10), "CAST('text_a' AS CHAR(10))") - .addRoundTrip("char(255)", "'text_b'", createCharType(255), "CAST('text_b' AS CHAR(255))") - .addRoundTrip("char(65536)", "'text_e'", createCharType(CharType.MAX_LENGTH), "CAST('text_e' AS CHAR(65536))") - .addRoundTrip("char(10)", "NULL", createCharType(10), "CAST(NULL AS CHAR(10))") - .execute(getQueryRunner(), trinoCreateAsSelect("test_char")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_char")) - - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_char")); - } - - @Test - public void testBinary() - { - // Not testing max length (2147483647) because it leads to 'Requested array size exceeds VM limit' - SqlDataTypeTest.create() - .addRoundTrip("binary(1)", "NULL", VARBINARY, "CAST(NULL AS VARBINARY)") - .addRoundTrip("binary(10)", "DECODE('', 'HEX')", VARBINARY, "CAST(NULL AS VARBINARY)") - .addRoundTrip("binary(5)", "DECODE('68656C6C6F', 'HEX')", VARBINARY, "to_utf8('hello')") - .addRoundTrip("binary(26)", "DECODE('5069C4996B6E6120C582C4856B61207720E69DB1E4BAACE983BD', 'HEX')", VARBINARY, "to_utf8('Piękna łąka w 東京都')") - .addRoundTrip("binary(16)", "DECODE('4261672066756C6C206F6620F09F92B0', 'HEX')", VARBINARY, "to_utf8('Bag full of 💰')") - .addRoundTrip("binary(17)", "DECODE('0001020304050607080DF9367AA7000000', 'HEX')", VARBINARY, "X'0001020304050607080DF9367AA7000000'") // non-text - .addRoundTrip("binary(6)", "DECODE('000000000000', 'HEX')", VARBINARY, "X'000000000000'") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_binary")); - } - - @Test - public void testVarbinary() - { - SqlDataTypeTest.create() - .addRoundTrip("varbinary", "NULL", VARBINARY, "CAST(NULL AS varbinary)") - .addRoundTrip("varbinary", "X''", VARBINARY, "CAST(NULL AS varbinary)") // empty stored as NULL - .addRoundTrip("varbinary", "X'68656C6C6F'", VARBINARY, "to_utf8('hello')") - .addRoundTrip("varbinary", "X'5069C4996B6E6120C582C4856B61207720E69DB1E4BAACE983BD'", VARBINARY, "to_utf8('Piękna łąka w 東京都')") - .addRoundTrip("varbinary", "X'4261672066756C6C206F6620F09F92B0'", VARBINARY, "to_utf8('Bag full of 💰')") - .addRoundTrip("varbinary", "X'0001020304050607080DF9367AA7000000'", VARBINARY, "X'0001020304050607080DF9367AA7000000'") // non-text - .addRoundTrip("varbinary", "X'000000000000'", VARBINARY, "X'000000000000'") - .execute(getQueryRunner(), trinoCreateAsSelect("test_varbinary")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_varbinary")); - - SqlDataTypeTest.create() - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .addRoundTrip("varbinary", "NULL", VARBINARY, "CAST(NULL AS varbinary)") - .addRoundTrip("varbinary", "DECODE('', 'HEX')", VARBINARY, "CAST(NULL AS varbinary)") // empty stored as NULL - .addRoundTrip("varbinary", "DECODE('68656C6C6F', 'HEX')", VARBINARY, "to_utf8('hello')") - .addRoundTrip("varbinary", "DECODE('5069C4996B6E6120C582C4856B61207720E69DB1E4BAACE983BD', 'HEX')", VARBINARY, "to_utf8('Piękna łąka w 東京都')") - .addRoundTrip("varbinary", "DECODE('4261672066756C6C206F6620F09F92B0', 'HEX')", VARBINARY, "to_utf8('Bag full of 💰')") - .addRoundTrip("varbinary", "DECODE('0001020304050607080DF9367AA7000000', 'HEX')", VARBINARY, "X'0001020304050607080DF9367AA7000000'") // non-text - .addRoundTrip("varbinary", "DECODE('000000000000', 'HEX')", VARBINARY, "X'000000000000'") - .execute(getQueryRunner(), 
phoenixCreateAndInsert("tpch.test_varbinary")); - } - - @Test - public void testDecimal() - { - SqlDataTypeTest.create() - .addRoundTrip("decimal(3, 0)", "CAST('193' AS decimal(3, 0))", createDecimalType(3, 0), "CAST('193' AS decimal(3, 0))") - .addRoundTrip("decimal(3, 0)", "CAST('19' AS decimal(3, 0))", createDecimalType(3, 0), "CAST('19' AS decimal(3, 0))") - .addRoundTrip("decimal(3, 0)", "CAST('-193' AS decimal(3, 0))", createDecimalType(3, 0), "CAST('-193' AS decimal(3, 0))") - .addRoundTrip("decimal(3, 1)", "CAST('10.0' AS decimal(3, 1))", createDecimalType(3, 1), "CAST('10.0' AS decimal(3, 1))") - .addRoundTrip("decimal(3, 1)", "CAST('10.1' AS decimal(3, 1))", createDecimalType(3, 1), "CAST('10.1' AS decimal(3, 1))") - .addRoundTrip("decimal(3, 1)", "CAST('-10.1' AS decimal(3, 1))", createDecimalType(3, 1), "CAST('-10.1' AS decimal(3, 1))") - .addRoundTrip("decimal(3, 2)", "CAST('3.14' AS decimal(3, 2))", createDecimalType(3, 2), "CAST('3.14' AS decimal(3, 2))") - .addRoundTrip("decimal(4, 2)", "CAST('2' AS decimal(4, 2))", createDecimalType(4, 2), "CAST('2' AS decimal(4, 2))") - .addRoundTrip("decimal(4, 2)", "CAST('2.3' AS decimal(4, 2))", createDecimalType(4, 2), "CAST('2.3' AS decimal(4, 2))") - .addRoundTrip("decimal(24, 2)", "CAST('2' AS decimal(24, 2))", createDecimalType(24, 2), "CAST('2' AS decimal(24, 2))") - .addRoundTrip("decimal(24, 2)", "CAST('2.3' AS decimal(24, 2))", createDecimalType(24, 2), "CAST('2.3' AS decimal(24, 2))") - .addRoundTrip("decimal(24, 2)", "CAST('123456789.3' AS decimal(24, 2))", createDecimalType(24, 2), "CAST('123456789.3' AS decimal(24, 2))") - .addRoundTrip("decimal(24, 4)", "CAST('12345678901234567890.31' AS decimal(24, 4))", createDecimalType(24, 4), "CAST('12345678901234567890.31' AS decimal(24, 4))") - .addRoundTrip("decimal(24, 23)", "CAST('3.12345678901234567890123' AS decimal(24, 23))", createDecimalType(24, 23), "CAST('3.12345678901234567890123' AS decimal(24, 23))") - .addRoundTrip("decimal(30, 5)", "CAST('3141592653589793238462643.38327' AS decimal(30, 5))", createDecimalType(30, 5), "CAST('3141592653589793238462643.38327' AS decimal(30, 5))") - .addRoundTrip("decimal(30, 5)", "CAST('-3141592653589793238462643.38327' AS decimal(30, 5))", createDecimalType(30, 5), "CAST('-3141592653589793238462643.38327' AS decimal(30, 5))") - .addRoundTrip("decimal(38, 0)", "CAST('27182818284590452353602874713526624977' AS decimal(38, 0))", createDecimalType(38, 0), "CAST('27182818284590452353602874713526624977' AS decimal(38, 0))") - .addRoundTrip("decimal(38, 0)", "CAST('-27182818284590452353602874713526624977' AS decimal(38, 0))", createDecimalType(38, 0), "CAST('-27182818284590452353602874713526624977' AS decimal(38, 0))") - .addRoundTrip("decimal(3, 0)", "NULL", createDecimalType(3, 0), "CAST(NULL AS decimal(3, 0))") - .addRoundTrip("decimal(38, 0)", "CAST(NULL AS decimal(38, 0))", createDecimalType(38, 0), "CAST(NULL AS decimal(38, 0))") - .execute(getQueryRunner(), trinoCreateAsSelect("test_decimal")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_decimal")); - - SqlDataTypeTest.create() - .addRoundTrip("decimal(3, 0)", "CAST(193 AS decimal(3, 0))", createDecimalType(3, 0), "CAST('193' AS decimal(3, 0))") - .addRoundTrip("decimal(3, 0)", "CAST(19 AS decimal(3, 0))", createDecimalType(3, 0), "CAST('19' AS decimal(3, 0))") - .addRoundTrip("decimal(3, 0)", "CAST(-193 AS decimal(3, 0))", createDecimalType(3, 0), "CAST('-193' AS decimal(3, 0))") - .addRoundTrip("decimal(3, 1)", "CAST(10.0 AS decimal(3, 1))", createDecimalType(3, 1), 
"CAST('10.0' AS decimal(3, 1))") - .addRoundTrip("decimal(3, 1)", "CAST(10.1 AS decimal(3, 1))", createDecimalType(3, 1), "CAST('10.1' AS decimal(3, 1))") - .addRoundTrip("decimal(3, 1)", "CAST(-10.1 AS decimal(3, 1))", createDecimalType(3, 1), "CAST('-10.1' AS decimal(3, 1))") - .addRoundTrip("decimal(3, 2)", "CAST(3.14 AS decimal(3, 2))", createDecimalType(3, 2), "CAST('3.14' AS decimal(3, 2))") - .addRoundTrip("decimal(4, 2)", "CAST(2 AS decimal(4, 2))", createDecimalType(4, 2), "CAST('2' AS decimal(4, 2))") - .addRoundTrip("decimal(4, 2)", "CAST(2.3 AS decimal(4, 2))", createDecimalType(4, 2), "CAST('2.3' AS decimal(4, 2))") - .addRoundTrip("decimal(24, 2)", "CAST(2 AS decimal(24, 2))", createDecimalType(24, 2), "CAST('2' AS decimal(24, 2))") - .addRoundTrip("decimal(24, 2)", "CAST(2.3 AS decimal(24, 2))", createDecimalType(24, 2), "CAST('2.3' AS decimal(24, 2))") - .addRoundTrip("decimal(24, 2)", "CAST(123456789.3 AS decimal(24, 2))", createDecimalType(24, 2), "CAST('123456789.3' AS decimal(24, 2))") - .addRoundTrip("decimal(24, 4)", "CAST(12345678901234567890.31 AS decimal(24, 4))", createDecimalType(24, 4), "CAST('12345678901234567890.31' AS decimal(24, 4))") - .addRoundTrip("decimal(24, 23)", "CAST(3.12345678901234567890123 AS decimal(24, 23))", createDecimalType(24, 23), "CAST('3.12345678901234567890123' AS decimal(24, 23))") - .addRoundTrip("decimal(30, 5)", "CAST(3141592653589793238462643.38327 AS decimal(30, 5))", createDecimalType(30, 5), "CAST('3141592653589793238462643.38327' AS decimal(30, 5))") - .addRoundTrip("decimal(30, 5)", "CAST(-3141592653589793238462643.38327 AS decimal(30, 5))", createDecimalType(30, 5), "CAST('-3141592653589793238462643.38327' AS decimal(30, 5))") - .addRoundTrip("decimal(38, 0)", "CAST(27182818284590452353602874713526624977 AS decimal(38, 0))", createDecimalType(38, 0), "CAST('27182818284590452353602874713526624977' AS decimal(38, 0))") - .addRoundTrip("decimal(38, 0)", "CAST(-27182818284590452353602874713526624977 AS decimal(38, 0))", createDecimalType(38, 0), "CAST('-27182818284590452353602874713526624977' AS decimal(38, 0))") - .addRoundTrip("decimal(3, 0)", "CAST(NULL AS decimal(3, 0))", createDecimalType(3, 0), "CAST(NULL AS decimal(3, 0))") - .addRoundTrip("decimal(38, 0)", "CAST(NULL AS decimal(38, 0))", createDecimalType(38, 0), "CAST(NULL AS decimal(38, 0))") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_decimal")); - } - - @Test - public void testDecimalUnspecifiedPrecision() - { - PhoenixSqlExecutor phoenixSqlExecutor = new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()); - try (TestTable testTable = new TestTable( - phoenixSqlExecutor, - "tpch.test_var_decimal", - "(pk bigint primary key, d_col decimal)", - asList("1, 1.12", "2, 123456.789", "3, -1.12", "4, -123456.789"))) { - assertQueryFails( - sessionWithDecimalMappingAllowOverflow(UNNECESSARY, 0), - "SELECT d_col FROM " + testTable.getName(), - "Rounding necessary"); - assertQuery( - sessionWithDecimalMappingAllowOverflow(HALF_UP, 0), - "SELECT d_col FROM " + testTable.getName(), - "VALUES (1), (123457), (-1), (-123457)"); - assertQueryFails( - sessionWithDecimalMappingAllowOverflow(UNNECESSARY, 1), - "SELECT d_col FROM " + testTable.getName(), - "Rounding necessary"); - assertQuery( - sessionWithDecimalMappingAllowOverflow(HALF_UP, 1), - "SELECT d_col FROM " + testTable.getName(), - "VALUES (1.1), (123456.8), (-1.1), (-123456.8)"); - assertQueryFails( - sessionWithDecimalMappingAllowOverflow(UNNECESSARY, 
2), - "SELECT d_col FROM " + testTable.getName(), - "Rounding necessary"); - assertQuery( - sessionWithDecimalMappingAllowOverflow(HALF_UP, 2), - "SELECT d_col FROM " + testTable.getName(), - "VALUES (1.12), (123456.79), (-1.12), (-123456.79)"); - assertQuery( - sessionWithDecimalMappingAllowOverflow(UNNECESSARY, 3), - "SELECT d_col FROM " + testTable.getName(), - "VALUES (1.12), (123456.789), (-1.12), (-123456.789)"); - assertQueryFails( - sessionWithDecimalMappingStrict(CONVERT_TO_VARCHAR), - "SELECT d_col FROM " + testTable.getName(), - "Rounding necessary"); - } - } - - @Test - public void testDate() - { - testDate(UTC); - testDate(jvmZone); - // using two non-JVM zones so that we don't need to worry what Phoenix system zone is - testDate(vilnius); - testDate(kathmandu); - testDate(TestingSession.DEFAULT_TIME_ZONE_KEY.getZoneId()); - } - - private void testDate(ZoneId sessionZone) - { - Session session = Session.builder(getSession()) - .setTimeZoneKey(getTimeZoneKey(sessionZone.getId())) - .build(); - - SqlDataTypeTest.create() - .addRoundTrip("date", "DATE '-5877641-06-23'", DATE, "DATE '-5877641-06-23'") // min value in Trino - .addRoundTrip("date", "DATE '-0001-01-01'", DATE, "DATE '-0001-01-01'") - .addRoundTrip("date", "DATE '0001-01-01'", DATE, "DATE '0001-01-01'") - .addRoundTrip("date", "DATE '1582-10-04'", DATE, "DATE '1582-10-04'") - .addRoundTrip("date", "DATE '1582-10-05'", DATE, "DATE '1582-10-15'") // begin julian->gregorian switch - .addRoundTrip("date", "DATE '1582-10-14'", DATE, "DATE '1582-10-24'") // end julian->gregorian switch - .addRoundTrip("date", "DATE '1582-10-15'", DATE, "DATE '1582-10-15'") - .addRoundTrip("date", "DATE '1899-12-31'", DATE, "DATE '1899-12-31'") - .addRoundTrip("date", "DATE '1900-01-01'", DATE, "DATE '1900-01-01'") - .addRoundTrip("date", "DATE '1952-04-04'", DATE, "DATE '1952-04-04'") // before epoch - .addRoundTrip("date", "DATE '1970-01-01'", DATE, "DATE '1970-01-01'") - .addRoundTrip("date", "DATE '1970-02-03'", DATE, "DATE '1970-02-03'") - .addRoundTrip("date", "DATE '2017-07-01'", DATE, "DATE '2017-07-01'") // summer on northern hemisphere (possible DST) - .addRoundTrip("date", "DATE '2017-01-01'", DATE, "DATE '2017-01-01'") // winter on northern hemisphere (possible DST on southern hemisphere) - .addRoundTrip("date", "DATE '1983-04-01'", DATE, "DATE '1983-04-01'") - .addRoundTrip("date", "DATE '1983-10-01'", DATE, "DATE '1983-10-01'") - .addRoundTrip("date", "DATE '9999-12-31'", DATE, "DATE '9999-12-31'") - .addRoundTrip("date", "DATE '5881580-07-11'", DATE, "DATE '5881580-07-11'") // max value in Trino - .addRoundTrip("date", "NULL", DATE, "CAST(NULL AS DATE)") - .execute(getQueryRunner(), session, trinoCreateAsSelect(session, "test_date")) - .execute(getQueryRunner(), session, trinoCreateAsSelect("test_date")) - .execute(getQueryRunner(), session, trinoCreateAndInsert(session, "test_date")) - .execute(getQueryRunner(), session, trinoCreateAndInsert("test_date")); - - SqlDataTypeTest.create() - .addRoundTrip("date", "TO_DATE('5877642-06-23 BC', 'yyyy-MM-dd G', 'local')", DATE, "DATE '-5877641-06-23'") // min value in Trino - .addRoundTrip("date", "TO_DATE('0002-01-01 BC', 'yyyy-MM-dd G', 'local')", DATE, "DATE '-0001-01-01'") - .addRoundTrip("date", "TO_DATE('0001-01-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '0001-01-01'") - .addRoundTrip("date", "TO_DATE('1582-10-04', 'yyyy-MM-dd', 'local')", DATE, "DATE '1582-10-04'") - .addRoundTrip("date", "TO_DATE('1582-10-05', 'yyyy-MM-dd', 'local')", DATE, "DATE '1582-10-15'") // begin 
julian->gregorian switch - .addRoundTrip("date", "TO_DATE('1582-10-14', 'yyyy-MM-dd', 'local')", DATE, "DATE '1582-10-24'") // end julian->gregorian switch - .addRoundTrip("date", "TO_DATE('1582-10-15', 'yyyy-MM-dd', 'local')", DATE, "DATE '1582-10-15'") - .addRoundTrip("date", "TO_DATE('1899-12-31', 'yyyy-MM-dd', 'local')", DATE, "DATE '1899-12-31'") - .addRoundTrip("date", "TO_DATE('1900-01-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '1900-01-01'") - .addRoundTrip("date", "TO_DATE('1952-04-04', 'yyyy-MM-dd', 'local')", DATE, "DATE '1952-04-04'") // before epoch - .addRoundTrip("date", "TO_DATE('1970-01-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '1970-01-01'") - .addRoundTrip("date", "TO_DATE('1970-02-03', 'yyyy-MM-dd', 'local')", DATE, "DATE '1970-02-03'") - .addRoundTrip("date", "TO_DATE('2017-07-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '2017-07-01'") // summer on northern hemisphere (possible DST) - .addRoundTrip("date", "TO_DATE('2017-01-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '2017-01-01'") // winter on northern hemisphere (possible DST on southern hemisphere) - .addRoundTrip("date", "TO_DATE('1983-04-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '1983-04-01'") - .addRoundTrip("date", "TO_DATE('1983-10-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '1983-10-01'") - .addRoundTrip("date", "TO_DATE('9999-12-31', 'yyyy-MM-dd', 'local')", DATE, "DATE '9999-12-31'") - .addRoundTrip("date", "TO_DATE('5881580-07-11', 'yyyy-MM-dd', 'local')", DATE, "DATE '5881580-07-11'") // max value in Trino - .addRoundTrip("date", "NULL", DATE, "CAST(NULL AS DATE)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), session, phoenixCreateAndInsert("tpch.test_date")); - } - - @Test - public void testUnsignedDate() - { - testUnsignedDate(UTC); - testUnsignedDate(jvmZone); - // using two non-JVM zones so that we don't need to worry what Phoenix system zone is - testUnsignedDate(vilnius); - testUnsignedDate(kathmandu); - testUnsignedDate(TestingSession.DEFAULT_TIME_ZONE_KEY.getZoneId()); - } - - private void testUnsignedDate(ZoneId sessionZone) - { - Session session = Session.builder(getSession()) - .setTimeZoneKey(getTimeZoneKey(sessionZone.getId())) - .build(); - - SqlDataTypeTest.create() - .addRoundTrip("unsigned_date", "TO_DATE('1970-01-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '1970-01-01'") // min value in Phoenix - .addRoundTrip("unsigned_date", "TO_DATE('1970-02-03', 'yyyy-MM-dd', 'local')", DATE, "DATE '1970-02-03'") - .addRoundTrip("unsigned_date", "TO_DATE('1983-04-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '1983-04-01'") - .addRoundTrip("unsigned_date", "TO_DATE('1983-10-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '1983-10-01'") - .addRoundTrip("unsigned_date", "TO_DATE('2017-07-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '2017-07-01'") // summer on northern hemisphere (possible DST) - .addRoundTrip("unsigned_date", "TO_DATE('2017-01-01', 'yyyy-MM-dd', 'local')", DATE, "DATE '2017-01-01'") // winter on northern hemisphere (possible DST on southern hemisphere) - .addRoundTrip("unsigned_date", "TO_DATE('9999-12-31', 'yyyy-MM-dd', 'local')", DATE, "DATE '9999-12-31'") - .addRoundTrip("unsigned_date", "TO_DATE('5881580-07-11', 'yyyy-MM-dd', 'local')", DATE, "DATE '5881580-07-11'") // max value in Trino - .addRoundTrip("unsigned_date", "NULL", DATE, "CAST(NULL AS DATE)") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), session, phoenixCreateAndInsert("tpch.test_unsigned_date")); - } - - @Test - public void testArray() - { - // basic types - 
SqlDataTypeTest.create() - .addRoundTrip("ARRAY(boolean)", "ARRAY[true, false]", new ArrayType(BOOLEAN), "ARRAY[true, false]") - .addRoundTrip("ARRAY(bigint)", "ARRAY[123456789012]", new ArrayType(BIGINT), "ARRAY[123456789012]") - .addRoundTrip("ARRAY(integer)", "ARRAY[1, 2, 1234567890]", new ArrayType(INTEGER), "ARRAY[1, 2, 1234567890]") - .addRoundTrip("ARRAY(smallint)", "ARRAY[32456]", new ArrayType(SMALLINT), "ARRAY[SMALLINT '32456']") - .addRoundTrip("ARRAY(double)", "ARRAY[123.45]", new ArrayType(DOUBLE), "ARRAY[DOUBLE '123.45']") - .addRoundTrip("ARRAY(real)", "ARRAY[123.45]", new ArrayType(REAL), "ARRAY[REAL '123.45']") - .execute(getQueryRunner(), trinoCreateAsSelect("test_array_basic")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_array_basic")); - - SqlDataTypeTest.create() - .addRoundTrip("ARRAY(date)", "ARRAY[DATE '1952-04-03']", new ArrayType(DATE), "ARRAY[DATE '1952-04-03']") // before epoch - .addRoundTrip("ARRAY(date)", "ARRAY[DATE '1970-01-01']", new ArrayType(DATE), "ARRAY[DATE '1970-01-01']") - .addRoundTrip("ARRAY(date)", "ARRAY[DATE '1970-02-03']", new ArrayType(DATE), "ARRAY[DATE '1970-02-03']") - .addRoundTrip("ARRAY(date)", "ARRAY[DATE '2017-07-01']", new ArrayType(DATE), "ARRAY[DATE '2017-07-01']") // summer on northern hemisphere (possible DST) - .addRoundTrip("ARRAY(date)", "ARRAY[DATE '2017-01-01']", new ArrayType(DATE), "ARRAY[DATE '2017-01-01']") // winter on northern hemisphere (possible DST on southern hemisphere) - .addRoundTrip("ARRAY(date)", "ARRAY[DATE '1983-04-01']", new ArrayType(DATE), "ARRAY[DATE '1983-04-01']") - .addRoundTrip("ARRAY(date)", "ARRAY[DATE '1983-10-01']", new ArrayType(DATE), "ARRAY[DATE '1983-10-01']") - .execute(getQueryRunner(), trinoCreateAsSelect("test_array_date")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_array_date")); - - SqlDataTypeTest.create() - .addRoundTrip("date ARRAY", "ARRAY[TO_DATE('1952-04-03', 'yyyy-MM-dd', 'local')]", new ArrayType(DATE), "ARRAY[DATE '1952-04-03']") // before epoch - .addRoundTrip("date ARRAY", "ARRAY[TO_DATE('1970-01-01', 'yyyy-MM-dd', 'local')]", new ArrayType(DATE), "ARRAY[DATE '1970-01-01']") - .addRoundTrip("date ARRAY", "ARRAY[TO_DATE('1970-02-03', 'yyyy-MM-dd', 'local')]", new ArrayType(DATE), "ARRAY[DATE '1970-02-03']") - .addRoundTrip("date ARRAY", "ARRAY[TO_DATE('2017-07-01', 'yyyy-MM-dd', 'local')]", new ArrayType(DATE), "ARRAY[DATE '2017-07-01']") // summer on northern hemisphere (possible DST) - .addRoundTrip("date ARRAY", "ARRAY[TO_DATE('2017-01-01', 'yyyy-MM-dd', 'local')]", new ArrayType(DATE), "ARRAY[DATE '2017-01-01']") // winter on northern hemisphere (possible DST on southern hemisphere) - .addRoundTrip("date ARRAY", "ARRAY[TO_DATE('1983-04-01', 'yyyy-MM-dd', 'local')]", new ArrayType(DATE), "ARRAY[DATE '1983-04-01']") - .addRoundTrip("date ARRAY", "ARRAY[TO_DATE('1983-10-01', 'yyyy-MM-dd', 'local')]", new ArrayType(DATE), "ARRAY[DATE '1983-10-01']") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_array_date")); - - SqlDataTypeTest.create() - .addRoundTrip("ARRAY(decimal(3, 0))", "ARRAY[CAST('193' AS decimal(3, 0)), CAST('19' AS decimal(3, 0)), CAST('-193' AS decimal(3, 0))]", new ArrayType(createDecimalType(3, 0)), "ARRAY[CAST('193' AS decimal(3, 0)), CAST('19' AS decimal(3, 0)), CAST('-193' AS decimal(3, 0))]") - .addRoundTrip("ARRAY(decimal(3, 1))", "ARRAY[CAST('10.0' AS decimal(3, 1)), CAST('10.1' AS decimal(3, 1)), CAST('-10.1' AS decimal(3, 1))]", new 
ArrayType(createDecimalType(3, 1)), "ARRAY[CAST('10.0' AS decimal(3, 1)), CAST('10.1' AS decimal(3, 1)), CAST('-10.1' AS decimal(3, 1))]") - .addRoundTrip("ARRAY(decimal(4, 2))", "ARRAY[CAST('2' AS decimal(4, 2)), CAST('2.3' AS decimal(4, 2))]", new ArrayType(createDecimalType(4, 2)), "ARRAY[CAST('2' AS decimal(4, 2)), CAST('2.3' AS decimal(4, 2))]") - .addRoundTrip("ARRAY(decimal(24, 2))", "ARRAY[CAST('2' AS decimal(24, 2)), CAST('2.3' AS decimal(24, 2)), CAST('123456789.3' AS decimal(24, 2))]", new ArrayType(createDecimalType(24, 2)), "ARRAY[CAST('2' AS decimal(24, 2)), CAST('2.3' AS decimal(24, 2)), CAST('123456789.3' AS decimal(24, 2))]") - .addRoundTrip("ARRAY(decimal(24, 4))", "ARRAY[CAST('12345678901234567890.31' AS decimal(24, 4))]", new ArrayType(createDecimalType(24, 4)), "ARRAY[CAST('12345678901234567890.31' AS decimal(24, 4))]") - .addRoundTrip("ARRAY(decimal(30, 5))", "ARRAY[CAST('3141592653589793238462643.38327' AS decimal(30, 5)), CAST('-3141592653589793238462643.38327' AS decimal(30, 5))]", new ArrayType(createDecimalType(30, 5)), "ARRAY[CAST('3141592653589793238462643.38327' AS decimal(30, 5)), CAST('-3141592653589793238462643.38327' AS decimal(30, 5))]") - .addRoundTrip("ARRAY(decimal(38, 0))", "ARRAY[CAST('27182818284590452353602874713526624977' AS decimal(38, 0)), CAST('-27182818284590452353602874713526624977' AS decimal(38, 0))]", new ArrayType(createDecimalType(38, 0)), "ARRAY[CAST('27182818284590452353602874713526624977' AS decimal(38, 0)), CAST('-27182818284590452353602874713526624977' AS decimal(38, 0))]") - .execute(getQueryRunner(), trinoCreateAsSelect("test_array_decimal")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_array_decimal")); - - SqlDataTypeTest.create() - .addRoundTrip("decimal(3, 0) ARRAY", "ARRAY[CAST(193 AS decimal(3, 0)), CAST(19 AS decimal(3, 0)), CAST(-193 AS decimal(3, 0))]", new ArrayType(createDecimalType(3, 0)), "ARRAY[CAST(193 AS decimal(3, 0)), CAST(19 AS decimal(3, 0)), CAST(-193 AS decimal(3, 0))]") - .addRoundTrip("decimal(3, 1) ARRAY", "ARRAY[CAST(10.0 AS decimal(3, 1)), CAST(10.1 AS decimal(3, 1)), CAST(-10.1 AS decimal(3, 1))]", new ArrayType(createDecimalType(3, 1)), "ARRAY[CAST(10.0 AS decimal(3, 1)), CAST(10.1 AS decimal(3, 1)), CAST(-10.1 AS decimal(3, 1))]") - .addRoundTrip("decimal(4, 2) ARRAY", "ARRAY[CAST(2 AS decimal(4, 2)), CAST(2.3 AS decimal(4, 2))]", new ArrayType(createDecimalType(4, 2)), "ARRAY[CAST(2 AS decimal(4, 2)), CAST(2.3 AS decimal(4, 2))]") - .addRoundTrip("decimal(24, 2) ARRAY", "ARRAY[CAST(2 AS decimal(24, 2)), CAST(2.3 AS decimal(24, 2)), CAST(123456789.3 AS decimal(24, 2))]", new ArrayType(createDecimalType(24, 2)), "ARRAY[CAST(2 AS decimal(24, 2)), CAST(2.3 AS decimal(24, 2)), CAST(123456789.3 AS decimal(24, 2))]") - .addRoundTrip("decimal(24, 4) ARRAY", "ARRAY[CAST(12345678901234567890.31 AS decimal(24, 4))]", new ArrayType(createDecimalType(24, 4)), "ARRAY[CAST(12345678901234567890.31 AS decimal(24, 4))]") - .addRoundTrip("decimal(30, 5) ARRAY", "ARRAY[CAST(3141592653589793238462643.38327 AS decimal(30, 5)), CAST(-3141592653589793238462643.38327 AS decimal(30, 5))]", new ArrayType(createDecimalType(30, 5)), "ARRAY[CAST(3141592653589793238462643.38327 AS decimal(30, 5)), CAST(-3141592653589793238462643.38327 AS decimal(30, 5))]") - .addRoundTrip("decimal(38, 0) ARRAY", "ARRAY[CAST(27182818284590452353602874713526624977 AS decimal(38, 0)), CAST(-27182818284590452353602874713526624977 AS decimal(38, 0))]", new ArrayType(createDecimalType(38, 0)), 
"ARRAY[CAST('27182818284590452353602874713526624977' AS decimal(38, 0)), CAST('-27182818284590452353602874713526624977' AS decimal(38, 0))]") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_array_decimal")); - - SqlDataTypeTest.create() - .addRoundTrip("ARRAY(char(10))", "ARRAY['text_a']", new ArrayType(createCharType(10)), "ARRAY[CAST('text_a' AS char(10))]") - .addRoundTrip("ARRAY(char(255))", "ARRAY['text_b']", new ArrayType(createCharType(255)), "ARRAY[CAST('text_b' AS char(255))]") - .addRoundTrip("ARRAY(char(65535))", "ARRAY['text_d']", new ArrayType(createCharType(65535)), "ARRAY[CAST('text_d' AS char(65535))]") - .execute(getQueryRunner(), trinoCreateAsSelect("test_array_char")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_array_char")); - - SqlDataTypeTest.create() - .addRoundTrip("char(10) ARRAY", "ARRAY['text_a']", new ArrayType(createCharType(10)), "ARRAY[CAST('text_a' AS char(10))]") - .addRoundTrip("char(255) ARRAY", "ARRAY['text_b']", new ArrayType(createCharType(255)), "ARRAY[CAST('text_b' AS char(255))]") - .addRoundTrip("char(65535) ARRAY", "ARRAY['text_d']", new ArrayType(createCharType(65535)), "ARRAY[CAST('text_d' AS char(65535))]") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_array_char")); - - SqlDataTypeTest.create() - .addRoundTrip("ARRAY(varchar(10))", "ARRAY['text_a']", new ArrayType(createVarcharType(10)), "ARRAY[CAST('text_a' AS varchar(10))]") - .addRoundTrip("ARRAY(varchar(255))", "ARRAY['text_b']", new ArrayType(createVarcharType(255)), "ARRAY[CAST('text_b' AS varchar(255))]") - .addRoundTrip("ARRAY(varchar(65535))", "ARRAY['text_d']", new ArrayType(createVarcharType(65535)), "ARRAY[CAST('text_d' AS varchar(65535))]") - .addRoundTrip("ARRAY(varchar(10485760))", "ARRAY['text_f']", new ArrayType(createVarcharType(10485760)), "ARRAY[CAST('text_f' AS varchar(10485760))]") - .addRoundTrip("ARRAY(varchar)", "ARRAY['unbounded']", new ArrayType(VARCHAR), "ARRAY[CAST('unbounded' AS varchar)]") - .execute(getQueryRunner(), trinoCreateAsSelect("test_array_varchar")) - .execute(getQueryRunner(), trinoCreateAndInsert("test_array_varchar")); - - SqlDataTypeTest.create() - .addRoundTrip("varchar(10) ARRAY", "ARRAY['text_a']", new ArrayType(createVarcharType(10)), "ARRAY[CAST('text_a' AS varchar(10))]") - .addRoundTrip("varchar(255) ARRAY", "ARRAY['text_b']", new ArrayType(createVarcharType(255)), "ARRAY[CAST('text_b' AS varchar(255))]") - .addRoundTrip("varchar(65535) ARRAY", "ARRAY['text_d']", new ArrayType(createVarcharType(65535)), "ARRAY[CAST('text_d' AS varchar(65535))]") - .addRoundTrip("varchar(10485760) ARRAY", "ARRAY['text_f']", new ArrayType(createVarcharType(10485760)), "ARRAY[CAST('text_f' AS varchar(10485760))]") - .addRoundTrip("varchar ARRAY", "ARRAY['unbounded']", new ArrayType(VARCHAR), "ARRAY[CAST('unbounded' AS varchar)]") - .addRoundTrip("integer primary key", "1", INTEGER, "1") - .execute(getQueryRunner(), phoenixCreateAndInsert("tpch.test_array_varchar")); - } - - @Test - public void testArrayNulls() - { - // Verify only SELECT instead of using SqlDataTypeTest because array comparison not supported for arrays with null elements - try (TestTable table = new TestTable(getQueryRunner()::execute, "test_array_nulls", "(c1 ARRAY(boolean), c2 ARRAY(varchar), c3 ARRAY(varchar))", ImmutableList.of("(NULL, ARRAY[NULL], ARRAY['foo', NULL, 'bar', NULL])"))) { - assertThat(query("SELECT c1 FROM " + 
table.getName())).matches("VALUES CAST(NULL AS ARRAY(boolean))"); - assertThat(query("SELECT c2 FROM " + table.getName())).matches("VALUES CAST(ARRAY[NULL] AS ARRAY(varchar))"); - assertThat(query("SELECT c3 FROM " + table.getName())).matches("VALUES CAST(ARRAY['foo', NULL, 'bar', NULL] AS ARRAY(varchar))"); - } - } - - private static void checkIsGap(ZoneId zone, LocalDateTime dateTime) - { - verify(isGap(zone, dateTime), "Expected %s to be a gap in %s", dateTime, zone); - } - - private static boolean isGap(ZoneId zone, LocalDateTime dateTime) - { - return zone.getRules().getValidOffsets(dateTime).isEmpty(); - } - - private static void checkIsDoubled(ZoneId zone, LocalDateTime dateTime) - { - verify(zone.getRules().getValidOffsets(dateTime).size() == 2, "Expected %s to be doubled in %s", dateTime, zone); - } - - private DataSetup trinoCreateAsSelect(String tableNamePrefix) - { - return trinoCreateAsSelect(getSession(), tableNamePrefix); - } - - private DataSetup trinoCreateAsSelect(Session session, String tableNamePrefix) - { - return new CreateAsSelectDataSetup(new TrinoSqlExecutor(getQueryRunner(), session), tableNamePrefix); - } - - private DataSetup trinoCreateAndInsert(String tableNamePrefix) - { - return trinoCreateAndInsert(getSession(), tableNamePrefix); - } - - private DataSetup trinoCreateAndInsert(Session session, String tableNamePrefix) - { - return new CreateAndInsertDataSetup(new TrinoSqlExecutor(getQueryRunner(), session), tableNamePrefix); - } - - private DataSetup phoenixCreateAndInsert(String tableNamePrefix) - { - return new CreateAndInsertDataSetup(new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()), tableNamePrefix); - } - - private Session sessionWithDecimalMappingAllowOverflow(RoundingMode roundingMode, int scale) - { - return Session.builder(getSession()) - .setCatalogSessionProperty("phoenix", DECIMAL_MAPPING, ALLOW_OVERFLOW.name()) - .setCatalogSessionProperty("phoenix", DECIMAL_ROUNDING_MODE, roundingMode.name()) - .setCatalogSessionProperty("phoenix", DECIMAL_DEFAULT_SCALE, Integer.valueOf(scale).toString()) - .build(); - } - - private Session sessionWithDecimalMappingStrict(UnsupportedTypeHandling unsupportedTypeHandling) - { - return Session.builder(getSession()) - .setCatalogSessionProperty("phoenix", DECIMAL_MAPPING, STRICT.name()) - .setCatalogSessionProperty("phoenix", UNSUPPORTED_TYPE_HANDLING, unsupportedTypeHandling.name()) - .build(); - } - - private void assertPhoenixQueryFails(@Language("SQL") String sql, String expectedMessage) - { - assertThatThrownBy(() -> new PhoenixSqlExecutor(phoenixServer.getJdbcUrl()).execute(sql)) - .cause() - .hasMessageContaining(expectedMessage); - } -} diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestingPhoenixServer.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestingPhoenixServer.java deleted file mode 100644 index 593124b3c6c0..000000000000 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestingPhoenixServer.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.phoenix5; - -import com.google.errorprone.annotations.concurrent.GuardedBy; -import io.airlift.log.Logger; -import io.trino.testing.ResourcePresence; -import io.trino.testing.SharedResource; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.logging.Level; - -import static java.lang.String.format; -import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER; -import static org.apache.hadoop.hbase.HConstants.MASTER_INFO_PORT; -import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_INFO_PORT; - -public final class TestingPhoenixServer - implements AutoCloseable -{ - private static final Logger LOG = Logger.get(TestingPhoenixServer.class); - - @GuardedBy("this") - private static final SharedResource<TestingPhoenixServer> sharedResource = new SharedResource<>(TestingPhoenixServer::new); - - public static synchronized SharedResource.Lease<TestingPhoenixServer> getInstance() - throws Exception - { - return sharedResource.getInstanceLease(); - } - - private HBaseTestingUtility hbaseTestingUtility; - private final int port; - private final Configuration conf = HBaseConfiguration.create(); - - private final java.util.logging.Logger apacheLogger; - - private final java.util.logging.Logger zookeeperLogger; - - private final java.util.logging.Logger securityLogger; - - private TestingPhoenixServer() - { - // keep references to prevent GC from resetting the log levels - apacheLogger = java.util.logging.Logger.getLogger("org.apache"); - apacheLogger.setLevel(Level.SEVERE); - zookeeperLogger = java.util.logging.Logger.getLogger("org.apache.phoenix.shaded.org.apache.zookeeper.server.ZooKeeperServer"); - zookeeperLogger.setLevel(Level.OFF); - securityLogger = java.util.logging.Logger.getLogger("SecurityLogger.org.apache"); - securityLogger.setLevel(Level.SEVERE); - // to squelch the SecurityLogger, - // instantiate logger with config above before config is overridden again in HBase test framework - org.apache.commons.logging.LogFactory.getLog("SecurityLogger.org.apache.hadoop.hbase.server"); - this.conf.set("hbase.security.logger", "ERROR"); - this.conf.setInt(MASTER_INFO_PORT, -1); - this.conf.setInt(REGIONSERVER_INFO_PORT, -1); - this.conf.setInt(HBASE_CLIENT_RETRIES_NUMBER, 15); - this.conf.setBoolean("phoenix.schema.isNamespaceMappingEnabled", true); - this.conf.set("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"); - this.hbaseTestingUtility = new HBaseTestingUtility(conf); - - try { - MiniZooKeeperCluster zkCluster = this.hbaseTestingUtility.startMiniZKCluster(); - port = zkCluster.getClientPort(); - - MiniHBaseCluster hbaseCluster = hbaseTestingUtility.startMiniHBaseCluster(1, 4); - hbaseCluster.waitForActiveAndReadyMaster(); - LOG.info("Phoenix server ready: %s", getJdbcUrl()); - } - catch (Exception e) { - throw new RuntimeException("Can't start Phoenix server.", e); - } - } - - @Override - public void close() - { - if (hbaseTestingUtility == null) { - return; - } - try { - LOG.info("Shutting down HBase cluster."); - hbaseTestingUtility.shutdownMiniHBaseCluster(); - hbaseTestingUtility.shutdownMiniZKCluster(); - } - catch (IOException e) -
Thread.currentThread().interrupt(); - throw new UncheckedIOException("Failed to shutdown HBaseTestingUtility instance", e); - } - hbaseTestingUtility = null; - } - - public String getJdbcUrl() - { - return format("jdbc:phoenix:localhost:%d:/hbase;phoenix.schema.isNamespaceMappingEnabled=true", port); - } - - @ResourcePresence - public boolean isRunning() - { - return hbaseTestingUtility != null; - } -} diff --git a/pom.xml b/pom.xml index 995048d08bcd..e433a2ade489 100644 --- a/pom.xml +++ b/pom.xml @@ -54,8 +54,6 @@ <module>lib/trino-memory-context</module> <module>lib/trino-orc</module> <module>lib/trino-parquet</module> - - <module>lib/trino-phoenix5-patched</module> <module>lib/trino-plugin-toolkit</module> <module>lib/trino-record-decoder</module> <module>plugin/trino-accumulo</module> @@ -95,7 +93,6 @@ <module>plugin/trino-opensearch</module> <module>plugin/trino-oracle</module> <module>plugin/trino-password-authenticators</module> - <module>plugin/trino-phoenix5</module> <module>plugin/trino-pinot</module> <module>plugin/trino-postgresql</module> <module>plugin/trino-prometheus</module> @@ -1314,13 +1311,6 @@ <version>${project.version}</version> - - <dependency> - <groupId>io.trino</groupId> - <artifactId>trino-phoenix5-patched</artifactId> - <version>${project.version}</version> - </dependency> - <groupId>io.trino</groupId> <artifactId>trino-pinot</artifactId> diff --git a/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java b/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java index 0488875940d7..0c217bfaf23d 100644 --- a/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java +++ b/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java @@ -80,7 +80,6 @@ public final class TestGroups public static final String ICEBERG_REST = "iceberg_rest"; public static final String ICEBERG_JDBC = "iceberg_jdbc"; public static final String ICEBERG_NESSIE = "iceberg_nessie"; - public static final String PHOENIX = "phoenix"; public static final String CLICKHOUSE = "clickhouse"; public static final String KUDU = "kudu"; public static final String MARIADB = "mariadb"; diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java index c44f85b98437..7ccba9ba34a8 100644 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java +++ b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java @@ -71,7 +71,6 @@ public void extendEnvironment(Environment.Builder builder) "mysql", "opensearch", "oracle", - "phoenix5", "pinot", "postgresql", "prometheus", diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodePhoenix5.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodePhoenix5.java deleted file mode 100644 index 00b4cd4566b3..000000000000 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodePhoenix5.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.tests.product.launcher.env.environment; - -import com.google.inject.Inject; -import io.trino.testing.TestingProperties; -import io.trino.tests.product.launcher.docker.DockerFiles; -import io.trino.tests.product.launcher.docker.DockerFiles.ResourceProvider; -import io.trino.tests.product.launcher.env.DockerContainer; -import io.trino.tests.product.launcher.env.Environment; -import io.trino.tests.product.launcher.env.EnvironmentProvider; -import io.trino.tests.product.launcher.env.common.StandardMultinode; -import io.trino.tests.product.launcher.env.common.TestsEnvironment; -import io.trino.tests.product.launcher.testcontainers.PortBinder; -import org.testcontainers.containers.startupcheck.IsRunningStartupCheckStrategy; - -import java.time.Duration; - -import static io.trino.tests.product.launcher.docker.ContainerUtil.forSelectedPorts; -import static io.trino.tests.product.launcher.env.EnvironmentContainers.configureTempto; -import static io.trino.tests.product.launcher.env.EnvironmentContainers.isTrinoContainer; -import static io.trino.tests.product.launcher.env.common.Standard.CONTAINER_TRINO_ETC; -import static java.util.Objects.requireNonNull; -import static org.testcontainers.utility.MountableFile.forHostPath; - -@TestsEnvironment -public final class EnvMultinodePhoenix5 - extends EnvironmentProvider -{ - private static final int ZOOKEEPER_PORT = 2181; - - private final ResourceProvider configDir; - private final PortBinder portBinder; - - @Inject - public EnvMultinodePhoenix5(StandardMultinode standardMultinode, DockerFiles dockerFiles, PortBinder portBinder) - { - super(standardMultinode); - this.configDir = dockerFiles.getDockerFilesHostDirectory("conf/environment/multinode-phoenix5/"); - this.portBinder = requireNonNull(portBinder, "portBinder is null"); - } - - @Override - public void extendEnvironment(Environment.Builder builder) - { - String dockerImageName = "ghcr.io/trinodb/testing/phoenix5:" + TestingProperties.getDockerImagesVersion(); - DockerContainer phoenix = new DockerContainer(dockerImageName, "phoenix") - .withStartupCheckStrategy(new IsRunningStartupCheckStrategy()) - .waitingFor(forSelectedPorts(ZOOKEEPER_PORT)) - .withStartupTimeout(Duration.ofMinutes(5)); - - portBinder.exposePort(phoenix, ZOOKEEPER_PORT); - - builder.addContainer(phoenix); - - builder.configureContainers(container -> { - if (isTrinoContainer(container.getLogicalName())) { - container.withCopyFileToContainer(forHostPath(configDir.getPath("hbase-site.xml")), CONTAINER_TRINO_ETC + "/hbase-site.xml"); - container.withCopyFileToContainer(forHostPath(configDir.getPath("phoenix.properties")), CONTAINER_TRINO_ETC + "/catalog/phoenix.properties"); - } - }); - - configureTempto(builder, configDir); - builder.addConnector("phoenix5"); - } -} diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java index f25587c57d3e..eb421ce5f8ee 100644 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java +++ b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java @@ -16,7 +16,6 @@ import com.google.common.collect.ImmutableList; import 
io.trino.tests.product.launcher.env.EnvironmentConfig; import io.trino.tests.product.launcher.env.EnvironmentDefaults; -import io.trino.tests.product.launcher.env.environment.EnvMultinodePhoenix5; import io.trino.tests.product.launcher.env.environment.EnvSinglenodeKerberosKmsHdfsImpersonation; import io.trino.tests.product.launcher.env.environment.EnvSinglenodeKerberosKmsHdfsImpersonationWithCredentialCache; import io.trino.tests.product.launcher.env.environment.EnvSinglenodeKerberosKmsHdfsNoImpersonation; @@ -28,7 +27,6 @@ import static com.google.common.base.Verify.verify; import static io.trino.tests.product.TestGroups.CONFIGURED_FEATURES; -import static io.trino.tests.product.TestGroups.PHOENIX; import static io.trino.tests.product.TestGroups.STORAGE_FORMATS; import static io.trino.tests.product.launcher.suite.SuiteTestRun.testOnEnvironment; @@ -52,9 +50,6 @@ public List<SuiteTestRun> getTestRuns(EnvironmentConfig config) .build(), testOnEnvironment(EnvSinglenodeKerberosKmsHdfsImpersonationWithCredentialCache.class) .withGroups(CONFIGURED_FEATURES, STORAGE_FORMATS) - .build(), - testOnEnvironment(EnvMultinodePhoenix5.class) - .withGroups(CONFIGURED_FEATURES, PHOENIX) .build()); } } diff --git a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-all/phoenix5.properties b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-all/phoenix5.properties deleted file mode 100644 index e39351ce4169..000000000000 --- a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-all/phoenix5.properties +++ /dev/null @@ -1,2 +0,0 @@ -connector.name=phoenix5 -phoenix.connection-url=jdbc:phoenix:host1.invalid:2181:/hbase diff --git a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/hbase-site.xml b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/hbase-site.xml deleted file mode 100644 index b97cec8b4232..000000000000 --- a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/hbase-site.xml +++ /dev/null @@ -1,7 +0,0 @@ - <?xml version="1.0"?> - <configuration> - <property> - <name>phoenix.schema.isNamespaceMappingEnabled</name> - <value>true</value> - </property> - </configuration> diff --git a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/phoenix.properties b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/phoenix.properties deleted file mode 100644 index c62caff6c5b4..000000000000 --- a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/phoenix.properties +++ /dev/null @@ -1,3 +0,0 @@ -connector.name=phoenix5 -phoenix.connection-url=jdbc:phoenix:phoenix:2181:/hbase -phoenix.config.resources=/docker/presto-product-tests/conf/presto/etc/hbase-site.xml diff --git a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/tempto-configuration.yaml b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/tempto-configuration.yaml deleted file mode 100644 index 412b5d209cdb..000000000000 ---
a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-phoenix5/tempto-configuration.yaml +++ /dev/null @@ -1,3 +0,0 @@ -databases: - presto: - jdbc_url: "jdbc:trino://${databases.presto.host}:${databases.presto.port}/phoenix/default" diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/phoenix/TestPhoenix.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/phoenix/TestPhoenix.java deleted file mode 100644 index 3d2ca3b1884d..000000000000 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/phoenix/TestPhoenix.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.tests.product.phoenix; - -import io.trino.tempto.ProductTest; -import io.trino.tempto.query.QueryResult; -import org.testng.annotations.Test; - -import static io.trino.tempto.assertions.QueryAssert.Row.row; -import static io.trino.tests.product.TestGroups.PHOENIX; -import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; -import static io.trino.tests.product.utils.QueryExecutors.onTrino; -import static org.assertj.core.api.Assertions.assertThat; - -public class TestPhoenix - extends ProductTest -{ - @Test(groups = {PHOENIX, PROFILE_SPECIFIC_TESTS}) - public void testCreateTableAsSelect() - { - QueryResult result = onTrino().executeQuery("CREATE TABLE nation AS SELECT * FROM tpch.tiny.nation"); - try { - assertThat(result).updatedRowsCountIsEqualTo(25); - assertThat(onTrino().executeQuery("SELECT COUNT(*) FROM nation")) - .containsOnly(row(25)); - } - finally { - onTrino().executeQuery("DROP TABLE nation"); - } - } -}