From b78e917db3cf4309a5ae80157a5a2c89ded45c20 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 17 Mar 2022 14:25:33 +0800 Subject: [PATCH 001/172] modify batch version --- test/batch-test/openmldb-batch-test/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/batch-test/openmldb-batch-test/pom.xml b/test/batch-test/openmldb-batch-test/pom.xml index 3528ba18d60..8377d330f31 100644 --- a/test/batch-test/openmldb-batch-test/pom.xml +++ b/test/batch-test/openmldb-batch-test/pom.xml @@ -16,8 +16,8 @@ 2.12.8 2.12 3.0.0 - 0.4.2 - 0.4.2 + 0.4.3 + 0.4.3-macos 0.1.0-SNAPSHOT provided From cda97ef642327165929c4d1b5f1358ef5fe5a0e3 Mon Sep 17 00:00:00 2001 From: wangkaidong <1027547596@qq.com> Date: Tue, 22 Mar 2022 14:59:12 +0800 Subject: [PATCH 002/172] standalone 01 --- .../src/main/resources/fedb_deploy.properties | 2 +- .../python-sdk-test/common/__init__.py | 0 .../python-sdk-test/common/fedb_test.py | 2 +- .../common/standalone_client.py | 35 +++++++++++++++++++ .../common/standalone_config.py | 31 ++++++++++++++++ .../python-sdk-test/common/standalone_test.py | 34 ++++++++++++++++++ .../python-sdk-test/conf/fedb.conf | 4 +-- .../python-sdk-test/conf/standalone.conf | 25 +++++++++++++ 8 files changed, 129 insertions(+), 4 deletions(-) mode change 100644 => 100755 test/integration-test/python-sdk-test/common/__init__.py mode change 100644 => 100755 test/integration-test/python-sdk-test/common/fedb_test.py create mode 100644 test/integration-test/python-sdk-test/common/standalone_client.py create mode 100755 test/integration-test/python-sdk-test/common/standalone_config.py create mode 100755 test/integration-test/python-sdk-test/common/standalone_test.py create mode 100644 test/integration-test/python-sdk-test/conf/standalone.conf diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties index 44ff02afda0..7e0034ac509 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties @@ -11,6 +11,6 @@ spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0. tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.2-linux.tar.gz #tmp=/home/zhaowei01/tobe/openmldb_linux.tar.gz -standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.0-linux.tar.gz +standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.2-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark.tar.gz diff --git a/test/integration-test/python-sdk-test/common/__init__.py b/test/integration-test/python-sdk-test/common/__init__.py old mode 100644 new mode 100755 diff --git a/test/integration-test/python-sdk-test/common/fedb_test.py b/test/integration-test/python-sdk-test/common/fedb_test.py old mode 100644 new mode 100755 index 1ee79b00211..6098ec813dd --- a/test/integration-test/python-sdk-test/common/fedb_test.py +++ b/test/integration-test/python-sdk-test/common/fedb_test.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import yaml +#import yaml from common import fedb_config from common.fedb_client import FedbClient diff --git a/test/integration-test/python-sdk-test/common/standalone_client.py b/test/integration-test/python-sdk-test/common/standalone_client.py new file mode 100644 index 00000000000..deb5d13332e --- /dev/null +++ b/test/integration-test/python-sdk-test/common/standalone_client.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import openmldb +import sqlalchemy as db +from nb_log import LogManager + +log = LogManager('fedb-sdk-test').get_logger_and_add_handlers() + + +class StandaloneClient: + + def __init__(self, host, port, dbName="db1"): + self.host = host + self.port = port + self.dbName = dbName + + def getConnect(self): + engine = db.create_engine('openmldb:///{}?host={}&port={}'.format(self.dbName, self.host, self.port)) + connect = engine.connect() + return connect diff --git a/test/integration-test/python-sdk-test/common/standalone_config.py b/test/integration-test/python-sdk-test/common/standalone_config.py new file mode 100755 index 00000000000..17a5c42dfac --- /dev/null +++ b/test/integration-test/python-sdk-test/common/standalone_config.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import configparser +import util.tools as tool + +config = configparser.ConfigParser() +confPath = tool.getAbsolutePath("conf/standalone.conf") +config.read(confPath) +lists_header = config.sections() # 配置组名, ['test', 'mysql'] # 不含'DEFAULT' +env = config['global']['env'] +default_db_name = config['global']['default_db_name'] +levels = config['global']['levels'].split(",") +levels = list(map(lambda l: int(l), levels)) + +host = config['standalone'][env + '_host'] +port = config['standalone'][env + '_port'] + diff --git a/test/integration-test/python-sdk-test/common/standalone_test.py b/test/integration-test/python-sdk-test/common/standalone_test.py new file mode 100755 index 00000000000..a1aa586e2ab --- /dev/null +++ b/test/integration-test/python-sdk-test/common/standalone_test.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from common import standalone_config +from common.standalone_client import StandaloneClient +from nb_log import LogManager +import sys +import os +sys.path.append(os.pardir) +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class StandaloneTest: + + def setup_class(self): + self.client = StandaloneClient(standalone_config.host,standalone_config.port,standalone_config.default_db_name) + self.connect = self.client.getConnect() + try: + self.connect.execute("create database {};".format(standalone_config.default_db_name)) + log.info("create db:" + standalone_config.default_db_name + ",success") + except Exception as e: + log.info("create db:" + standalone_config.default_db_name + ",failed . msg:" + str(e)) diff --git a/test/integration-test/python-sdk-test/conf/fedb.conf b/test/integration-test/python-sdk-test/conf/fedb.conf index 1518aad2160..074aa67e0dd 100644 --- a/test/integration-test/python-sdk-test/conf/fedb.conf +++ b/test/integration-test/python-sdk-test/conf/fedb.conf @@ -1,12 +1,12 @@ [global] env=qa -default_db_name=test_fedb +default_db_name=db1 levels=0 [fedb] #配置zk地址, 和集群启动配置中的zk_cluster保持一致 -qa_zk_cluster=172.24.4.55:10000 +qa_zk_cluster=172.24.4.40:10000 #配置集群的zk根路径, 和集群启动配置中的zk_root_path保持一致 qa_zk_root_path=/openmldb qa_tb_endpoint_0=172.24.4.55:10003 diff --git a/test/integration-test/python-sdk-test/conf/standalone.conf b/test/integration-test/python-sdk-test/conf/standalone.conf new file mode 100644 index 00000000000..ee8370a0e04 --- /dev/null +++ b/test/integration-test/python-sdk-test/conf/standalone.conf @@ -0,0 +1,25 @@ + +[global] +env=qa +default_db_name=db1 +levels=0 + +[standalone] +qa_port=172.24.4.40 +qa_host=10008 +# #配置zk地址, 和集群启动配置中的zk_cluster保持一致 +# qa_zk_cluster=172.24.4.55:10000 +# #配置集群的zk根路径, 和集群启动配置中的zk_root_path保持一致 +# qa_zk_root_path=/openmldb +# qa_tb_endpoint_0=172.24.4.55:10003 +# qa_tb_endpoint_1=172.24.4.55:10004 +# qa_tb_endpoint_2=172.24.4.55:10005 +# +# cj_zk_cluster=127.0.0.1:6181 +# 
cj_zk_root_path=/onebox +# cj_tb_endpoint_0=127.0.0.1:9520 +# cj_tb_endpoint_1=127.0.0.1:9521 +# cj_tb_endpoint_2=127.0.0.1:9522 +# +# cicd_zk_cluster=127.0.0.1:6181 +# cicd_zk_root_path=/onebox \ No newline at end of file From 07177026891196848c404aabbdc7dd62196c9be2 Mon Sep 17 00:00:00 2001 From: wangkaidong <1027547596@qq.com> Date: Wed, 13 Apr 2022 14:41:55 +0800 Subject: [PATCH 003/172] standalone java sdk,python sdk and offline python sdk --- cases/function/expression/test_predicate.yaml | 356 +++++++++--------- .../function/join/test_lastjoin_complex.yaml | 6 +- cases/function/out_in/test_out_in.yaml | 2 +- cases/function/select/test_where.yaml | 10 +- cases/function/v040/test_groupby.yaml | 36 +- cases/function/v040/test_udaf.yaml | 8 +- cases/query/const_query.yaml | 23 +- .../openmldb-sdk-test/pom.xml | 4 +- .../checker/DeploymentCountCheckerByCli.java | 3 +- .../java_sdk_test/common/FedbTest.java | 12 +- .../java_sdk_test/common/StandaloneTest.java | 18 +- .../java_sdk_test/entity/FesqlResult.java | 1 + .../executor/BatchSQLExecutor.java | 1 + .../java_sdk_test/util/FesqlUtil.java | 145 ++++++- .../src/main/resources/command.properties | 4 +- .../src/main/resources/fedb_deploy.properties | 4 +- .../src/main/resources/log4j.properties | 2 +- .../standalone/v030/DDLTest.java | 41 ++ .../standalone/v030/DMLTest.java | 17 + .../standalone/v030/DeploymentTest.java | 25 ++ .../standalone/v030/ExpressTest.java | 9 + .../standalone/v030/FunctionTest.java | 8 + .../standalone/v030/LastJoinTest.java | 8 + .../standalone/v030/MultiDBTest.java | 8 + .../standalone/v030/OutInTest.java | 6 + .../standalone/v030/SelectTest.java | 10 + .../standalone/v030/WindowTest.java | 10 + .../standalone/v040/ExpressTest.java | 9 + .../standalone/v040/FunctionTest.java | 16 + .../standalone/v040/GroupByTest.java | 8 + .../python-sdk-test/check/checker.py | 2 + .../python-sdk-test/common/fedb_client.py | 11 +- .../python-sdk-test/common/fedb_test.py | 14 +- 
.../common/standalone_client.py | 11 + .../python-sdk-test/common/standalone_test.py | 11 +- .../python-sdk-test/conf/fedb.conf | 7 +- .../python-sdk-test/conf/standalone.conf | 9 +- .../python-sdk-test/executor/fedb_executor.py | 6 + .../python-sdk-test/standalone/__init__.py | 15 + .../standalone/test_standalone_ddl.py | 50 +++ .../standalone/test_standalone_deploy.py | 42 +++ .../standalone/test_standalone_dml.py | 43 +++ .../standalone/test_standalone_express.py | 38 ++ .../test_standalone_express_v040.py | 35 ++ .../standalone/test_standalone_function.py | 35 ++ .../test_standalone_function_v040.py | 43 +++ .../test_standalone_groupby_v040.py | 36 ++ .../standalone/test_standalone_lastjoin.py | 36 ++ .../standalone/test_standalone_multidb.py | 35 ++ .../standalone/test_standalone_outin.py | 35 ++ .../standalone/test_standalone_select.py | 52 +++ .../standalone/test_standalone_window.py | 51 +++ .../python-sdk-test/util/fedb_util.py | 56 ++- 53 files changed, 1216 insertions(+), 267 deletions(-) create mode 100644 test/integration-test/python-sdk-test/standalone/__init__.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_ddl.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_dml.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_express.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_express_v040.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_function.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_function_v040.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_groupby_v040.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_lastjoin.py create mode 100644 
test/integration-test/python-sdk-test/standalone/test_standalone_multidb.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_outin.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_select.py create mode 100644 test/integration-test/python-sdk-test/standalone/test_standalone_window.py diff --git a/cases/function/expression/test_predicate.yaml b/cases/function/expression/test_predicate.yaml index 773fe215c78..bcb0c3bec81 100644 --- a/cases/function/expression/test_predicate.yaml +++ b/cases/function/expression/test_predicate.yaml @@ -597,181 +597,181 @@ cases: - [5, 1, 1590115440000, false] - [7, 1, 1590115450000, false] - [9, 1, 1590115460000, true] - - id: like_predicate_1 - desc: like predicate without escape - inputs: - - columns: ["id int", "std_ts timestamp"] - indexs: ["index1:id:std_ts"] - rows: - - [1, 1590115420000 ] - - [2, 1590115430000 ] - - [3, 1590115440000 ] - - [4, 1590115450000 ] - - [5, 1590115460000 ] - - [6, 1590115470000 ] - - columns: ["id int", "ts timestamp", "col2 string"] - indexs: ["idx:id:ts"] - rows: - - [1, 1590115420000, John] - - [2, 1590115430000, Mary] - - [3, 1590115440000, mike] - - [4, 1590115450000, Dan] - - [5, 1590115460000, Evan_W] - - [6, 1590115470000, M] - dataProvider: - - ["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE - - ["m%", "M_ry" ] # match pattern - sql: | - select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]'; - expect: - columns: ["id int", "col2 string"] - order: id - expectProvider: - 0: - 0: - rows: - - [1, null] - - [2, null] - - [3, mike] - - [4, null] - - [5, null] - - [6, null] - 1: - rows: - - [1, null] - - [2, Mary] - - [3, null] - - [4, null] - - [5, null] - - [6, null] - 1: - 0: - rows: - - [1, John] - - [2, Mary] - - [3, null] - - [4, Dan] - - [5, Evan_W] - - [6, M] - 1: - rows: - - [1, John] - - [2, null] - - [3, mike] - - [4, Dan] - - [5, Evan_W] - - [6, M] - 2: - 0: - rows: - - [1, 
null] - - [2, Mary] - - [3, mike] - - [4, null] - - [5, null] - - [6, M] - 1: - rows: - - [1, null] - - [2, Mary] - - [3, null] - - [4, null] - - [5, null] - - [6, null] - 3: - 0: - rows: - - [1, John] - - [2, null] - - [3, null] - - [4, Dan] - - [5, Evan_W] - - [6, null] - 1: - rows: - - [1, John] - - [2, null] - - [3, mike] - - [4, Dan] - - [5, Evan_W] - - [6, M] - - id: like_predicate_2 - desc: like predicate with escape - inputs: - - columns: ["id int", "std_ts timestamp"] - indexs: ["index1:id:std_ts"] - rows: - - [1, 1590115420000 ] - - [2, 1590115430000 ] - - [3, 1590115440000 ] - - [4, 1590115450000 ] - - [5, 1590115460000 ] - - [6, 1590115470000 ] - - columns: ["id int", "ts timestamp", "col2 string"] - indexs: ["idx:id:ts"] - rows: - - [1, 1590115420000, a*_b] - - [2, 1590115430000, a*mb] - - [3, 1590115440000, "%a_%b"] - - [4, 1590115450000, "Ta_sub"] - - [5, 1590115460000, "lamrb"] - - [6, 1590115470000, "%a*_%b"] - dataProvider: - - ["LIKE", "NOT ILIKE"] - - ["%", "*", ""] # escape with % or disable - sql: | - select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]'; - expect: - columns: ["id int", "col2 string"] - order: id - expectProvider: - 0: - 0: - rows: - - [1, a*_b] - - [2, a*mb] - - [3, null] - - [4, null] - - [5, null] - - [6, null] - 1: - rows: - - [1, null] - - [2, null] - - [3, "%a_%b"] - - [4, Ta_sub] - - [5, null] - - [6, null] - 2: - rows: - - [1, a*_b] - - [2, a*mb] - - [3, null] - - [4, null] - - [5, null] - - [6, "%a*_%b"] - 1: - 0: - rows: - - [1, null] - - [2, null] - - [3, "%a_%b"] - - [4, "Ta_sub"] - - [5, "lamrb"] - - [6, "%a*_%b"] - 1: - rows: - - [1, a*_b] - - [2, a*mb] - - [3, null] - - [4, null] - - [5, "lamrb"] - - [6, "%a*_%b"] - 2: - rows: - - [1, null] - - [2, null] - - [3, "%a_%b"] - - [4, "Ta_sub"] - - [5, "lamrb"] - - [6, null] +# - id: like_predicate_1 +# desc: like predicate without escape +# inputs: +# - columns: ["id int", "std_ts timestamp"] +# indexs: 
["index1:id:std_ts"] +# rows: +# - [1, 1590115420000 ] +# - [2, 1590115430000 ] +# - [3, 1590115440000 ] +# - [4, 1590115450000 ] +# - [5, 1590115460000 ] +# - [6, 1590115470000 ] +# - columns: ["id int", "ts timestamp", "col2 string"] +# indexs: ["idx:id:ts"] +# rows: +# - [1, 1590115420000, John] +# - [2, 1590115430000, Mary] +# - [3, 1590115440000, mike] +# - [4, 1590115450000, Dan] +# - [5, 1590115460000, Evan_W] +# - [6, 1590115470000, M] +# dataProvider: +# - ["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE +# - ["m%", "M_ry" ] # match pattern +# sql: | +# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]'; +# expect: +# columns: ["id int", "col2 string"] +# order: id +# expectProvider: +# 0: +# 0: +# rows: +# - [1, null] +# - [2, null] +# - [3, mike] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# 0: +# rows: +# - [1, John] +# - [2, Mary] +# - [3, null] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# 1: +# rows: +# - [1, John] +# - [2, null] +# - [3, mike] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# 2: +# 0: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, mike] +# - [4, null] +# - [5, null] +# - [6, M] +# 1: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 3: +# 0: +# rows: +# - [1, John] +# - [2, null] +# - [3, null] +# - [4, Dan] +# - [5, Evan_W] +# - [6, null] +# 1: +# rows: +# - [1, John] +# - [2, null] +# - [3, mike] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# - id: like_predicate_2 +# desc: like predicate with escape +# inputs: +# - columns: ["id int", "std_ts timestamp"] +# indexs: ["index1:id:std_ts"] +# rows: +# - [1, 1590115420000 ] +# - [2, 1590115430000 ] +# - [3, 1590115440000 ] +# - [4, 1590115450000 ] +# - [5, 1590115460000 ] +# - [6, 1590115470000 ] +# - columns: ["id int", "ts timestamp", "col2 string"] +# indexs: ["idx:id:ts"] +# 
rows: +# - [1, 1590115420000, a*_b] +# - [2, 1590115430000, a*mb] +# - [3, 1590115440000, "%a_%b"] +# - [4, 1590115450000, "Ta_sub"] +# - [5, 1590115460000, "lamrb"] +# - [6, 1590115470000, "%a*_%b"] +# dataProvider: +# - ["LIKE", "NOT ILIKE"] +# - ["%", "*", ""] # escape with % or disable +# sql: | +# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]'; +# expect: +# columns: ["id int", "col2 string"] +# order: id +# expectProvider: +# 0: +# 0: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, Ta_sub] +# - [5, null] +# - [6, null] +# 2: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, "%a*_%b"] +# 1: +# 0: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, "Ta_sub"] +# - [5, "lamrb"] +# - [6, "%a*_%b"] +# 1: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, "lamrb"] +# - [6, "%a*_%b"] +# 2: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, "Ta_sub"] +# - [5, "lamrb"] +# - [6, null] diff --git a/cases/function/join/test_lastjoin_complex.yaml b/cases/function/join/test_lastjoin_complex.yaml index d26159d2203..b73fad5afa0 100644 --- a/cases/function/join/test_lastjoin_complex.yaml +++ b/cases/function/join/test_lastjoin_complex.yaml @@ -946,7 +946,7 @@ cases: success: true columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ] rows: - - [ 1, 2020-05-01, 20, 30 ] + - [ 1, '2020-05-01', 20, 30 ] - id: 17-2 desc: 两个子查询lastjoin,order不是主表的ts-离线支持 @@ -998,7 +998,7 @@ cases: success: true columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ] rows: - - [ 1, 2020-05-01, 20, 30 ] + - [ 1, '2020-05-01', 20, 30 ] - id: 18-2 desc: 两个子查询lastjoin,拼接条件不是主表的索引-不带orderby-离线支持 @@ -1026,7 +1026,7 @@ cases: success: true columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ] rows: - - [ 1, 2020-05-01, 20, 30 ] + - [ 
1, '2020-05-01', 20, 30 ] - id: 19-1 desc: 两个子查询lastjoin-子查询带窗口特征-没有使用索引-不带orderby diff --git a/cases/function/out_in/test_out_in.yaml b/cases/function/out_in/test_out_in.yaml index 62de26ea78d..e7ac9134dfd 100644 --- a/cases/function/out_in/test_out_in.yaml +++ b/cases/function/out_in/test_out_in.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: [] +debugs: ['数据里有null、空串、特殊字符'] cases: - id: 0 diff --git a/cases/function/select/test_where.yaml b/cases/function/select/test_where.yaml index 427edcfc29d..c2a6db12f7f 100644 --- a/cases/function/select/test_where.yaml +++ b/cases/function/select/test_where.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. sqlDialect: ["HybridSQL"] -debugs: [] +debugs: ["Where条件未命中索引示例2"] cases: - id: 0 desc: Where条件命中索引 @@ -21,8 +21,8 @@ cases: sql: | SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=5; inputs: - - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string - index: index1:col2:col5 + - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"] + indexs: ["index1:col2:col5"] data: | 0, 1, 5, 1.1, 11.1, 1, 1 0, 2, 5, 2.2, 22.2, 2, 22 @@ -140,8 +140,8 @@ cases: sql: | SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=col3 and col1 < 2; inputs: - - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string - index: index1:col2:col5 + - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"] + indexs: ["index1:col2:col5"] data: | 0, 1, 5, 1.1, 11.1, 1, 1 0, 2, 5, 2.2, 22.2, 2, 22 diff --git a/cases/function/v040/test_groupby.yaml b/cases/function/v040/test_groupby.yaml index a44b93e6cfb..77fd25bb847 100644 --- a/cases/function/v040/test_groupby.yaml +++ b/cases/function/v040/test_groupby.yaml @@ -31,7 +31,7 @@ 
cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 string","v1 bigint"] rows: - ["aa",2] - ["bb",2] @@ -52,7 +52,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 string","v1 bigint"] rows: - ["aa",2] - ["bb",2] @@ -73,7 +73,7 @@ cases: sql: select c1,c2,count(*) as v1 from {0} group by c1,c2; expect: order: c1 - columns: ["c1 string","c2 int","v1 int"] + columns: ["c1 string","c2 int","v1 bigint"] rows: - ["aa",11,2] - ["bb",11,2] @@ -94,7 +94,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 int","v1 bigint"] rows: - [11,2] - [22,2] @@ -114,7 +114,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 bigint","v1 bigint"] rows: - [11,2] - [22,2] @@ -134,7 +134,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 smallint","v1 bigint"] rows: - [11,2] - [22,2] @@ -186,7 +186,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 date","v1 bigint"] rows: - ["2020-05-01",2] - ["2020-05-02",2] @@ -206,7 +206,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 timestamp","v1 bigint"] rows: - [11,2] - [22,2] @@ -226,7 +226,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 bool","v1 bigint"] rows: - [true,3] - [false,2] @@ -246,7 +246,7 @@ cases: sql: select c1,count(*) as v1 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 string","v1 bigint"] rows: - ["",2] - [null,2] @@ -267,7 +267,7 @@ cases: sql: select 
c1,c2,count(*) as v1 from {0} group by c1,c2; expect: order: c1 - columns: ["c1 string","c2 int","v1 int"] + columns: ["c1 string","c2 int","v1 bigint"] rows: - ["aa",11,2] - ["bb",11,2] @@ -288,7 +288,7 @@ cases: - [6,"aa",11,1590738995000] sql: select c1,c2,count(*) as v1 from {0} group by c1,c2; expect: - columns: ["c1 string","c2 int","v1 int"] + columns: ["c1 string","c2 int","v1 bigint"] rows: - ["aa",12,1] - ["bb",11,2] @@ -326,7 +326,7 @@ cases: sql: select c1,count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0} group by c1; expect: order: c1 - columns: ["c1 string","v1 int","v2 int","v3 int","v4 double","v5 bigint"] + columns: ["c1 string","v1 bigint","v2 int","v3 int","v4 double","v5 int"] rows: - ["aa",3,6,1,3.333333,10] - ["bb",2,5,2,3.5,7] @@ -362,7 +362,7 @@ cases: sql: select c1,count(c2) as v1 from {0} group by c1 having count(c2)>1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 string","v1 bigint"] rows: - ["aa",3] - ["bb",2] @@ -455,7 +455,7 @@ cases: sql: select t1.c1,t1.v1,t2.v1 from (select c1,sum(c2) as v1 from {0} group by c1) as t1 last join (select c1,sum(c2) as v1 from {1} group by c1) as t2 on t1.c1=t2.c1; expect: order: c1 - columns: [ "c1 string","v1 bigint","v1 bigint"] + columns: [ "c1 string","v1 int","v1 int"] rows: - [ "aa",23,2 ] - [ "cc",41,62 ] @@ -491,7 +491,7 @@ cases: sql: select c1,count(*) as v1 from (select * from {0}) as t group by c1; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 string","v1 bigint"] rows: - ["aa",2] - ["bb",2] @@ -511,7 +511,7 @@ cases: sql: select * from (select c1,count(*) as v1 from {0} group by c1); expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 string","v1 bigint"] rows: - ["aa",2] - ["bb",2] @@ -548,7 +548,7 @@ cases: sql: select * from (select c1,count(*) as v1 from {0} group by c1) where v1=2; expect: order: c1 - columns: ["c1 string","v1 int"] + columns: ["c1 string","v1 bigint"] rows: - ["aa",2] - 
["bb",2] diff --git a/cases/function/v040/test_udaf.yaml b/cases/function/v040/test_udaf.yaml index ba325e33fdb..fee7f58b800 100644 --- a/cases/function/v040/test_udaf.yaml +++ b/cases/function/v040/test_udaf.yaml @@ -30,7 +30,7 @@ cases: - [5,"bb",1590738994000] sql: select count(*) as v1 from {0}; expect: - columns: ["v1 int"] + columns: ["v1 bigint"] rows: - [5] - id: 1 @@ -64,7 +64,7 @@ cases: sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0}; expect: order: c1 - columns: ["v1 int","v2 int","v3 int","v4 double","v5 bigint"] + columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"] rows: - [6,6,1,3.5,21] - id: 3 @@ -77,7 +77,7 @@ cases: sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0}; expect: order: c1 - columns: ["v1 int","v2 int","v3 int","v4 double","v5 bigint"] + columns: ["v1 int","v2 int","v3 int","v4 double","v5 int"] rows: - [0,0,0,0,0] - id: 4 @@ -96,7 +96,7 @@ cases: sql: select count(c1) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0}; expect: order: c1 - columns: ["v1 int","v2 int","v3 int","v4 double","v5 bigint"] + columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"] rows: - [5,6,1,3.6,18] diff --git a/cases/query/const_query.yaml b/cases/query/const_query.yaml index 5591f55e6d3..70c8a7bf955 100644 --- a/cases/query/const_query.yaml +++ b/cases/query/const_query.yaml @@ -21,7 +21,8 @@ cases: sql: | select 1 as id, 2 as col1, 3.3 as col2; expect: - schema: id:int32, col1:int, col2:double +# schema: id:int32, col1:int, col2:double + columns: ["id int","col1 int","col2 double"] order: id rows: - [1, 2, 3.3] @@ -32,7 +33,8 @@ cases: sql: | select 1 as id, "hello_world" as col1; expect: - schema: id:int32, col1:string +# schema: id:int32, col1:string + columns: ["id int","col1 string"] order: id rows: - [1, "hello_world"] @@ -43,7 +45,8 @@ cases: sql: | select 1 as id, substring("hello_world", 3, 6) as col1; expect: - schema: 
id:int32, col1:string +# schema: id:int32, col1:string + columns: ["id int","col1 string"] order: id rows: - [1, "llo_wo"] @@ -54,7 +57,8 @@ cases: sql: | select 1 as id, substring("hello_world", 3) as col1; expect: - schema: id:int32, col1:string +# schema: id:int32, col1:string + columns: ["id int","col1 string"] order: id rows: - [1, "llo_world"] @@ -65,13 +69,14 @@ cases: sql: | select 1 as id, concat("hello", "world", "abc") as col1; expect: - schema: id:int32, col1:string + columns: ["id int","col1 string"] order: id rows: - [1, "helloworldabc"] - id: 5 desc: cast常量 using CAST operator mode: request-unsupport + db: db1 inputs: - columns: ["c1 int", "c2 string", "c5 bigint"] indexs: ["index1:c1:c5"] @@ -82,10 +87,11 @@ cases: expect: columns: ["c1 int", "c2 bigint", "c3 float", "c4 double", "c5 timestamp", "c6 date", "c7 string"] rows: - - [10, 10, 10.0, 10.0, 1590115460000, 2020-05-20, "10"] + - [10, 10, 10.0, 10.0, 1590115460000, '2020-05-20', "10"] - id: 6 desc: cast NULL常量 using CAST operator mode: request-unsupport + db: db1 inputs: - columns: ["c1 int", "c2 string", "c5 bigint"] indexs: ["index1:c1:c5"] @@ -100,6 +106,7 @@ cases: - id: 7 desc: cast常量 using type() function mode: request-unsupport + db: db1 inputs: - columns: ["c1 int", "c2 string", "c5 bigint"] indexs: ["index1:c1:c5"] @@ -110,10 +117,11 @@ cases: expect: columns: ["c1 int", "c2 bigint", "c3 float", "c4 double", "c5 timestamp", "c6 date", "c7 string"] rows: - - [10, 10, 10.0, 10.0, 1590115460000, 2020-05-20, "10"] + - [10, 10, 10.0, 10.0, 1590115460000, '2020-05-20', "10"] - id: 8 desc: cast NULL常量 using type(NULL) function mode: request-unsupport + db: db1 inputs: - columns: ["c1 int", "c2 string", "c5 bigint"] indexs: ["index1:c1:c5"] @@ -128,6 +136,7 @@ cases: - id: 9 desc: differnt const node type mode: request-unsupport + db: db1 sql: | select true c1, int16(3) c2, 13 c3, 10.0 c4, 'a string' c5, date(timestamp(1590115420000)) c6, timestamp(1590115420000) c7; expect: diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml index e3cca3769cd..a9b181b7af1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml @@ -16,8 +16,8 @@ 8 UTF-8 - 0.4.2 - 0.4.2-macos + 0.4.4-hotfix1 + 0.4.4-hotfix1-macos test_suite/test_tmp.xml 1.8.9 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java index c5b7a871c22..f3053738a77 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java @@ -41,6 +41,7 @@ public void check() throws Exception { reportLog.info("deployment count name check"); int expectDeploymentCount = expect.getDeploymentCount(); List actualDeployments = fesqlResult.getDeployments(); - Assert.assertEquals(actualDeployments.size(),expectDeploymentCount); + Integer deploymentCount = fesqlResult.getDeploymentCount(); + Assert.assertEquals((int) deploymentCount,expectDeploymentCount); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index 1157cc471f5..048b2db2f56 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -54,14 +54,14 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi }else{ FedbGlobalVar.mainInfo = FEDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/fedb-auto-test/tmp") - .fedbPath("/home/zhaowei01/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:10000") + .basePath("/home/wangkaidong/fedb-auto-test/tmp") + .fedbPath("/home/wangkaidong/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:10018") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10004", "172.24.4.55:10005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10001", "172.24.4.55:10002", "172.24.4.55:10003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10006")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:10023", "172.24.4.55:10024")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:10020", "172.24.4.55:10021", "172.24.4.55:10022")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10025")) .build(); FedbGlobalVar.env = "cluster"; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index d22d902e7a2..ec26084b82e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -35,7 +35,7 @@ */ @Slf4j public class StandaloneTest extends BaseTest { - // protected static SqlExecutor executor; + protected static SqlExecutor 
executor; @BeforeTest() @Parameters({"env","version","fedbPath"}) @@ -48,20 +48,24 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi }else{ FedbGlobalVar.mainInfo = FEDBInfo.builder() .deployType(OpenMLDBDeployType.STANDALONE) - .basePath("/home/zhaowei01/fedb-auto-test/standalone") - .fedbPath("/home/zhaowei01/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") + .basePath("/home/wangkaidong/fedb-auto-test/standalone") + .fedbPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10019")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10020")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10021")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:10027")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:10028")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10029")) .host("172.24.4.55") - .port(10019) + .port(10027) .build(); } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { FedbGlobalVar.env = caseEnv; } + //单机版SDK + StandaloneClient standaloneClient = new StandaloneClient(FedbGlobalVar.mainInfo); + executor = standaloneClient.getExecutor(); + log.info("executor : {}",executor); log.info("fedb global var env: {}", env); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java index 09ee5e6848d..108e6e8cc6a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java @@ -40,6 +40,7 @@ public class FesqlResult { 
private OpenMLDBSchema schema; private OpenmldbDeployment deployment; private List deployments; + private Integer deploymentCount; @Override public String toString() { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index fe27ff70b18..f1a0e2d416c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -72,6 +72,7 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); logger.info("version:{},create db:{},{}", version, dbName, dbOk); + FesqlUtil.useDB(executor,dbName); FesqlResult res = FesqlUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . 
version:"+version); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java index 376e6bcd79c..18f63b48126 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java @@ -28,7 +28,9 @@ import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; +import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com._4paradigm.openmldb.test_common.model.SQLCase; +import com.google.common.base.Joiner; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; @@ -37,12 +39,10 @@ import org.testng.collections.Lists; import java.sql.*; +import java.sql.Date; import java.text.ParseException; import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; +import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; @@ -286,6 +286,7 @@ public static FesqlResult sqlRequestModeWithSp(SqlExecutor executor, String dbNa } public static FesqlResult sql(SqlExecutor executor, String dbName, String sql) { + useDB(executor,dbName); FesqlResult fesqlResult = null; if (sql.startsWith("create database") || sql.startsWith("drop database")) { fesqlResult = db(executor, sql); @@ -293,12 +294,128 @@ public static FesqlResult sql(SqlExecutor executor, String dbName, String sql) { fesqlResult = 
ddl(executor, dbName, sql); } else if (sql.startsWith("insert")||sql.startsWith("INSERT")) { fesqlResult = insert(executor, dbName, sql); - } else { + }else if(sql.startsWith("show deployments;")){ + fesqlResult = showDeploys(executor,dbName,sql); + }else if(sql.startsWith("show deployment")){ + fesqlResult = deploy(executor, dbName, sql); + }else { fesqlResult = select(executor, dbName, sql); } return fesqlResult; } + public static FesqlResult deploy(SqlExecutor executor,String dbName,String showdeploySql){ + if (showdeploySql.isEmpty()){ + return null; + } + logger.info("show deployment:{}",showdeploySql); + FesqlResult fesqlResult = new FesqlResult(); + ResultSet rawRs = executor.executeSQL(dbName, showdeploySql); + if (rawRs == null) { + fesqlResult.setOk(false); + fesqlResult.setMsg("executeSQL fail, result is null"); + } else if (rawRs instanceof SQLResultSet){ + try { + SQLResultSet rs = (SQLResultSet)rawRs; + JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + fesqlResult.setOk(true); + String deployStr = convertRestultSetToListDeploy(rs); + String[] strings = deployStr.split("\n"); + List stringList = Arrays.asList(strings); + OpenmldbDeployment openmldbDeployment = parseDeployment(stringList); + fesqlResult.setDeployment(openmldbDeployment); + } catch (Exception e) { + fesqlResult.setOk(false); + fesqlResult.setMsg(e.getMessage()); + } + } + logger.info("select result:{} \n", fesqlResult); + return fesqlResult; + } + + public static FesqlResult showDeploys(SqlExecutor executor,String dbName,String showdeploySqls){ + if (showdeploySqls.isEmpty()){ + return null; + } + logger.info("show deployments:{}",showdeploySqls); + FesqlResult fesqlResult = new FesqlResult(); + ResultSet rawRs = executor.executeSQL(dbName, showdeploySqls); + if (rawRs == null) { + fesqlResult.setOk(false); + fesqlResult.setMsg("executeSQL fail, result is null"); + } else if (rawRs instanceof SQLResultSet){ + try { + SQLResultSet rs = (SQLResultSet)rawRs; + 
JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + fesqlResult.setOk(true); + List> lists = convertRestultSetToList(rs); + if(lists.size() == 0 ||lists.isEmpty()){ + fesqlResult.setDeploymentCount(0); + }else { + fesqlResult.setDeploymentCount(lists.size()); + } + //String[] strings = deployStr.split("\n"); + //List stringList = Arrays.asList(strings); + + } catch (Exception e) { + fesqlResult.setOk(false); + fesqlResult.setMsg(e.getMessage()); + } + } + return fesqlResult; + } + + private static String convertRestultSetToListDeploy(SQLResultSet rs) throws SQLException { + String string = null; + while (rs.next()) { + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + string=String.valueOf(getColumnData(rs, i)); + } + } + return string; + } + + private static OpenmldbDeployment parseDeployment(List lines){ + OpenmldbDeployment deployment = new OpenmldbDeployment(); + List inColumns = new ArrayList<>(); + List outColumns = new ArrayList<>(); + String[] db_sp = lines.get(3).split("\\s+"); + deployment.setDbName(db_sp[1]); + deployment.setName(db_sp[2]); + + String sql = ""; + List list = lines.subList(9, lines.size()); + Iterator it = list.iterator(); + while(it.hasNext()) { + String line = it.next().trim(); + if (line.contains("row in set")) break; + if (line.startsWith("#") || line.startsWith("-")) continue; + sql += line+"\n"; + } + deployment.setSql(sql); + while(it.hasNext()){ + String line = it.next().trim(); + if (line.contains("Output Schema")) break; + if (line.startsWith("#") || line.startsWith("-")|| line.equals("")) continue; + String[] infos = line.split("\\s+"); + String in = Joiner.on(",").join(infos); + inColumns.add(in); + } + while(it.hasNext()){ + String line = it.next().trim(); + if(line.startsWith("#")||line.startsWith("-"))continue; + String[] infos = line.split("\\s+"); + String out = Joiner.on(",").join(infos); + outColumns.add(out); + } + deployment.setInColumns(inColumns); + 
deployment.setOutColumns(outColumns); + return deployment; + } + + + public static FesqlResult insert(SqlExecutor executor, String dbName, String insertSql) { if (insertSql.isEmpty()) { return null; @@ -1291,4 +1408,22 @@ public static String getColumnTypeByType(int type){ } throw new IllegalArgumentException("not know type"); } + + + + public static void useDB(SqlExecutor executor,String dbName){ + Statement statement = executor.getStatement(); + String sql = String.format("use %s",dbName); + try { + statement.execute(sql); + } catch (Exception e) { + e.printStackTrace(); + }finally { + try { + statement.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties index df9ec709326..fb68b0851b1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties @@ -1,6 +1,6 @@ #远程执行命令时需要进行配置,本地执行则不需要进行配置 -remote_ip=172.24.4.40 -remote_user=zhaowei01 +remote_ip=172.24.4.55 +remote_user=wangkaidong remote_password=1qaz0p;/ #remote_private_key_path=src/main/resources/zw-mac-id_rsa \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties index 7e0034ac509..023a59609c0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties @@ -9,8 +9,8 @@ main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2 
0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz -tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.2-linux.tar.gz +tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.4-linux.tar.gz #tmp=/home/zhaowei01/tobe/openmldb_linux.tar.gz -standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.2-linux.tar.gz +standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.2.1-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties index f332c949460..8aa7e8e77dc 100755 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties @@ -1,5 +1,5 @@ ### set log levels ### -log4j.rootLogger=stdout,warn,error +log4j.rootLogger=debug,info,stdout,warn,error # console log log4j.appender.stdout = org.apache.log4j.ConsoleAppender diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java index 11fd5e07747..06315a8a1aa 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java @@ -68,4 +68,45 @@ public void testCreateIndex(SQLCase testCase){ public void testCreateNoIndex(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //SDK版本 + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_create.yaml") + @Story("create") + public void testCreateSDk(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + //全pass + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_ttl.yaml") + @Story("ttl") + public void testTTLSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + //有问题 + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_options.yaml") + @Story("options") + public void testOptionsSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + //有问题 + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_create_index.yaml") + @Story("create_index") + public void testCreateIndexSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") + @Story("create_no_index") + public void testCreateNoIndexSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java index b3d1ab321b8..8abb41f2758 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java @@ -81,4 +81,21 @@ public void testInsertMulti1000(){ FesqlResult result = OpenMLDBComamndFacade.sql(FedbGlobalVar.mainInfo, FedbGlobalVar.dbName, query); Assert.assertEquals(total,result.getCount()); } + + //pass + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/dml/test_insert.yaml") + @Story("insert") + public void testInsertSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + //pass + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/dml/multi_insert.yaml") + @Story("insert-multi") + public void testInsertMultiSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java index b4f99d176a4..ffa1c8b1e1c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java @@ -51,4 +51,29 @@ public void testShow(SQLCase testCase){ public void testDrop(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + + // 全pass + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/deploy/test_create_deploy.yaml") + @Story("create") + public void testCreateSDK(SQLCase testCase){ + 
ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + //0有问题 + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/deploy/test_show_deploy.yaml") + @Story("show") + public void testShowSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + //0 有问题 + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/deploy/test_drop_deploy.yaml") + @Story("drop") + public void testDropSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java index ded8da6b8f1..7300ef87839 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java @@ -41,4 +41,13 @@ public class ExpressTest extends StandaloneTest { public void testExpress(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + + //都pass test_predict.yaml最后俩个case还需要解析一下 + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/expression/") + public void testExpressSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java index 
8d9d1c7e442..f2f3f7b57fe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java @@ -41,4 +41,12 @@ public class FunctionTest extends StandaloneTest { public void testFunction(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //pass + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/function/") + public void testFunctionSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java index 447fb371870..40257abaea0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java @@ -41,4 +41,12 @@ public class LastJoinTest extends StandaloneTest { public void testLastJoin(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //all pass + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/join/","function/cluster/window_and_lastjoin.yaml"}) + public void testLastJoinSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java index 290d62e6ce7..dece5e6f276 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java @@ -42,4 +42,12 @@ public class MultiDBTest extends StandaloneTest { public void testSelect(SQLCase testCase) throws Exception { ExecutorFactory.build( testCase, SQLCaseType.kStandaloneCLI).run(); } + + //pass + @Story("Standalone-CLI") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/multiple_databases/"}) + public void testSelectSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java index a09871ceb6d..eacabb8fb27 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java @@ -21,4 +21,10 @@ public void testOutIn(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/out_in/test_out_in.yaml") + @Story("Out-In") + public void 
testOutInSDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java index 68245445a53..ca1a2b15ad1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java @@ -43,4 +43,14 @@ public class SelectTest extends StandaloneTest { public void testSelect(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //全pass + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + @Step("{testCase.desc}") + public void testSelectSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java index 8a9b6664c24..9d56b6378e8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java @@ -43,4 +43,14 @@ public class WindowTest extends StandaloneTest { public void 
testWindow(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //pass + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/window/", + "function/cluster/", + "function/test_index_optimized.yaml"}) + public void testWindowSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/ExpressTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/ExpressTest.java index 2de998b2f88..19e6589c3d5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/ExpressTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/ExpressTest.java @@ -40,4 +40,13 @@ public class ExpressTest extends StandaloneTest { public void testExpress(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //全pass + @Story("standalone-cli") + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/v040/test_like.yaml") + public void testExpressSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/FunctionTest.java index 6e9207660eb..fee49b4d963 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/FunctionTest.java @@ -47,4 +47,20 @@ public void testLikeMatch(SQLCase testCase) throws Exception { public void testUDAF(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //pass + @Story("like_match") + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/v040/test_like_match.yaml") + public void testLikeMatchSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + //pass + @Story("udaf") + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/v040/test_udaf.yaml") + public void testUDAFSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/GroupByTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/GroupByTest.java index 162111c021f..9185443fb0a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/GroupByTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v040/GroupByTest.java @@ -40,4 +40,12 @@ public class GroupByTest extends StandaloneTest { public void testExpress(SQLCase testCase) throws Exception { ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + + //全pass + @Story("standalone-cli") + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/v040/test_groupby.yaml") + 
public void testExpressSDK(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/python-sdk-test/check/checker.py b/test/integration-test/python-sdk-test/check/checker.py index 62e88b096b5..2ec2c39ec4c 100644 --- a/test/integration-test/python-sdk-test/check/checker.py +++ b/test/integration-test/python-sdk-test/check/checker.py @@ -175,6 +175,8 @@ def build(fesqlCase, fesqlResult): checkList.append(CountChecker(fesqlCase, fesqlResult)) elif key == 'columns': checkList.append(ColumnsChecker(fesqlCase, fesqlResult)) + elif key == 'schema': + checkList.append(ColumnsChecker(fesqlCase,fesqlResult)) elif key == 'order': pass else: diff --git a/test/integration-test/python-sdk-test/common/fedb_client.py b/test/integration-test/python-sdk-test/common/fedb_client.py index 4d3c58c0b7b..55edf46a775 100644 --- a/test/integration-test/python-sdk-test/common/fedb_client.py +++ b/test/integration-test/python-sdk-test/common/fedb_client.py @@ -16,6 +16,7 @@ import sqlalchemy as db from nb_log import LogManager +import openmldb log = LogManager('fedb-sdk-test').get_logger_and_add_handlers() @@ -28,6 +29,10 @@ def __init__(self, zkCluster, zkRootPath, dbName='test_fedb'): self.dbName = dbName def getConnect(self): - engine = db.create_engine('openmldb://@/{}?zk={}&zkPath={}'.format(self.dbName, self.zkCluster, self.zkRootPath)) - connect = engine.connect() - return connect + # engine = db.create_engine('openmldb://@/{}?zk={}&zkPath={}'.format(self.dbName, self.zkCluster, self.zkRootPath)) + # connect = engine.connect() + # return connect + + db = openmldb.dbapi.connect(self.dbName, self.zkCluster, self.zkRootPath) + cursor = db.cursor() + return cursor diff --git a/test/integration-test/python-sdk-test/common/fedb_test.py b/test/integration-test/python-sdk-test/common/fedb_test.py index 6098ec813dd..5ce3d4775a3 100755 --- a/test/integration-test/python-sdk-test/common/fedb_test.py +++ 
b/test/integration-test/python-sdk-test/common/fedb_test.py @@ -31,8 +31,12 @@ class FedbTest: def setup_class(self): self.client = FedbClient(fedb_config.zk_cluster, fedb_config.zk_root_path, fedb_config.default_db_name) self.connect = self.client.getConnect() - try: - self.connect.execute("create database {};".format(fedb_config.default_db_name)) - log.info("create db:" + fedb_config.default_db_name + ",success") - except Exception as e: - log.info("create db:" + fedb_config.default_db_name + ",failed . msg:"+str(e)) \ No newline at end of file + # try: + # self.connect.execute("create database {};".format(fedb_config.default_db_name)) + # log.info("create db:" + fedb_config.default_db_name + ",success") + # except Exception as e: + # log.info("create db:" + fedb_config.default_db_name + ",failed . msg:"+str(e)) + +if __name__ == "__main__": + f = FedbTest() + f.setup_class() diff --git a/test/integration-test/python-sdk-test/common/standalone_client.py b/test/integration-test/python-sdk-test/common/standalone_client.py index deb5d13332e..51d07ca4720 100644 --- a/test/integration-test/python-sdk-test/common/standalone_client.py +++ b/test/integration-test/python-sdk-test/common/standalone_client.py @@ -18,6 +18,7 @@ import openmldb import sqlalchemy as db from nb_log import LogManager +from common import standalone_config log = LogManager('fedb-sdk-test').get_logger_and_add_handlers() @@ -33,3 +34,13 @@ def getConnect(self): engine = db.create_engine('openmldb:///{}?host={}&port={}'.format(self.dbName, self.host, self.port)) connect = engine.connect() return connect + + # db = openmldb.dbapi.connect(self.dbName, self.host, int(self.port)) + # cursor = db.cursor() + # return cursor + +if __name__ == "__main__": + s = StandaloneClient(standalone_config.host,standalone_config.port,standalone_config.default_db_name) + cursor = s.getConnect() + rs = cursor.execute("select db3.auto_avelWUr0.c1,db3.auto_avelWUr0.c2,db4.auto_jF8Dp3W1.c3,db4.auto_jF8Dp3W1.c4 from 
db3.auto_avelWUr0 last join db4.auto_jF8Dp3W1 ORDER BY db4.auto_jF8Dp3W1.c3 on db3.auto_avelWUr0.c1=db4.auto_jF8Dp3W1.c1") + print(rs.fetchall()) diff --git a/test/integration-test/python-sdk-test/common/standalone_test.py b/test/integration-test/python-sdk-test/common/standalone_test.py index a1aa586e2ab..db2b2f421c5 100755 --- a/test/integration-test/python-sdk-test/common/standalone_test.py +++ b/test/integration-test/python-sdk-test/common/standalone_test.py @@ -27,8 +27,9 @@ class StandaloneTest: def setup_class(self): self.client = StandaloneClient(standalone_config.host,standalone_config.port,standalone_config.default_db_name) self.connect = self.client.getConnect() - try: - self.connect.execute("create database {};".format(standalone_config.default_db_name)) - log.info("create db:" + standalone_config.default_db_name + ",success") - except Exception as e: - log.info("create db:" + standalone_config.default_db_name + ",failed . msg:" + str(e)) + + # try: + # self.connect.execute("create database {};".format(standalone_config.default_db_name)) + # log.info("create db:" + standalone_config.default_db_name + ",success") + # except Exception as e: + # log.info("create db:" + standalone_config.default_db_name + ",failed . 
msg:" + str(e)) diff --git a/test/integration-test/python-sdk-test/conf/fedb.conf b/test/integration-test/python-sdk-test/conf/fedb.conf index 074aa67e0dd..776d03b278e 100644 --- a/test/integration-test/python-sdk-test/conf/fedb.conf +++ b/test/integration-test/python-sdk-test/conf/fedb.conf @@ -1,15 +1,16 @@ [global] env=qa -default_db_name=db1 +default_db_name=test_zw levels=0 [fedb] #配置zk地址, 和集群启动配置中的zk_cluster保持一致 -qa_zk_cluster=172.24.4.40:10000 +qa_zk_cluster=172.24.4.55:10018 #配置集群的zk根路径, 和集群启动配置中的zk_root_path保持一致 qa_zk_root_path=/openmldb -qa_tb_endpoint_0=172.24.4.55:10003 +# qa_tb_endpoint_0=172.24.4.55:10003 +qa_tb_endpoint_0=172.24.4.40:10009 qa_tb_endpoint_1=172.24.4.55:10004 qa_tb_endpoint_2=172.24.4.55:10005 diff --git a/test/integration-test/python-sdk-test/conf/standalone.conf b/test/integration-test/python-sdk-test/conf/standalone.conf index ee8370a0e04..bebecce28a2 100644 --- a/test/integration-test/python-sdk-test/conf/standalone.conf +++ b/test/integration-test/python-sdk-test/conf/standalone.conf @@ -1,16 +1,17 @@ [global] env=qa -default_db_name=db1 +default_db_name=test_zw levels=0 [standalone] -qa_port=172.24.4.40 -qa_host=10008 +qa_port=10027 +qa_host=172.24.4.55 +#qa_tb_endpoint_0=172.24.4.40:10009 # #配置zk地址, 和集群启动配置中的zk_cluster保持一致 # qa_zk_cluster=172.24.4.55:10000 # #配置集群的zk根路径, 和集群启动配置中的zk_root_path保持一致 -# qa_zk_root_path=/openmldb +qa_zk_root_path=/openmldb # qa_tb_endpoint_0=172.24.4.55:10003 # qa_tb_endpoint_1=172.24.4.55:10004 # qa_tb_endpoint_2=172.24.4.55:10005 diff --git a/test/integration-test/python-sdk-test/executor/fedb_executor.py b/test/integration-test/python-sdk-test/executor/fedb_executor.py index 34224ca2937..2095dfee7a1 100644 --- a/test/integration-test/python-sdk-test/executor/fedb_executor.py +++ b/test/integration-test/python-sdk-test/executor/fedb_executor.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +import time from nb_log import LogManager import check.checker @@ -34,6 +35,7 @@ def process(self): log.info(str(self.fesqlCase['case_prefix']) + ': ' + self.fesqlCase['desc'] + " Begin!") self.prepare() fesqlResult = self.execute() + print(fesqlResult) self.check(fesqlResult) self.tearDown() @@ -105,6 +107,8 @@ def prepare(self): # except Exception as e: # pass inputs = self.fesqlCase.get('inputs') + #if inputs.get(0).get('columns')==None: + res, self.tableNames = fedb_util.createAndInsert(self.executor, self.dbName, inputs) if not res.ok: raise Exception("fail to run SQLExecutor: prepare fail") @@ -117,6 +121,8 @@ def execute(self): log.info("sql:" + sql) sql = fedb_util.formatSql(sql, self.tableNames) fesqlResult = fedb_util.sql(self.executor, self.dbName, sql) + if self.fesqlCase.__contains__('sql') == False: + return fesqlResult sql = self.fesqlCase['sql'] if sql != None and len(sql) > 0: log.info("sql:" + sql) diff --git a/test/integration-test/python-sdk-test/standalone/__init__.py b/test/integration-test/python-sdk-test/standalone/__init__.py new file mode 100644 index 00000000000..db7e29ce373 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
\ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_ddl.py b/test/integration-test/python-sdk-test/standalone/test_standalone_ddl.py new file mode 100644 index 00000000000..0ab56013233 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_ddl.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStandaloneDDL(StandaloneTest): + + @pytest.mark.parametrize("testCase", getCases(["/function/ddl/test_create.yaml"])) + @allure.feature("DDL") + @allure.story("create") + def test_create(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + #全pass + @pytest.mark.parametrize("testCase", getCases(["/function/ddl/test_ttl.yaml"])) + @allure.feature("DDL") + @allure.story("ttl") + def test_ttl(self, testCase): + fedb_executor.build(self.connect, testCase).run() + + #有问题 + @pytest.mark.parametrize("testCase", getCases(["/function/ddl/test_options.yaml"])) + @allure.feature("DDL") + @allure.story("ttl") + def test_options(self, testCase): + fedb_executor.build(self.connect, testCase).run() + + diff --git 
a/test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py b/test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py new file mode 100644 index 00000000000..2b6aeaddee7 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +#都不行 +class TestStandaloneDeploy(StandaloneTest): + + @pytest.mark.parametrize("testCase", getCases(["/function/deploy/test_create_deploy.yaml"])) + @allure.feature("deploy") + @allure.story("create") + def test_create(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + @pytest.mark.parametrize("testCase", getCases(["/function/deploy/test_show_deploy.yaml"])) + @allure.feature("deploy") + @allure.story("show") + def test_show(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_dml.py b/test/integration-test/python-sdk-test/standalone/test_standalone_dml.py new file mode 100644 index 
00000000000..3572e5ac7ab --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_dml.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStandaloneDML(StandaloneTest): + + #全部pass + @pytest.mark.parametrize("testCase", getCases(["/function/dml/test_insert.yaml"])) + @allure.feature("dml") + @allure.story("insert") + def test_insert(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + #全部pass + @pytest.mark.parametrize("testCase", getCases(["/function/dml/multi_insert.yaml"])) + @allure.feature("dml") + @allure.story("multi_insert") + def test_insert(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_express.py b/test/integration-test/python-sdk-test/standalone/test_standalone_express.py new file mode 100644 index 00000000000..2ed5f7a6f10 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_express.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# 
Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + + +class TestStandaloneExpress(StandaloneTest): + + # testcase 71 72 73 74 75 76没pass 剩下都pass + #assert actual == value, 'actual:{},expect:{}'.format(actual, value) + #AssertionError: actual:id bigint,expect:id:bigint + @pytest.mark.parametrize("testCase", getCases(["/function/expression"])) + @allure.feature("expression") + @allure.story("batch") + def test_express(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_express_v040.py b/test/integration-test/python-sdk-test/standalone/test_standalone_express_v040.py new file mode 100644 index 00000000000..3586c4a1f07 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_express_v040.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStandaloneExpressV040(StandaloneTest): + + # 32 33 34 35也pass 之前因为//导致的 因为dataprovider 剩下都pass + @pytest.mark.parametrize("testCase", getCases(["/function/v040/test_like.yaml"])) + @allure.feature("expression") + @allure.story("batch") + def test_express(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_function.py b/test/integration-test/python-sdk-test/standalone/test_standalone_function.py new file mode 100644 index 00000000000..9c323351e9a --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_function.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStandaloneFunction(StandaloneTest): + + #testcase 7 8 剩下都pass AssertionError: actual:32767,expect:None;actual_type:,expect_type: 0411测试全pass 用db.create_engine进行连接 + @pytest.mark.parametrize("testCase", getCases(["/function/function"])) + @allure.feature("function") + @allure.story("batch") + def test_function(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_function_v040.py b/test/integration-test/python-sdk-test/standalone/test_standalone_function_v040.py new file mode 100644 index 00000000000..f16ea2e0608 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_function_v040.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStandaloneFunctionV040(StandaloneTest): + + # 16,17pass啦 Syntax error: Illegal escape sequence: \% [at 2:7] (1,'\\\%a_b',1590738990000L); 全都pass + @pytest.mark.parametrize("testCase", getCases(["/function/v040/test_like_match.yaml"])) + @allure.feature("function") + @allure.story("like_match") + def test_express1(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + #全部pass + @pytest.mark.parametrize("testCase", getCases(["/function/v040/test_udaf.yaml"])) + @allure.feature("function") + @allure.story("udaf") + def test_express2(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_groupby_v040.py b/test/integration-test/python-sdk-test/standalone/test_standalone_groupby_v040.py new file mode 100644 index 00000000000..c34a53e0169 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_groupby_v040.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + + +class TestStandaloneGroupbyV040(StandaloneTest): + + #11 没pass 因为排序没排好 但是是OK的 + @pytest.mark.parametrize("testCase", getCases(["/function/v040/test_groupby.yaml"])) + @allure.feature("groupby") + @allure.story("batch") + def test_window1(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_lastjoin.py b/test/integration-test/python-sdk-test/standalone/test_standalone_lastjoin.py new file mode 100644 index 00000000000..dafb26ce919 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_lastjoin.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + + +class TestStandaloneLastjoin(StandaloneTest): + + #全部pass + @pytest.mark.parametrize("testCase", getCases(["/function/join/","/function/cluster/window_and_lastjoin.yaml"])) + @allure.feature("lastjoin") + @allure.story("batch") + def test_function(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_multidb.py b/test/integration-test/python-sdk-test/standalone/test_standalone_multidb.py new file mode 100644 index 00000000000..7dc1677c502 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_multidb.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStandaloneMultiDB(StandaloneTest): + + # 2,3,4,8 pass 要重写创建db + @pytest.mark.parametrize("testCase", getCases(["/function/multiple_databases/"])) + @allure.feature("multidb") + @allure.story("batch") + def test_select(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_outin.py b/test/integration-test/python-sdk-test/standalone/test_standalone_outin.py new file mode 100644 index 00000000000..c20ef92c13f --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_outin.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStanaaloneOutIn(StandaloneTest): + + #有问题 单机版目前没法测 + @pytest.mark.parametrize("testCase", getCases(["/function/out_in/test_out_in.yaml"])) + @allure.feature("out-in") + @allure.story("out-in") + def test_function(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_select.py b/test/integration-test/python-sdk-test/standalone/test_standalone_select.py new file mode 100644 index 00000000000..93f7a31ad15 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_select.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + + +class TestStandaloneSelect(StandaloneTest): + + # 0,1,2,3,5,7都不pass 剩下都pass KeyError: 'columns' + @pytest.mark.parametrize("testCase", getCases(["/function/select/"])) + @allure.feature("select") + @allure.story("batch") + def test_select1(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + @pytest.mark.parametrize("testCase", getCases(["/function/select/test_where.yaml"])) + @allure.feature("select") + @allure.story("batch") + def test_select3(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + #全pass + @pytest.mark.parametrize("testCase", getCases(["/query/const_query.yaml"])) + @allure.feature("select") + @allure.story("batch") + def test_select2(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_window.py b/test/integration-test/python-sdk-test/standalone/test_standalone_window.py new file mode 100644 index 00000000000..434e3d2eb70 --- /dev/null +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_window.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import allure +import pytest + +from nb_log import LogManager + +from common.standalone_test import StandaloneTest +from executor import fedb_executor +from util.test_util import getCases + +log = LogManager('python-sdk-test').get_logger_and_add_handlers() + +class TestStandaloneWindow(StandaloneTest): + + #都pass + @pytest.mark.parametrize("testCase", getCases(["/function/window/"])) + @allure.feature("window") + @allure.story("batch") + def test_window1(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + # 13没pass属于正常情况 剩下都pass + @pytest.mark.parametrize("testCase", getCases(["/function/cluster/"])) + @allure.feature("window") + @allure.story("batch") + def test_window2(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + #都pass + @pytest.mark.parametrize("testCase", getCases(["/function/test_index_optimized.yaml"])) + @allure.feature("window") + @allure.story("batch") + def test_window3(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/util/fedb_util.py b/test/integration-test/python-sdk-test/util/fedb_util.py index 1de46ba1bd5..6da87e00200 100644 --- a/test/integration-test/python-sdk-test/util/fedb_util.py +++ b/test/integration-test/python-sdk-test/util/fedb_util.py @@ -17,13 +17,15 @@ from datetime import datetime from nb_log import LogManager -from sqlalchemy_openmldb.openmldbapi import Type as feType +#from sqlalchemy_openmldb.openmldbapi import Type as feType +from openmldb.dbapi import Type as feType import re import random import string import time -from sqlalchemy_openmldb.openmldbapi.sql_router_sdk import DataTypeName, SQLRequestRow +#from sqlalchemy_openmldb.openmldbapi.sql_router_sdk import DataTypeName, SQLRequestRow +from openmldb.native.sql_router_sdk 
import DataTypeName, SQLRequestRow from common import fedb_config from entity.fedb_result import FedbResult @@ -91,6 +93,8 @@ def sql(executor, dbName: str, sql: str): fedbResult = ddl(executor, dbName, sql) elif sql.startswith("insert"): fedbResult = insert(executor, dbName, sql) + elif sql.startswith("load"): + fedbResult = load(executor,sql) else: fedbResult = select(executor, dbName, sql) return fedbResult @@ -176,7 +180,12 @@ def ddl(executor, dbName: str, sql: str): log.info("ddl sql:" + sql) fesqlResult = FedbResult() try: - executor.execute(sql) + list = sql.split(" ") + newtable = dbName.__add__(".").__add__(list[2]) + list[2] = newtable + newsql = " ".join(list) + log.info("ddl newsql:"+newsql) + executor.execute(newsql) fesqlResult.ok = True fesqlResult.msg = "ok" except Exception as e: @@ -286,7 +295,8 @@ def select(executor, dbName: str, sql: str): fedbResult.msg = "ok" fedbResult.rs = rs fedbResult.count = rs.rowcount - fedbResult.result = convertRestultSetToListRS(rs) + fedbResult.result = rs.fetchall() + #fedbResult.result = convertRestultSetToListRS(rs) except Exception as e: log.info("select exception is {}".format(e)) fedbResult.ok = False @@ -294,6 +304,20 @@ def select(executor, dbName: str, sql: str): log.info("select result:" + str(fedbResult)) return fedbResult +def load(executor,sql: str): + log.info("load sql:"+sql) + fedbResult = FedbResult() + try: + executor.execute(sql) + time.sleep(4) + fedbResult.ok = True + fedbResult.msg = "ok" + except Exception as e: + log.info("load data exception is {}".format(e)) + fedbResult.ok = False + fedbResult.msg = str(e) + log.info("load result:"+str(fedbResult)) + return fedbResult def formatSql(sql: str, tableNames: list): if "{auto}" in sql: @@ -313,9 +337,23 @@ def formatSql(sql: str, tableNames: list): def createAndInsert(executor, dbName, inputs, requestMode: bool = False): tableNames = [] + dbnames = set() + dbnames.add(dbName) fedbResult = FedbResult() if inputs != None and len(inputs) > 0: for 
index, input in enumerate(inputs): + if input.__contains__('db') == True and dbnames.__contains__(input.get('db')) == False: + db = input.get('db') + log.info("db:" + db) + createDB(executor,db) + dbnames.add(db) + log.info("create input db, dbName:"+db) + + + for index, input in enumerate(inputs): + # if input.__contains__('columns') == False: + # fedbResult.ok = True + # return fedbResult, tableNames tableName = input.get('name') if tableName == None: tableName = getRandomName() @@ -325,7 +363,10 @@ def createAndInsert(executor, dbName, inputs, requestMode: bool = False): if createSql == None: createSql = getCreateSql(tableName, input['columns'], input['indexs']) createSql = formatSql(createSql, tableNames) - res = ddl(executor, dbName, createSql) + if input.__contains__('db') == True: + res = ddl(executor,input.get('db'),createSql) + else: + res = ddl(executor, dbName, createSql) if not res.ok: log.error("fail to create table") return res, tableNames @@ -341,12 +382,17 @@ def createAndInsert(executor, dbName, inputs, requestMode: bool = False): fedbResult.ok = True return fedbResult, tableNames +def createDB(executor, dbName): + sql = 'create database {}'.format(dbName) + executor.execute(sql) def getInsertSqls(input): insertSql = input.get('insert') if insertSql is not None and len(insertSql) > 0: return [insertSql] tableName = input.get('name') + if input.__contains__('db')==True: + tableName = input.get('db').__add__('.'+tableName) rows = input.get('rows') columns = input.get('columns') inserts = [] From fdf0f0522029be31d2cf57c42907b048e172971a Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 18 Apr 2022 16:48:19 +0800 Subject: [PATCH 004/172] modify auto deploy --- .../function/join/test_lastjoin_complex.yaml | 2 +- .../openmldb/batch_test/QuerySuites.scala | 86 +++++++++---------- .../openmldb-sdk-test/shell/stop-fedb.sh | 15 ++++ .../src/main/resources/command.properties | 2 +- .../src/main/resources/fedb_deploy.properties | 8 +- 
.../src/main/resources/log4j.properties | 2 +- .../java_sdk_test/temp/TestFEDBDeploy.java | 11 +++ .../test_suite/test_deploy.xml | 13 +++ .../openmldb/test_common/util/FEDBDeploy.java | 7 ++ 9 files changed, 96 insertions(+), 50 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml diff --git a/cases/function/join/test_lastjoin_complex.yaml b/cases/function/join/test_lastjoin_complex.yaml index d26159d2203..53cbed62241 100644 --- a/cases/function/join/test_lastjoin_complex.yaml +++ b/cases/function/join/test_lastjoin_complex.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: [] +debugs: ["两个子查询lastjoin,order不是主表的ts-rtidb不支持"] cases: - id: 0 desc: lastjoin+窗口 diff --git a/test/batch-test/openmldb-batch-test/src/test/scala/com/_4paradigm/openmldb/batch_test/QuerySuites.scala b/test/batch-test/openmldb-batch-test/src/test/scala/com/_4paradigm/openmldb/batch_test/QuerySuites.scala index 13de48ccdb7..a7d63bb640d 100644 --- a/test/batch-test/openmldb-batch-test/src/test/scala/com/_4paradigm/openmldb/batch_test/QuerySuites.scala +++ b/test/batch-test/openmldb-batch-test/src/test/scala/com/_4paradigm/openmldb/batch_test/QuerySuites.scala @@ -19,51 +19,51 @@ package com._4paradigm.openmldb.batch_test // TODO: Do not use SQLBaseSuite class QuerySuites extends SQLBaseSuite { // TODO: Do not run yaml cases now - testCases("cases/query/fz_sql.yaml") - testCases("cases/query/group_query.yaml") - testCases("cases/query/last_join_query.yaml") - testCases("cases/query/last_join_window_query.yaml") - testCases("cases/query/udaf_query.yaml") - testCases("cases/query/window_query.yaml") - testCases("cases/query/window_with_union_query.yaml") - - testCases("cases/function/expression/test_arithmetic.yaml") -// testCases("cases/function/expression/test_compare.yaml") - testCases("cases/function/expression/test_condition.yaml") - testCases("cases/function/expression/test_logic.yaml") - 
testCases("cases/function/expression/test_type.yaml") - - testCases("cases/function/test_feature_zero_function.yaml") - testCases("cases/function/test_fz_sql.yaml") - testCases("cases/function/test_index_optimized.yaml") - testCases("cases/function/join/test_lastjoin_simple.yaml") +// testCases("cases/query/fz_sql.yaml") +// testCases("cases/query/group_query.yaml") +// testCases("cases/query/last_join_query.yaml") +// testCases("cases/query/last_join_window_query.yaml") +// testCases("cases/query/udaf_query.yaml") +// testCases("cases/query/window_query.yaml") +// testCases("cases/query/window_with_union_query.yaml") +// +// testCases("cases/function/expression/test_arithmetic.yaml") +//// testCases("cases/function/expression/test_compare.yaml") +// testCases("cases/function/expression/test_condition.yaml") +// testCases("cases/function/expression/test_logic.yaml") +// testCases("cases/function/expression/test_type.yaml") +// +// testCases("cases/function/test_feature_zero_function.yaml") +// testCases("cases/function/test_fz_sql.yaml") +// testCases("cases/function/test_index_optimized.yaml") +// testCases("cases/function/join/test_lastjoin_simple.yaml") testCases("cases/function/join/test_lastjoin_complex.yaml") +// +// testCases("cases/function/select/test_select_sample.yaml") +// testCases("cases/function/select/test_sub_select.yaml") +//// testCases("cases/function/select/test_where.yaml") - testCases("cases/function/select/test_select_sample.yaml") - testCases("cases/function/select/test_sub_select.yaml") -// testCases("cases/function/select/test_where.yaml") - - testCases("cases/function/function/test_udaf_function.yaml") - testCases("cases/function/function/test_udf_function.yaml") - testCases("cases/function/function/test_calculate.yaml") - testCases("cases/function/function/test_date.yaml") - testCases("cases/function/function/test_string.yaml") - - testCases("cases/function/window/test_window_exclude_current_time.yaml") - 
testCases("cases/function/window/test_window_row.yaml") - testCases("cases/function/window/test_window_row_range.yaml") - testCases("cases/function/window/test_window_union.yaml") - testCases("cases/function/window/error_window.yaml") - - testCases("cases/function/cluster/test_window_row.yaml") - testCases("cases/function/cluster/test_window_row_range.yaml") - testCases("cases/function/cluster/window_and_lastjoin.yaml") - - testCases("cases/function/spark/test_fqz_studio.yaml") - testCases("cases/function/spark/test_ads.yaml") - testCases("cases/function/spark/test_news.yaml") - testCases("cases/function/spark/test_jd.yaml") - testCases("cases/function/spark/test_credit.yaml") +// testCases("cases/function/function/test_udaf_function.yaml") +// testCases("cases/function/function/test_udf_function.yaml") +// testCases("cases/function/function/test_calculate.yaml") +// testCases("cases/function/function/test_date.yaml") +// testCases("cases/function/function/test_string.yaml") +// +// testCases("cases/function/window/test_window_exclude_current_time.yaml") +// testCases("cases/function/window/test_window_row.yaml") +// testCases("cases/function/window/test_window_row_range.yaml") +// testCases("cases/function/window/test_window_union.yaml") +// testCases("cases/function/window/error_window.yaml") +// +// testCases("cases/function/cluster/test_window_row.yaml") +// testCases("cases/function/cluster/test_window_row_range.yaml") +// testCases("cases/function/cluster/window_and_lastjoin.yaml") +// +// testCases("cases/function/spark/test_fqz_studio.yaml") +// testCases("cases/function/spark/test_ads.yaml") +// testCases("cases/function/spark/test_news.yaml") +// testCases("cases/function/spark/test_jd.yaml") +// testCases("cases/function/spark/test_credit.yaml") // TODO: fix if java cases support not inputs // testCases("cases/query/const_query.yaml") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-fedb.sh 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-fedb.sh index eded034202f..ca9cb98c2db 100755 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-fedb.sh +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-fedb.sh @@ -32,6 +32,15 @@ sh openmldb-apiserver-1/bin/start.sh start apiserver sh openmldb-task_manager-1/bin/start.sh start taskmanager sh zookeeper-3.4.14/bin/zkServer.sh start +sh openmldb-ns-1/bin/start.sh restart nameserver +sh openmldb-ns-2/bin/start.sh restart nameserver +sh openmldb-tablet-1/bin/start.sh restart tablet +sh openmldb-tablet-2/bin/start.sh restart tablet +sh openmldb-tablet-3/bin/start.sh restart tablet +sh openmldb-apiserver-1/bin/start.sh restart apiserver +sh openmldb-task_manager-1/bin/start.sh restart taskmanager +sh zookeeper-3.4.14/bin/zkServer.sh restart + cp -r openmldb openmldb-ns-1/bin/ cp -r openmldb openmldb-ns-2/bin/ cp -r openmldb openmldb-tablet-1/bin/ @@ -39,3 +48,9 @@ cp -r openmldb openmldb-tablet-2/bin/ cp -r openmldb openmldb-tablet-3/bin/ cp -r openmldb openmldb-apiserver-1/bin/ cp -r openmldb openmldb-task_manager-1/bin/ + +rm -rf openmldb-ns-1/bin/openmldb +rm -rf openmldb-ns-2/bin/openmldb +rm -rf openmldb-tablet-1/bin/openmldb +rm -rf openmldb-tablet-2/bin/openmldb +rm -rf openmldb-tablet-3/bin/openmldb \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties index df9ec709326..33a1193f975 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/command.properties @@ -1,6 +1,6 @@ #远程执行命令时需要进行配置,本地执行则不需要进行配置 -remote_ip=172.24.4.40 +remote_ip=172.24.4.55 remote_user=zhaowei01 remote_password=1qaz0p;/ 
#remote_private_key_path=src/main/resources/zw-mac-id_rsa \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties index 44ff02afda0..a1f60abd7b7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties @@ -9,8 +9,8 @@ main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz -tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.2-linux.tar.gz +tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.4-linux.tar.gz #tmp=/home/zhaowei01/tobe/openmldb_linux.tar.gz -standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.0-linux.tar.gz -tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz -tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark.tar.gz +standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.4-linux.tar.gz +tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties index f332c949460..2ac49249708 100755 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/log4j.properties @@ -1,5 +1,5 @@ ### set log levels ### -log4j.rootLogger=stdout,warn,error +log4j.rootLogger=debug,stdout,warn,error # console log log4j.appender.stdout = org.apache.log4j.ConsoleAppender diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java index 0c5bac5b4c0..8e9673c5c02 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java @@ -50,9 +50,20 @@ public void test2(){ @Test public void testTmp(){ + FEDBDeploy deploy = new FEDBDeploy("tmp2"); + deploy.setCluster(true); + deploy.setSparkMaster("local"); + // deploy.setBatchJobJarPath("hdfs://172.27.128.215:8020/Users/tobe/openmldb-batchjob-0.4.0-SNAPSHOT.jar"); + // deploy.setSparkYarnJars("hdfs://172.27.128.215:8020/Users/tobe/openmldb_040_jars/*"); + FEDBInfo fedbInfo = deploy.deployFEDB(2, 3); + System.out.println(fedbInfo); + } + @Test + public void testTmpByPath(){ FEDBDeploy deploy = new FEDBDeploy("tmp"); deploy.setCluster(true); deploy.setSparkMaster("local"); + deploy.setInstallPath("/Users/zhaowei/Desktop/openmldb-auto-test"); // deploy.setBatchJobJarPath("hdfs://172.27.128.215:8020/Users/tobe/openmldb-batchjob-0.4.0-SNAPSHOT.jar"); // deploy.setSparkYarnJars("hdfs://172.27.128.215:8020/Users/tobe/openmldb_040_jars/*"); FEDBInfo fedbInfo = deploy.deployFEDB(2, 3); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml new 
file mode 100644 index 00000000000..ebaa1e6417c --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java index 76bff84d834..df9a549025f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java @@ -35,6 +35,7 @@ @Slf4j @Setter public class FEDBDeploy { + private String installPath; private String version; private String fedbUrl; private String fedbName; @@ -55,6 +56,9 @@ public FEDBDeploy(String version){ } public FEDBInfo deployFEDBByStandalone(){ String testPath = DeployUtil.getTestPath(version); + if(StringUtils.isNotEmpty(installPath)){ + testPath = installPath+"/"+version; + } String ip = LinuxUtil.getLocalIP(); File file = new File(testPath); if(!file.exists()){ @@ -72,6 +76,9 @@ public FEDBInfo deployFEDB(String clusterName, int ns, int tablet){ FEDBInfo.FEDBInfoBuilder builder = FEDBInfo.builder(); builder.deployType(OpenMLDBDeployType.CLUSTER); String testPath = DeployUtil.getTestPath(version); + if(StringUtils.isNotEmpty(installPath)){ + testPath = installPath+"/"+version; + } if(StringUtils.isNotEmpty(clusterName)) { testPath = testPath + "/" + clusterName; } From a4e4f40fee641687bf132f20b6bf0ca0e9aa19ea Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 27 May 2022 20:52:10 +0800 Subject: [PATCH 005/172] add diff debug case --- cases/debug/diff-debug-bank.yaml | 320 ++++++++++++++++++ cases/debug/diff-debug-ttgwm.yaml | 167 +++++++++ 
cases/debug/diff-debug-ttgwm2.yaml | 72 ++++ cases/debug/diff-debug-ttgwm3.yaml | 51 +++ cases/debug/diff-debug.yaml | 191 +++++++++++ .../openmldb-sdk-test/pom.xml | 4 +- .../java_sdk_test/common/FedbTest.java | 4 +- .../java_sdk_test/util/FesqlUtil.java | 8 + .../java_sdk_test/temp/DebugTest.java | 65 ++++ .../java_sdk_test/temp/TestProcedure.java | 7 + 10 files changed, 885 insertions(+), 4 deletions(-) create mode 100644 cases/debug/diff-debug-bank.yaml create mode 100644 cases/debug/diff-debug-ttgwm.yaml create mode 100644 cases/debug/diff-debug-ttgwm2.yaml create mode 100644 cases/debug/diff-debug-ttgwm3.yaml create mode 100644 cases/debug/diff-debug.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java diff --git a/cases/debug/diff-debug-bank.yaml b/cases/debug/diff-debug-bank.yaml new file mode 100644 index 00000000000..438b54882f9 --- /dev/null +++ b/cases/debug/diff-debug-bank.yaml @@ -0,0 +1,320 @@ +db: test_zw3 +debugs: [] +cases: + - + id: 0 + desc: diff-miaoche + inputs: + - + name: flattenRequest + columns: ["reqId string","eventTime timestamp","main_id string","new_user_id string","loan_ts bigint","split_id int","time1 string"] + create: | + CREATE TABLE IF NOT EXISTS flattenRequest( + reqId string, + eventTime timestamp, + main_id string, + new_user_id string, + loan_ts bigInt, + split_id int, + time1 string + ); + rows: + - ['000014b8ec0ce8ad7c20f56915fc3a9f_2000-09-11',968601600000,'13624','000014b8ec0ce8ad7c20f56915fc3a9f',5923063887,1,'2000-09-11'] + - + name: action + create: | + CREATE TABLE IF NOT EXISTS action( + reqId string, + eventTime timestamp, + ingestionTime timestamp, + actionValue int, + index(key=(reqId), ttl=0m, ttl_type=absolute) + ); + inserts: + - insert into action values 
('000014b8ec0ce8ad7c20f56915fc3a9f_2000-09-11',968601600000,968601600000,0); + - + name: bo_bill_detail + create: | + CREATE TABLE IF NOT EXISTS bo_bill_detail( + ingestionTime timestamp, + new_user_id string, + bill_ts bigInt, + bank_id string, + lst_bill_amt double, + lst_repay_amt double, + card_limit double, + cur_blc double, + cur_bill_min_repay double, + buy_cnt double, + cur_bill_amt double, + adj_amt double, + rev_credit double, + avl_amt double, + advc_limit double, + repay_status string, + index(key=(new_user_id), ttl=0m, ttl_type=absolute, ts=`ingestionTime`) + ); + inserts: + - insert into bo_bill_detail values (966441600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920909587,'16',51.71693919790691,48.78645816207608,51.58933610737785,51.799664091574954,48.822455898899634,4.0,49.79404783706583,26.457513110645905,26.457513110645905,26.457513110645905,51.58933610737785,'0'); + - insert into bo_bill_detail values (964454400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918950767,'2',51.94205040234742,52.598874512673746,51.93387237632103,51.93387237632103,26.457513110645905,3.0,52.59481818582511,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (950630400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905234527,'16',51.60229258472921,49.15064597744367,51.58933610737785,51.60169377065059,48.61238422459857,6.0,49.14319179703328,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5884239387,'16',51.58922465011468,51.8106523796024,51.58933610737785,50.91037909896174,48.42646177452984,2.0,51.31735378992179,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values 
(966528000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920972167,'6',51.94317086971107,51.998970182110334,51.93387237632103,51.93358932328864,49.66727796044394,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0'); + - insert into bo_bill_detail values (974822400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897781927,'2',51.65759673078105,49.667580975924324,51.93387237632103,51.84769425924358,26.457513110645905,1.0,50.63292505870069,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (971712000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5894611827,'16',51.60597058480733,48.68377758555718,51.58933610737785,51.580363511708605,48.589732454501124,3.0,47.9899301937396,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (955987200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5878966887,'16',51.027420079796315,51.027918828813704,51.0440789906136,51.043883081129316,48.019525195486885,0.0,51.04437285343018,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0'); + - insert into bo_bill_detail values (958579200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5881563927,'16',51.043883081129316,51.043883081129316,51.58933610737785,51.58922465011468,48.599124477710504,0.0,51.58922465011468,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (958665600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913108567,'6',51.932222174676866,52.03442226065357,51.93387237632103,51.93395709937767,49.66854638501111,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0'); + - insert into bo_bill_detail values 
(956505600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910970407,'2',51.93158576434962,51.93387237632103,51.93387237632103,51.947997073996994,26.457513110645905,1.0,51.95024446525733,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (969033600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5892018927,'16',51.58823024683053,48.62821608901564,51.58933610737785,51.60597058480733,48.61690035368359,3.0,48.94247746079064,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (974476800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5865760587,'16',51.04098353284349,51.0440789906136,51.0440789906136,51.040294865919414,48.01597650782497,0.0,51.04339330412898,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0'); + - insert into bo_bill_detail values (972144000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895101307,'2',26.457513110645905,51.18079913405026,51.93387237632103,51.65759673078105,26.457513110645905,1.0,52.117953720383156,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (951148800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905732467,'2',47.087959182788964,26.457513110645905,51.93387237632103,51.93352578055914,26.457513110645905,0.0,51.92555343951569,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5886917547,'16',50.91037909896174,50.795689777775436,51.58933610737785,51.59442024095241,48.68997432737052,1.0,51.53996895614121,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values 
(977068800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5868357567,'16',51.040294865919414,48.67944330002142,51.0440789906136,50.98937242210381,47.96187861208107,2.0,47.89066401711298,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0'); + - insert into bo_bill_detail values (956073600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910515667,'6',51.96310518050283,52.10910860876436,51.93387237632103,51.932222174676866,49.66584742053638,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0'); + - insert into bo_bill_detail values (948038400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5871020727,'16',50.98937242210381,51.04015282108783,51.0440789906136,51.03942495757569,48.01482583536048,0.0,51.08769225557169,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0'); + - insert into bo_bill_detail values (959097600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913594627,'2',51.947997073996994,52.61299744359753,51.93387237632103,51.94205040234742,26.457513110645905,3.0,52.610072229564565,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (961776000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916270987,'2',51.94205040234742,26.457513110645905,51.93387237632103,51.94205040234742,26.457513110645905,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915720727,'16',51.56826349606898,48.71044754464898,51.58933610737785,51.603461124230805,48.614181058617035,3.0,49.22148006714142,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values 
(947952000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902552767,'16',51.599192823144044,49.07310770676746,51.58933610737785,51.60229258472921,48.613021918000534,3.0,49.11379134214747,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915775267,'6',51.93395709937767,49.850784346888666,51.93387237632103,51.951256962656835,49.978867534188886,1.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0'); + - insert into bo_bill_detail values (963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918402067,'16',51.603461124230805,48.73642888025343,51.58933610737785,51.71693919790691,48.734682721856316,4.0,49.918256179478064,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (966355200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5889332307,'16',51.59442024095241,51.65616032962574,51.58933610737785,51.58823024683053,48.59806580513261,0.0,51.65036011491111,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (966960000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921422407,'2',51.93387237632103,51.93387430184657,51.93387237632103,51.97110447161961,26.457513110645905,2.0,51.97110543369267,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (977414400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900373327,'2',51.84769425924358,51.8945555911215,51.93387237632103,51.93264098811074,26.457513110645905,1.0,51.97556541298997,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values 
(953395200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5876312847,'16',51.050959834267566,51.40695478240274,51.0440789906136,51.027420079796315,48.002294736814406,1.0,51.390784193277305,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0'); + - insert into bo_bill_detail values (976982400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899884927,'16',51.599326545992824,48.73642888025343,51.58933610737785,51.599192823144044,48.60971096396274,3.0,48.73391221726407,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (948470400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5903049387,'2',51.93264098811074,52.606433066688716,51.93387237632103,47.087959182788964,26.457513110645905,3.0,51.96187352280516,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (953481600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907920007,'16',51.60169377065059,26.457513110645905,51.58933610737785,51.63982862093948,49.34031921258718,6.0,48.386326580967065,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (953308800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907824367,'6',51.93357391899772,26.457513110645905,51.93387237632103,51.96310518050283,50.63784750559605,2.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0'); + - insert into bo_bill_detail values (958579200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913046587,'16',51.60320145107278,48.6224875957617,51.58933610737785,51.56826349606898,48.57689162554558,3.0,47.36033572516141,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values 
(963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918408367,'6',51.951256962656835,51.93387237632103,51.93387237632103,51.94317086971107,49.847183471084904,1.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0'); + - insert into bo_bill_detail values (955987200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910503487,'16',51.63982862093948,49.36352904726323,51.58933610737785,51.60320145107278,48.61394964410935,3.0,48.89360387617178,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (974304000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897288487,'16',51.580363511708605,48.68377758555718,51.58933610737785,51.599326545992824,48.60988582582765,3.0,48.9990306026558,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0'); + - insert into bo_bill_detail values (953827200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908292547,'2',51.93352578055914,51.93387237632103,51.93387237632103,51.93158576434962,26.457513110645905,1.0,51.931933335858005,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0'); + - insert into bo_bill_detail values (950716800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905270587,'6',26.457513110645905,26.457513110645905,51.93387237632103,51.93357391899772,49.681316407679866,3.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0'); + - insert into bo_bill_detail values (950803200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5873703267,'16',51.03942495757569,49.708207571788385,51.0440789906136,51.050959834267566,48.1579214667743,1.0,49.752745652878296,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0'); + - + name: bo_browse_history + create: | + CREATE TABLE IF NOT EXISTS bo_browse_history( + ingestionTime timestamp, + new_user_id string, + bws_ts bigInt, + action string, + subaction string, + index(key=(new_user_id), ttl=(0m, 9), 
ttl_type=absandlat, ts=`ingestionTime`) + ); + - + name: bo_detail + create: | + CREATE TABLE IF NOT EXISTS bo_detail( + ingestionTime timestamp, + new_user_id string, + trx_ts bigInt, + trx_typ string, + trx_amt double, + is_slry string, + index(key=(new_user_id), ttl=(0m, 9), ttl_type=absandlat, ts=`ingestionTime`) + ); + inserts: + - insert into bo_detail values (946742400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901291087,'1',42.84831151865846,'0'); + - insert into bo_detail values (947001600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901550287,'1',40.41522237969254,'0'); + - insert into bo_detail values (947088000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901636687,'1',39.878775056413154,'0'); + - insert into bo_detail values (947174400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901723087,'1',44.23001243499712,'0'); + - insert into bo_detail values (947260800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901809487,'0',39.878775056413154,'0'); + - insert into bo_detail values (947865600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902414287,'1',43.912592726916046,'0'); + - insert into bo_detail values (947952000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902500687,'1',44.424108319695065,'0'); + - insert into bo_detail values (948038400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902587087,'1',42.9582215646784,'0'); + - insert into bo_detail values (948124800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902673487,'1',42.9582215646784,'0'); + - insert into bo_detail values (948297600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902846287,'1',42.143743307874296,'0'); + - insert into bo_detail values (948384000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902932687,'1',39.483615589254235,'0'); + - insert into bo_detail values (950025600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904574287,'1',37.21554379557015,'0'); + - insert into bo_detail values (950112000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904660687,'1',37.21554379557015,'0'); + - insert into bo_detail values 
(950198400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904747087,'1',40.41522237969254,'0'); + - insert into bo_detail values (950284800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904833487,'1',42.131818142586724,'0'); + - insert into bo_detail values (950371200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904919887,'1',37.21554379557015,'0'); + - insert into bo_detail values (950457600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905006287,'0',45.99504212412464,'0'); + - insert into bo_detail values (950544000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905092687,'1',40.189050747685,'0'); + - insert into bo_detail values (950630400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905179087,'1',40.1251928344276,'0'); + - insert into bo_detail values (950716800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905265487,'1',42.359495983781486,'0'); + - insert into bo_detail values (950803200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905351887,'1',41.907642501099964,'0'); + - insert into bo_detail values (951235200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905783887,'1',44.18007695783247,'0'); + - insert into bo_detail values (951494400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5906043087,'1',38.44157384915451,'0'); + - insert into bo_detail values (952704000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907166287,'0',36.097553933750135,'0'); + - insert into bo_detail values (952963200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907425487,'1',37.21554379557015,'0'); + - insert into bo_detail values (953049600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907511887,'1',44.92644766727055,'0'); + - insert into bo_detail values (953136000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907598287,'1',37.21554379557015,'0'); + - insert into bo_detail values (953222400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907684687,'1',44.6745531594889,'0'); + - insert into bo_detail values (953308800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907771087,'1',45.31116529068746,'0'); + - insert into bo_detail values 
(953395200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907857487,'1',44.93560503654089,'0'); + - insert into bo_detail values (953481600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907943887,'1',40.189050747685,'0'); + - insert into bo_detail values (953654400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908116687,'1',43.2625126408534,'0'); + - insert into bo_detail values (953740800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908203087,'1',40.6903280891172,'0'); + - insert into bo_detail values (953827200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908289487,'1',40.189050747685,'0'); + - insert into bo_detail values (954172800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908635087,'1',40.189050747685,'0'); + - insert into bo_detail values (954259200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908721487,'1',46.3139395862628,'0'); + - insert into bo_detail values (955468800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5909931087,'0',44.79231742162934,'0'); + - insert into bo_detail values (955555200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910017487,'0',30.56884361568164,'0'); + - insert into bo_detail values (955641600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910103887,'1',39.367207165355275,'0'); + - insert into bo_detail values (955814400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910276687,'1',41.53328785444273,'0'); + - insert into bo_detail values (955900800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910363087,'1',39.367207165355275,'0'); + - insert into bo_detail values (956073600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910535887,'0',46.57905537900055,'1'); + - insert into bo_detail values (956160000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910622287,'1',39.367207165355275,'0'); + - insert into bo_detail values (956246400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910708687,'1',44.893101920005485,'0'); + - insert into bo_detail values (956332800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910795087,'0',42.9582215646784,'0'); + - insert into bo_detail values 
(956419200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910881487,'1',40.189050747685,'0'); + - insert into bo_detail values (956505600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910967887,'1',42.9582215646784,'0'); + - insert into bo_detail values (956592000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911054287,'1',40.8438281751356,'0'); + - insert into bo_detail values (956764800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911227087,'0',38.05357144868271,'0'); + - insert into bo_detail values (956851200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911313487,'0',43.757547920330275,'0'); + - insert into bo_detail values (956937600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911399887,'0',44.54253472805516,'0'); + - insert into bo_detail values (957024000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911486287,'1',44.969283961388584,'0'); + - insert into bo_detail values (957110400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911572687,'1',42.9582215646784,'0'); + - insert into bo_detail values (957196800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911659087,'1',42.143743307874296,'0'); + - insert into bo_detail values (957283200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911745487,'1',42.63760546747436,'0'); + - insert into bo_detail values (957456000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911918287,'0',45.09442315852372,'0'); + - insert into bo_detail values (957542400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912004687,'1',40.1251928344276,'0'); + - insert into bo_detail values (957628800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912091087,'1',42.54106016544486,'0'); + - insert into bo_detail values (957715200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912177487,'1',43.78016902662665,'0'); + - insert into bo_detail values (957801600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912263887,'0',42.9582215646784,'0'); + - insert into bo_detail values (958406400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912868687,'1',44.94670288241397,'0'); + - insert into bo_detail values 
(958492800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912955087,'1',39.367207165355275,'0'); + - insert into bo_detail values (958579200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913041487,'1',40.189050747685,'0'); + - insert into bo_detail values (958665600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913127887,'1',43.78016902662665,'0'); + - insert into bo_detail values (958752000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913214287,'1',40.986157419304384,'0'); + - insert into bo_detail values (958838400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913300687,'1',42.143743307874296,'0'); + - insert into bo_detail values (958924800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913387087,'1',39.367207165355275,'0'); + - insert into bo_detail values (959011200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913473487,'1',39.31737147877513,'0'); + - insert into bo_detail values (959097600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913559887,'1',39.367207165355275,'0'); + - insert into bo_detail values (959270400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913732687,'0',42.9582215646784,'0'); + - insert into bo_detail values (959356800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913819087,'1',40.93392969163845,'0'); + - insert into bo_detail values (959443200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913905487,'1',42.14136803664542,'0'); + - insert into bo_detail values (959788800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914251087,'1',40.298387064496765,'0'); + - insert into bo_detail values (960220800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914683087,'1',44.79231742162934,'0'); + - insert into bo_detail values (960307200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914769487,'0',46.496681602024026,'1'); + - insert into bo_detail values (960393600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914855887,'1',38.21717807478726,'0'); + - insert into bo_detail values (960566400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915028687,'1',44.54253472805516,'0'); + - insert into bo_detail values 
(960652800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915115087,'1',43.972516416507254,'0'); + - insert into bo_detail values (960912000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915374287,'1',38.21717807478726,'0'); + - insert into bo_detail values (960998400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915460687,'1',38.21717807478726,'0'); + - insert into bo_detail values (961084800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915547087,'0',43.757547920330275,'0'); + - insert into bo_detail values (961171200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915633487,'1',38.21717807478726,'0'); + - insert into bo_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915719887,'1',44.85326855425366,'0'); + - insert into bo_detail values (961689600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916151887,'1',44.79231742162934,'0'); + - insert into bo_detail values (962121600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916583887,'1',39.87071230866085,'0'); + - insert into bo_detail values (962208000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916670287,'0',43.757547920330275,'0'); + - insert into bo_detail values (962726400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917188687,'0',43.757547920330275,'0'); + - insert into bo_detail values (963072000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917534287,'1',45.22220472290134,'0'); + - insert into bo_detail values (963158400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917620687,'1',43.427583400414996,'0'); + - insert into bo_detail values (963244800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917707087,'1',44.79231742162934,'0'); + - insert into bo_detail values (963331200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917793487,'1',39.367207165355275,'0'); + - insert into bo_detail values (963417600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917879887,'1',38.96689235748727,'0'); + - insert into bo_detail values (963504000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917966287,'1',40.187806608472684,'0'); + - insert into bo_detail values 
(963590400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918052687,'1',38.50666435826402,'0'); + - insert into bo_detail values (963676800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918139087,'1',40.947203811737864,'0'); + - insert into bo_detail values (963849600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918311887,'1',44.85326855425366,'0'); + - insert into bo_detail values (963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918398287,'1',42.50697707435804,'0'); + - insert into bo_detail values (964022400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918484687,'1',40.8438281751356,'0'); + - insert into bo_detail values (964108800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918571087,'1',41.03004143307682,'0'); + - insert into bo_detail values (964195200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918657487,'1',42.19024887340676,'0'); + - insert into bo_detail values (964281600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918743887,'1',40.189050747685,'0'); + - insert into bo_detail values (964368000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918830287,'1',44.26315171787928,'0'); + - insert into bo_detail values (964540800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919003087,'1',40.76954991166814,'0'); + - insert into bo_detail values (964627200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919089487,'1',41.3606636793947,'0'); + - insert into bo_detail values (964713600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919175887,'1',43.02598749593088,'0'); + - insert into bo_detail values (964800000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919262287,'1',40.6903280891172,'0'); + - insert into bo_detail values (964886400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919348687,'1',41.263791633828326,'0'); + - insert into bo_detail values (964972800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919435087,'1',41.53328785444273,'0'); + - insert into bo_detail values (965059200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919521487,'1',38.05357144868271,'0'); + - insert into bo_detail values 
(965318400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919780687,'1',43.150435687255815,'0'); + - insert into bo_detail values (965404800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919867087,'1',38.12817199919241,'0'); + - insert into bo_detail values (965491200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919953487,'1',44.35972948519862,'0'); + - insert into bo_detail values (965577600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920039887,'1',40.189050747685,'0'); + - insert into bo_detail values (965750400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920212687,'1',37.21554379557015,'0'); + - insert into bo_detail values (966268800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920731087,'1',41.907642501099964,'0'); + - insert into bo_detail values (966355200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920817487,'0',30.56884361568164,'0'); + - insert into bo_detail values (966441600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920903887,'1',41.53328785444273,'0'); + - insert into bo_detail values (966528000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920990287,'1',40.05638276230144,'0'); + - insert into bo_detail values (966614400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921076687,'1',38.13474137843339,'0'); + - insert into bo_detail values (966700800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921163087,'1',39.548505660770545,'0'); + - insert into bo_detail values (966787200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921249487,'1',43.757547920330275,'0'); + - insert into bo_detail values (966873600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921335887,'1',41.75219275678824,'0'); + - insert into bo_detail values (966960000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921422287,'1',42.5241449061589,'0'); + - insert into bo_detail values (967046400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921508687,'1',40.189050747685,'0'); + - insert into bo_detail values (967132800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921595087,'1',41.81688773689405,'0'); + - insert into bo_detail values 
(967219200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921681487,'1',30.56884361568164,'0'); + - insert into bo_detail values (967305600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921767887,'0',36.632983771459294,'0'); + - insert into bo_detail values (967392000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921854287,'0',45.55948199881118,'0'); + - insert into bo_detail values (967478400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921940687,'1',43.66837413964482,'0'); + - insert into bo_detail values (967564800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922027087,'1',42.9582215646784,'0'); + - insert into bo_detail values (967651200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922113487,'0',42.9582215646784,'0'); + - insert into bo_detail values (967737600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922199887,'1',42.143743307874296,'0'); + - insert into bo_detail values (967910400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922372687,'1',42.45387025937682,'0'); + - insert into bo_detail values (968083200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922545487,'1',42.981263359747814,'0'); + - insert into bo_detail values (968515200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922977487,'0',37.34324704682227,'0'); + - insert into bo_detail values (968601600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5923063887,'0',45.55948199881118,'0'); + - insert into bo_detail values (971884800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5894811087,'1',37.21554379557015,'0'); + - insert into bo_detail values (972230400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895156687,'1',38.13474137843339,'0'); + - insert into bo_detail values (972316800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895243087,'1',37.21554379557015,'0'); + - insert into bo_detail values (972921600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895847887,'1',42.359495983781486,'0'); + - insert into bo_detail values (973267200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896193487,'1',37.21554379557015,'0'); + - insert into bo_detail values 
(973353600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896279887,'1',41.04228672966457,'0'); + - insert into bo_detail values (973440000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896366287,'1',39.56939473886352,'0'); + - insert into bo_detail values (973872000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896798287,'0',43.757547920330275,'0'); + - insert into bo_detail values (974131200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897057487,'1',42.9582215646784,'0'); + - insert into bo_detail values (974217600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897143887,'1',43.757547920330275,'0'); + - insert into bo_detail values (974304000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897230287,'1',43.757547920330275,'0'); + - insert into bo_detail values (974563200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897489487,'1',44.79231742162934,'0'); + - insert into bo_detail values (974736000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897662287,'1',36.523756652348894,'0'); + - insert into bo_detail values (974822400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897748687,'1',40.41522237969254,'0'); + - insert into bo_detail values (974908800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897835087,'0',42.19024887340676,'0'); + - insert into bo_detail values (974995200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897921487,'1',40.6903280891172,'0'); + - insert into bo_detail values (975081600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5898007887,'1',38.66247922728184,'0'); + - insert into bo_detail values (975168000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5898094287,'0',44.79231742162934,'0'); + - insert into bo_detail values (975772800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5898699087,'1',42.9582215646784,'0'); + - insert into bo_detail values (976377600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899303887,'1',37.21554379557015,'0'); + - insert into bo_detail values (976464000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899390287,'1',37.21554379557015,'0'); + - insert into bo_detail values 
(976550400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899476687,'1',39.31737147877513,'0'); + - insert into bo_detail values (976809600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899735887,'1',44.894525278701856,'0'); + - insert into bo_detail values (976896000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899822287,'1',42.14255687544362,'0'); + - insert into bo_detail values (977068800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899995087,'1',42.957057627356185,'0'); + - insert into bo_detail values (977155200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900081487,'1',43.74606267997155,'0'); + - insert into bo_detail values (977328000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900254287,'1',43.78016902662665,'0'); + - insert into bo_detail values (977500800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900427087,'1',42.94652256003971,'0'); + - insert into bo_detail values (977587200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900513487,'1',42.9582215646784,'0'); + - insert into bo_detail values (977673600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900599887,'1',42.93470158275238,'0'); + - insert into bo_detail values (977846400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900772687,'1',43.23059333388798,'0'); + - insert into bo_detail values (977932800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900859087,'1',42.83541642146134,'0'); + - + name: bo_user + create: | + CREATE TABLE IF NOT EXISTS bo_user( + ingestionTime timestamp, + new_user_id string, + sex string, + prof string, + edu string, + marriage string, + hukou_typ string, + index(key=(new_user_id), ttl=0m, ttl_type=absolute) + ); + inserts: + - insert into bo_user values (1603439606052,'000014b8ec0ce8ad7c20f56915fc3a9f','1','2','3','1','2'); + sql: | + select + reqId as reqId_42, + ingestionTime, + max(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_max_41, + avg(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_avg_42, + count(`trx_amt`) over 
bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_count_43, + sum(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_sum_44, + from + (select `eventTime` as `ingestionTime`, `new_user_id` as `new_user_id`, bigint(0) as `trx_ts`, '' as `trx_typ`, double(0) as `trx_amt`, '' as `is_slry`, reqId from `flattenRequest`) + window + bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as ( + UNION (select `ingestionTime`, `new_user_id`, `trx_ts`, `trx_typ`, `trx_amt`, `is_slry`, '' as reqId from `bo_detail`) + partition by `new_user_id` order by `ingestionTime` rows_range between 5529600999 preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW); + expect: + success: true \ No newline at end of file diff --git a/cases/debug/diff-debug-ttgwm.yaml b/cases/debug/diff-debug-ttgwm.yaml new file mode 100644 index 00000000000..71f3c95244a --- /dev/null +++ b/cases/debug/diff-debug-ttgwm.yaml @@ -0,0 +1,167 @@ +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: diff-ttgwm + inputs: + - + name: flattenRequest + create: | + CREATE TABLE IF NOT EXISTS flattenRequest( + reqId string, + eventTime timestamp, + f_index string, + f_action_create_order string, + f_action_create_order_actionTime timestamp, + f_action_create_order_itemType string, + f_action_show string, + f_action_show_actionTime timestamp, + f_action_show_itemType string, + f_action_collect string, + f_action_collect_actionTime timestamp, + f_action_collect_itemType string, + f_requestCount double, + f_requestId string, + f_userId string, + f_userName double, + f_userNickName string, + f_userAge double, + f_userGender string, + f_userFromGroup double, + f_userScore double, + f_userConsultCount double, + f_userHeadType double, + f_userAddress string, + f_userZipcode string, + f_userCommunicatingBuyers double, + f_userMessages double, + f_userLastMessageTime double, + f_userChannelTop double, + f_userBarANDTop double, + f_userLastLoginTime 
double, + f_userLastOrderTime timestamp, + f_userPhoneType double, + f_userMCC double, + f_userMNC double, + f_userAPPVersion double, + f_userDeviceID double, + f_userDeviceOS double, + f_userNetworkType double, + f_userRegisterMethod double, + f_userRegisterTime timestamp, + f_userPhoneNumber double, + f_userCategoryAddToCartCount double, + f_userHomeAddToCartCount double, + f_userLastBuyItTime double, + f_userOrderCount double, + f_userOrderDeliveryFreeCount double, + f_userOrderMoneyCount double, + f_userOrderMoneyAverage double, + f_userOrderMoneyHighest double, + f_userOrderScoreAverage double, + f_userOrderToPayCount double, + f_userOrderToDeliverCount double, + f_userOrderInDeliveryCount double, + f_userOrderToScoreCount double, + f_userFavoriteItems string, + f_userClickedItems string, + f_userSharedItemID double, + f_userPublishedItemID double, + f_userSearchedqueryCount3Period string, + f_userSearchedqueryCount7Period string, + f_userSearchedqueryCount30Period string, + f_userClickedqueryCount3Period string, + f_userClickedqueryCount7Period string, + f_userClickedqueryCount30Period string, + f_syncTime double, + f_itemId string, + f_itemtipoff string, + f_itemName double, + f_itemTitle string, + f_temDescription string, + f_itemCategoryLevel1 string, + f_itemCategoryLevel2 double, + f_itemCategoryLevel3 double, + f_itemHome double, + f_itemPurchasingPlace double, + f_itemDeadline double, + f_itemExpires double, + f_itemWeight double, + f_itemSpec double, + f_itemModelNumber double, + f_itemAgeRange double, + f_itemFunction double, + f_itemTargetPopulation double, + f_itemPackage double, + f_itemStorage double, + f_itemDiscount double, + f_itemPrice double, + f_itemSold double, + f_itemComments double, + f_itemFavorites double, + f_itemDeliveryFree double, + f_itemDutyFree double, + f_itemChannel string, + f_itemBrAND double, + f_itemPublishtime timestamp, + f_itemPublisherId double, + f_itemPublisherRegtime double, + f_itermPublisherOrders double, + 
f_itermSizeCount double, + f_itemColorCount double, + f_itemDetailsPhotos double, + f_itemDescribePhotos double, + f_itemExpired string, + f_itemHistoryPrice double, + f_itemCartRatio double, + f_itemshownUserID double, + f_itemClickedUserID double, + f_itemPurchasedUserID double, + f_itemTargetPopulationFemale double, + f_itemTargetPopulationMale double, + f_userOrderDids string, + index(key=(f_itemTitle), ttl=7201m, ttl_type=absolute, ts=`eventTime`), + index(key=(f_requestId), ttl=7201m, ttl_type=absolute, ts=`eventTime`), + index(key=(f_temDescription), ttl=7201m, ttl_type=absolute, ts=`eventTime`) + ); + inserts: + - insert into flattenRequest values ('train_195042',1511002870000,'train_195042','0',null,'','1',1511002870000,'disclosure','0',null,'',null,'025606ecb2f078e7931ec90b9a27a826','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,3107
7,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38282','1',null,'【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','食品',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,13600.0,null,null,null,null,null,'中粮我买网',null,1510972014000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,1.0,''); + - insert into flattenRequest values ('train_192870',1511002870000,'train_192870','0',null,'','1',1511002870000,'disclosure','0',null,'',null,'025606ecb2f078e7931ec90b9a27a826','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302
,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38271','1',null,'【新低价,40码起】saucony 圣康尼 RIDE 10 男款缓震跑鞋 2色','saucony(圣康尼)是来自美国的专业跑鞋品牌, 其名称来源于美国宾夕法尼亚州附近一条美丽的河流——Saucony。现其产品线分为专业运动系列和复古休闲系列两大类,为专业跑步运动员及跑步爱好者提供专业、舒适、安全的跑步产品。 这款saucony 圣康尼 RIDE 10 男款缓震跑鞋是索康尼旗下次顶级避震跑鞋,其获得了17年夏季《跑者世界》的最佳升级奖。从外观来看相比9代有了许多变化,鞋面采用工程网眼面料,增加了鞋面的透气性,外层为saucony经典Flex Film支撑材料覆盖,增加鞋面的延展和贴合性。后跟位置特别加强了稳定设计,外层增加了编织技术,增强整个鞋跟的稳定性。 中底采用全掌PWRFoam中底材料,比之前的EVA材质回弹效果更好,上层使用EVERUN鞋垫,辅助增加中底的缓震和回弹性能。大底依旧采用XT-900耐磨碳素橡胶,在前掌区域增加了IBR+发泡橡胶,材质较轻,并且能提高缓震保护。','服饰',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,53400.0,null,null,null,null,null,'亚马逊中国',null,1510926904000,null,null,null,null,null,null,null,'1',null,null,null,null,null,0.0,1.0,''); + - insert into flattenRequest values 
('train_197066',1511003784000,'train_197066','0',null,'','1',1511003784000,'disclosure','0',null,'',null,'fe5eb556e3768e49b7919ebc4f9375d0','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38293','1',null,'【限地区,新低价,PLUS会员】Panasonic 松下 ES-RF41-N405 
电动剃须刀','这款Panasonic松下的ES-RF41-N405电动剃须刀,采用立体浮动4刀头,往复式设计,可较好贴合面部轮廓,剃须更舒适高效。刀片为Nano抛光刀片,提升剃须。5级电量显示,干湿两用,带弹出式修剪器。支持1小时快充,可以全身水洗。配有充电底座和便携收纳小包。','日百',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,30900.0,null,null,null,null,null,'京东',null,1510974778000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,1.0,''); + - insert into flattenRequest values ('train_195043',1511003784000,'train_195043','0',null,'','1',1511003784000,'disclosure','0',null,'',null,'fe5eb556e3768e49b7919ebc4f9375d0','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,3116
6,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38282','1',null,'【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','食品',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,13600.0,null,null,null,null,null,'中粮我买网',null,1510972014000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,1.0,''); + - insert into flattenRequest values ('train_68005',1510928344000,'train_68005','0',null,'','1',1510928344000,'disclosure','0',null,'',null,'caae1f9bd2d0b61af2478e32ce881960','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,
32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38151','1',null,'【历史低价】BRAUN 博朗 Satin Hair 7 HD785 负离子吹风机','BRAUN博朗HD785电吹风机,采用负离子技术,中和正离子并使毛糙秀发恢复妥帖。另外还有INOTEC炫彩护色离子科技,这种先进离子性能在造型过程中可以处理卷结与静电,使秀发更加闪亮顺滑。 最值得一提的是内置新型智能温度传感器,每分钟监控头发温度600次,及时智能调整秀发受热温度,从而避免过热,保护秀发。这款电吹风机拥有双头可以更换,2000W大功率,有速干效果,还拥有4档温度和2档风量。','日百',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,35900.0,null,null,null,null,null,'丰趣海淘',null,1510890999000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,0.0,''); + - + name: action + create: | + CREATE TABLE IF NOT EXISTS action( + reqId string, + eventTime timestamp, + ingestionTime timestamp, + actionValue int, + index(key=(reqId), ttl=0m, ttl_type=absolute) + ); + inserts: + - insert into action values ('train_0',1511188285000,1511188285000,0); + sql: | + select + reqId as reqId_1, + `reqId` as flattenRequest_reqId_original_0, + `eventTime` as flattenRequest_eventTime_original_1, + distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_111, + distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_112, + distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_113, + distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_114, 
+ fz_top1_ratio(`f_itemTitle`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 as flattenRequest_f_itemTitle_window_top1_ratio_126, + case when !isnull(at(`f_userGender`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_userGender`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_userGender_window_count_127, + case when !isnull(at(`f_itemExpired`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_itemExpired`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_itemExpired_window_count_128 + from + `flattenRequest` + window flattenRequest_f_temDescription_eventTime_0s_172801s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_temDescription_eventTime_0s_432001s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_requestId_eventTime_0s_432001s_100 as (partition by `f_requestId` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100) + ; + expect: + success: true \ No newline at end of file diff --git a/cases/debug/diff-debug-ttgwm2.yaml b/cases/debug/diff-debug-ttgwm2.yaml new file mode 100644 index 00000000000..dfdd8baf8f0 --- /dev/null +++ b/cases/debug/diff-debug-ttgwm2.yaml @@ -0,0 +1,72 @@ +db: test1 +debugs: [] +cases: + - + id: 0 + desc: diff-ttgwm + inputs: + - + name: flattenRequest + columns: ["reqId string","eventTime timestamp","f_index 
string","f_requestId string","f_userGender string","f_userAddress string","f_itemTitle string","f_temDescription string","f_itemExpired string"] + indexs: ["index1:f_itemTitle:eventTime:0m:absolute","index2:f_requestId:eventTime:0m:absolute","index3:f_temDescription:eventTime:0m:absolute"] +# create: | +# CREATE TABLE IF NOT EXISTS flattenRequest( +# reqId string, +# eventTime timestamp, +# f_index string, +# f_requestId string, +# f_userGender string, +# f_userAddress string, +# f_itemTitle string, +# f_temDescription string, +# f_itemExpired string, +# index(key=(f_itemTitle), ttl=7201m, ttl_type=absolute, ts=`eventTime`), +# index(key=(f_requestId), ttl=7201m, ttl_type=absolute, ts=`eventTime`), +# index(key=(f_temDescription), ttl=7201m, ttl_type=absolute, ts=`eventTime`) +# ); +# inserts: +# - insert into flattenRequest values ('train_195042',1511002870000,'train_195042','025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','1'); +# - insert into flattenRequest values ('train_192870',1511002870000,'train_192870','025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【新低价,40码起】saucony 圣康尼 RIDE 10 男款缓震跑鞋 2色','saucony(圣康尼)是来自美国的专业跑鞋品牌, 
其名称来源于美国宾夕法尼亚州附近一条美丽的河流——Saucony。现其产品线分为专业运动系列和复古休闲系列两大类,为专业跑步运动员及跑步爱好者提供专业、舒适、安全的跑步产品。 这款saucony 圣康尼 RIDE 10 男款缓震跑鞋是索康尼旗下次顶级避震跑鞋,其获得了17年夏季《跑者世界》的最佳升级奖。从外观来看相比9代有了许多变化,鞋面采用工程网眼面料,增加了鞋面的透气性,外层为saucony经典Flex Film支撑材料覆盖,增加鞋面的延展和贴合性。后跟位置特别加强了稳定设计,外层增加了编织技术,增强整个鞋跟的稳定性。 中底采用全掌PWRFoam中底材料,比之前的EVA材质回弹效果更好,上层使用EVERUN鞋垫,辅助增加中底的缓震和回弹性能。大底依旧采用XT-900耐磨碳素橡胶,在前掌区域增加了IBR+发泡橡胶,材质较轻,并且能提高缓震保护。','1'); +# - insert into flattenRequest values ('train_197066',1511003784000,'train_197066','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区,新低价,PLUS会员】Panasonic 松下 ES-RF41-N405 电动剃须刀','这款Panasonic松下的ES-RF41-N405电动剃须刀,采用立体浮动4刀头,往复式设计,可较好贴合面部轮廓,剃须更舒适高效。刀片为Nano抛光刀片,提升剃须。5级电量显示,干湿两用,带弹出式修剪器。支持1小时快充,可以全身水洗。配有充电底座和便携收纳小包。','1'); +# - insert into flattenRequest values ('train_195043',1511003784000,'train_195043','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','1'); +# - insert into flattenRequest values 
('train_68005',1510928344000,'train_68005','caae1f9bd2d0b61af2478e32ce881960','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【历史低价】BRAUN 博朗 Satin Hair 7 HD785 负离子吹风机','BRAUN博朗HD785电吹风机,采用负离子技术,中和正离子并使毛糙秀发恢复妥帖。另外还有INOTEC炫彩护色离子科技,这种先进离子性能在造型过程中可以处理卷结与静电,使秀发更加闪亮顺滑。 最值得一提的是内置新型智能温度传感器,每分钟监控头发温度600次,及时智能调整秀发受热温度,从而避免过热,保护秀发。这款电吹风机拥有双头可以更换,2000W大功率,有速干效果,还拥有4档温度和2档风量。','1'); + rows: + - ["train_195042",1511002870000,"train_195042",'025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。',"1"] + - ['train_192870',1511002870000,'train_192870','025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【新低价,40码起】saucony 圣康尼 RIDE 10 男款缓震跑鞋 2色','saucony(圣康尼)是来自美国的专业跑鞋品牌, 其名称来源于美国宾夕法尼亚州附近一条美丽的河流——Saucony。现其产品线分为专业运动系列和复古休闲系列两大类,为专业跑步运动员及跑步爱好者提供专业、舒适、安全的跑步产品。 这款saucony 圣康尼 RIDE 10 男款缓震跑鞋是索康尼旗下次顶级避震跑鞋,其获得了17年夏季《跑者世界》的最佳升级奖。从外观来看相比9代有了许多变化,鞋面采用工程网眼面料,增加了鞋面的透气性,外层为saucony经典Flex 
Film支撑材料覆盖,增加鞋面的延展和贴合性。后跟位置特别加强了稳定设计,外层增加了编织技术,增强整个鞋跟的稳定性。 中底采用全掌PWRFoam中底材料,比之前的EVA材质回弹效果更好,上层使用EVERUN鞋垫,辅助增加中底的缓震和回弹性能。大底依旧采用XT-900耐磨碳素橡胶,在前掌区域增加了IBR+发泡橡胶,材质较轻,并且能提高缓震保护。','1'] + - ['train_197066',1511003784000,'train_197066','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区,新低价,PLUS会员】Panasonic 松下 ES-RF41-N405 电动剃须刀','这款Panasonic松下的ES-RF41-N405电动剃须刀,采用立体浮动4刀头,往复式设计,可较好贴合面部轮廓,剃须更舒适高效。刀片为Nano抛光刀片,提升剃须。5级电量显示,干湿两用,带弹出式修剪器。支持1小时快充,可以全身水洗。配有充电底座和便携收纳小包。','1'] + - ['train_195043',1511003784000,'train_195043','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','1'] + - ['train_68005',1510928344000,'train_68005','caae1f9bd2d0b61af2478e32ce881960','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【历史低价】BRAUN 博朗 Satin Hair 7 HD785 负离子吹风机','BRAUN博朗HD785电吹风机,采用负离子技术,中和正离子并使毛糙秀发恢复妥帖。另外还有INOTEC炫彩护色离子科技,这种先进离子性能在造型过程中可以处理卷结与静电,使秀发更加闪亮顺滑。 
最值得一提的是内置新型智能温度传感器,每分钟监控头发温度600次,及时智能调整秀发受热温度,从而避免过热,保护秀发。这款电吹风机拥有双头可以更换,2000W大功率,有速干效果,还拥有4档温度和2档风量。','1'] + - + name: adinfo + create: | + CREATE TABLE IF NOT EXISTS action( + reqId string, + eventTime timestamp, + ingestionTime timestamp, + actionValue int, + index(key=(reqId), ttl=0m, ttl_type=absolute) + ); + inserts: + - insert into action values ('train_0',1511188285000,1511188285000,0); + sql: | + select + reqId as reqId_1, + `reqId` as flattenRequest_reqId_original_0, + `eventTime` as flattenRequest_eventTime_original_1, + distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_111, + distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_112, + distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_113, + distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_114, + fz_top1_ratio(`f_itemTitle`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 as flattenRequest_f_itemTitle_window_top1_ratio_126, + case when !isnull(at(`f_userGender`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_userGender`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_userGender_window_count_127, + case when !isnull(at(`f_itemExpired`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_itemExpired`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_itemExpired_window_count_128 + from + `flattenRequest` + window flattenRequest_f_temDescription_eventTime_0s_172801s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100), + 
flattenRequest_f_temDescription_eventTime_0s_432001s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_requestId_eventTime_0s_432001s_100 as (partition by `f_requestId` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100) + ; + expect: + success: true \ No newline at end of file diff --git a/cases/debug/diff-debug-ttgwm3.yaml b/cases/debug/diff-debug-ttgwm3.yaml new file mode 100644 index 00000000000..77dd9463527 --- /dev/null +++ b/cases/debug/diff-debug-ttgwm3.yaml @@ -0,0 +1,51 @@ +db: test3 +debugs: [] +cases: + - + id: 0 + desc: diff-ttgwm + inputs: + - + name: flattenRequest + columns: ["reqId string","eventTime timestamp","f_index string","f_requestId string","f_userGender string","f_userAddress string","f_itemTitle string","f_temDescription string","f_itemExpired string"] + indexs: ["index1:f_itemTitle:eventTime:0m:absolute","index2:f_requestId:eventTime:0m:absolute","index3:f_temDescription:eventTime:0m:absolute"] + rows: + - ['train_178837',1511188561000,'train_178837','2cf15328efc127cc26ae35cac0e896db','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 
皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0'] + - ['train_1', 1511190175000,'train_1','14d51082b22b7e78177177fa82ef942d','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0'] + - ['train_147', 1511191732000,'train_147','c4c081b82bb4b4d6907924317c13e8a3','女','安徽省,合肥市,瑶海区,当涂北路与新海大道交口新海尚宸家园*号楼*','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0'] + - ['train_104', 1511192140000,'train_104','a9a98fd04053253626ab05ede3b37e43','女','*川省,成都市,蒲江县,海川阳光尚城*栋*','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0'] + - ['train_92', 1511192324000,'train_92','04c2c1e536c275ebf26fcc90aa86105f','女','河北省,衡水市,桃城区,河北省衡水市桃城区胜利西路利康胡同*号楼*单元*、河北省,保定市,北市区,河北省保定市莲池区*东路*号河大新区坤舆生活区','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0'] + - + name: adinfo + create: | + CREATE TABLE IF NOT EXISTS action( + reqId string, + eventTime timestamp, + ingestionTime timestamp, + actionValue int, + index(key=(reqId), ttl=0m, ttl_type=absolute) + ); + inserts: + - insert into action values ('train_0',1511188285000,1511188285000,0); + sql: | + select + reqId as reqId_1, + 
`reqId` as flattenRequest_reqId_original_0, + `eventTime` as flattenRequest_eventTime_original_1, + distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_111, + distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_112, + distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_113, + distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_114, + fz_top1_ratio(`f_itemTitle`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 as flattenRequest_f_itemTitle_window_top1_ratio_126, + case when !isnull(at(`f_userGender`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_userGender`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_userGender_window_count_127, + case when !isnull(at(`f_itemExpired`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_itemExpired`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_itemExpired_window_count_128 + from + `flattenRequest` + window flattenRequest_f_temDescription_eventTime_0s_172801s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_temDescription_eventTime_0s_432001s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as (partition by 
`f_itemTitle` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100), + flattenRequest_f_requestId_eventTime_0s_432001s_100 as (partition by `f_requestId` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100) + ; + expect: + success: true \ No newline at end of file diff --git a/cases/debug/diff-debug.yaml b/cases/debug/diff-debug.yaml new file mode 100644 index 00000000000..88a8caa1078 --- /dev/null +++ b/cases/debug/diff-debug.yaml @@ -0,0 +1,191 @@ +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: diff-miaoche + inputs: + - + name: behaviourTable + create: | + CREATE TABLE IF NOT EXISTS behaviourTable( + itemId string, + reqId string, + tags string, + instanceKey string, + eventTime timestamp, + rank string, + mcuid string, + ip string, + browser string, + browser_version string, + platform string, + query string, + sort_rule string, + _i_rank string + ); + inserts: + - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9'); + - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9'); + - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9'); + - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9'); + - insert into behaviourTable values 
('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9'); + - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9'); + - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9'); + - + name: feedbackTable + create: | + CREATE TABLE IF NOT EXISTS feedbackTable( + itemId string, + reqId string, + instanceKey string, + eventTime timestamp, + ingestionTime timestamp, + actionValue double, + rank string, + index(key=(instanceKey), ttl=0m, ttl_type=absolute) + ); + - + name: adinfo + create: | + CREATE TABLE IF NOT EXISTS adinfo( + id string, + ingestionTime timestamp, + item_ts timestamp, + I_brand_id string, + I_series_id string, + I_deal_record int, + I_weight int, + I_discount double, + I_msrp double, + I_min_price double, + I_price_difference double, + index(key=(id), ttl=0m, ttl_type=absolute) + ); + inserts: + - insert into adinfo values ('15966',1606829773651,1461455554999,'57','142',0,183,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829770353,1461168198999,'57','142',0,163,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829763134,1460476061999,'57','142',0,121,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829766231,1460736086999,'57','142',0,127,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829768458,1460949164999,'57','142',0,148,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829766806,1460772891999,'57','142',0,130,9.3,59400.0,55400.0,4000.0); + - insert into 
adinfo values ('15966',1606829747775,1458921819999,'57','142',0,0,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829747037,1458894698999,'57','142',0,0,8.9,59400.0,52900.0,6500.0); + - insert into adinfo values ('15966',1606829770755,1461215180999,'57','142',0,171,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829763146,1460471547999,'57','142',0,121,10.0,59400.0,59400.0,0.0); + - insert into adinfo values ('15966',1606829775064,1461600012999,'57','142',0,192,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829756644,1459958431999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829758171,1460045111999,'57','142',0,90,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829753139,1459612901999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829762301,1460390409999,'57','142',0,114,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829772288,1461401287999,'57','142',0,183,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829767305,1460822654999,'57','142',0,130,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829763039,1460427581999,'57','142',0,121,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829772193,1461377130999,'57','142',0,183,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829771541,1461290734999,'57','142',0,174,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829767875,1460908981999,'57','142',0,133,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829749745,1459304681999,'57','142',0,78,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829773598,1461434542999,'57','142',0,183,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values 
('15966',1606829764377,1460560778999,'57','142',0,121,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829761046,1460259644999,'57','142',0,102,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829769091,1461119108999,'57','142',0,163,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829770895,1461254443999,'57','142',0,163,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829753948,1459699532999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829763513,1460477116999,'57','142',0,121,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829760094,1460171824999,'57','142',0,99,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829764378,1460563205999,'57','142',0,121,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829762134,1460346316999,'57','142',0,114,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829764377,1460555818999,'57','142',0,121,10.0,59400.0,59400.0,0.0); + - insert into adinfo values ('15966',1606829748975,1459221747999,'57','142',0,75,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829774044,1461480568999,'57','142',0,192,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829748548,1459131536999,'57','142',0,69,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829768701,1460995386999,'57','142',0,148,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829772289,1461400681999,'57','142',0,183,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829757385,1460044803999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829774138,1461513605999,'57','142',0,183,9.8,59400.0,58400.0,1000.0); + - insert into adinfo values ('15966',1606829765662,1460672955999,'57','142',0,127,10.0,59400.0,59400.0,0.0); + - 
insert into adinfo values ('15966',1606829754685,1459785626999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829760278,1460217604999,'57','142',0,99,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829761325,1460304048999,'57','142',0,102,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829774768,1461573207999,'57','142',0,183,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829773702,1461471084999,'57','142',0,183,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829768628,1460987463999,'57','142',0,148,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829771673,1461340817999,'57','142',0,171,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829770007,1461198993999,'57','142',0,160,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829759457,1460131235999,'57','142',0,93,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829772025,1461386182999,'57','142',0,183,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829773662,1461459086999,'57','142',0,183,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829770716,1461205528999,'57','142',0,163,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829771475,1461265956999,'57','142',0,171,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829774558,1461515993999,'57','142',0,192,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829747037,1458894690999,'57','142',null,0,8.9,59400.0,52900.0,6500.0); + - insert into adinfo values ('15966',1606829750078,1459440043999,'57','142',0,81,9.8,59400.0,58400.0,1000.0); + - insert into adinfo values ('15966',1606829748683,1459180812999,'57','142',0,69,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values 
('15966',1606829772033,1461427208999,'57','142',0,183,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829747371,1458901214999,'57','142',0,0,9.5,66400.0,62900.0,3500.0); + - insert into adinfo values ('15966',1606829748068,1459094421999,'57','142',0,0,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829765721,1460675891999,'57','142',0,127,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829749046,1459267706999,'57','142',0,75,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829770792,1461220692999,'57','142',0,163,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829772034,1461342695999,'57','142',0,174,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829765263,1460649666999,'57','142',0,127,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829752101,1459526430999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829772025,1461386244999,'57','142',0,183,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829751449,1459445334999,'57','142',0,81,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829751069,1459480434999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829771056,1461295703999,'57','142',0,171,9.5,59400.0,56400.0,3000.0); + - insert into adinfo values ('15966',1606829749437,1459267358999,'57','142',0,75,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829748154,1459008046999,'57','142',0,0,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829771559,1461298469999,'57','142',0,174,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829755725,1459872065999,'57','142',0,87,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829775678,1461641476999,'57','142',0,202,9.4,59400.0,55900.0,3500.0); + - 
insert into adinfo values ('15966',1606829767681,1460864505999,'57','142',0,133,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829763017,1460476828999,'57','142',0,121,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829750105,1459353816999,'57','142',0,78,9.4,59400.0,55900.0,3500.0); + - insert into adinfo values ('15966',1606829759182,1460094013999,'57','142',0,93,9.2,59400.0,54900.0,4500.0); + - insert into adinfo values ('15966',1606829769452,1461081633999,'57','142',0,148,9.3,59400.0,55400.0,4000.0); + - insert into adinfo values ('15966',1606829765072,1460601578999,'57','142',0,127,9.3,59400.0,55400.0,4000.0); + sql: | + select * from + ( + select + `instanceKey` as instanceKey_1, + `eventTime` as behaviourTable_eventTime_original_0, + `instanceKey` as behaviourTable_instanceKey_original_1, + `_i_rank` as behaviourTable__i_rank_original_14, + `browser` as behaviourTable_browser_original_15, + `browser_version` as behaviourTable_browser_version_original_16, + `ip` as behaviourTable_ip_original_17, + `itemId` as behaviourTable_itemId_original_18, + `mcuid` as behaviourTable_mcuid_original_19, + `platform` as behaviourTable_platform_original_20, + `query` as behaviourTable_query_original_21, + `rank` as behaviourTable_rank_original_22 + from + `behaviourTable` + ) + as out0 + last join + ( + select + `behaviourTable`.`instanceKey` as instanceKey_3, + `feedbackTable_instanceKey`.`actionValue` as feedbackTable_actionValue_multi_direct_2, + `adinfo_id`.`I_brand_id` as adinfo_I_brand_id_multi_direct_3, + `adinfo_id`.`I_deal_record` as adinfo_I_deal_record_multi_direct_4, + `adinfo_id`.`I_discount` as adinfo_I_discount_multi_direct_5, + `adinfo_id`.`I_min_price` as adinfo_I_min_price_multi_direct_6, + `adinfo_id`.`I_msrp` as adinfo_I_msrp_multi_direct_7, + `adinfo_id`.`I_price_difference` as adinfo_I_price_difference_multi_direct_8, + `adinfo_id`.`I_series_id` as adinfo_I_series_id_multi_direct_9, + 
`adinfo_id`.`I_weight` as adinfo_I_weight_multi_direct_10, + `adinfo_id`.`ingestionTime` as adinfo_ingestionTime_multi_direct_11, + `adinfo_id`.`item_ts` as adinfo_item_ts_multi_direct_12, + `feedbackTable_instanceKey`.`rank` as feedbackTable_rank_multi_direct_13 + from + `behaviourTable` last join `feedbackTable` as `feedbackTable_instanceKey` on `behaviourTable`.`instanceKey` = `feedbackTable_instanceKey`.`instanceKey` + last join `adinfo` as `adinfo_id` on `behaviourTable`.`itemId` = `adinfo_id`.`id` + ) + as out1 + on out0.instanceKey_1 = out1.instanceKey_3; + expect: + success: true \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml index e3cca3769cd..fde0a440c5a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml @@ -16,8 +16,8 @@ 8 UTF-8 - 0.4.2 - 0.4.2-macos + 0.5.0-SNAPSHOT + 0.5.0-macos-SNAPSHOT test_suite/test_tmp.xml 1.8.9 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index 1157cc471f5..af540ba557d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -54,8 +54,8 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi }else{ FedbGlobalVar.mainInfo = FEDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/fedb-auto-test/tmp") - .fedbPath("/home/zhaowei01/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + 
.basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .fedbPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") .zk_cluster("172.24.4.55:10000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java index 376e6bcd79c..0a507cc1087 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java @@ -1291,4 +1291,12 @@ public static String getColumnTypeByType(int type){ } throw new IllegalArgumentException("not know type"); } + public static void setOnline(SqlExecutor sqlExecutor){ + Statement statement = sqlExecutor.getStatement(); + try { + statement.execute("SET @@execute_mode='online';"); + } catch (SQLException e) { + e.printStackTrace(); + } + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java new file mode 100644 index 00000000000..e7d884b19bf --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.openmldb.java_sdk_test.temp; + +import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; +import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Feature; +import io.qameta.allure.Step; +import io.qameta.allure.Story; +import lombok.extern.slf4j.Slf4j; +import org.testng.annotations.Test; + +/** + * @author zhaowei + * @date 2020/6/11 2:53 PM + */ +@Slf4j +@Feature("DebugTest") +public class DebugTest extends FedbTest { + + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/debug/diff-debug.yaml"}) + @Step("{testCase.desc}") + public void testSelect(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); + } + @Story("request") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"debug/diff-debug-bank.yaml"}) + public void testSelectRequestMode(SQLCase testCase) throws Exception { + FesqlUtil.setOnline(executor); + ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); + } + @Story("requestWithSp") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + public void testSelectRequestModeWithSp(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, 
SQLCaseType.kRequestWithSp).run(); + } + @Story("requestWithSpAysn") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + public void testSelectRequestModeWithSpAysn(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java new file mode 100644 index 00000000000..affe4c20734 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java @@ -0,0 +1,7 @@ +package com._4paradigm.openmldb.java_sdk_test.temp; + +import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; + +public class TestProcedure extends FedbTest { + +} From be6a9476aeecdf06226a6d85531cb0e0854149d2 Mon Sep 17 00:00:00 2001 From: wangkaidong Date: Tue, 7 Jun 2022 17:12:18 +0800 Subject: [PATCH 006/172] window union cluster --- .../window/test_window_union_cluster.yaml | 738 ++++++++++++ .../test_window_union_cluster_thousand.yaml | 1042 +++++++++++++++++ 2 files changed, 1780 insertions(+) create mode 100644 cases/function/window/test_window_union_cluster.yaml create mode 100644 cases/function/window/test_window_union_cluster_thousand.yaml diff --git a/cases/function/window/test_window_union_cluster.yaml b/cases/function/window/test_window_union_cluster.yaml new file mode 100644 index 00000000000..2775ea6fa63 --- /dev/null +++ b/cases/function/window/test_window_union_cluster.yaml @@ -0,0 +1,738 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + +db: test_zw +debugs: [] +cases: + - id: 0 + desc: 正常union + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 1 + desc: union的表列个数不一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000] + - [3,"cc",20,32,1.3,2.3,1590738992000] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS 
(UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 2 + desc: 列类型不一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 string"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 3 + desc: 列名不一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 4 + desc: 使用列别名后schema一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - 
[5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION (select id, c1,c3,c4,c5,c6,c7,c9 as c8 from {1}) + PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 5 + desc: 样本表使用索引,UNION表未命中索引 + mode: rtidb-unsupport + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 6 + desc: union表使用索引,样本表未命中索引 + mode: rtidb-unsupport + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: 
["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 7 + desc: 样本表union表都使用索引 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 8 + desc: union多表 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: 
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1},{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,126] + - [5,"dd",20,129] + - [6,"ee",21,34] + - id: 9 + desc: 结合limit + tags: ["TODO", "@zhaowei remove limit case here"] + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) limit 2; + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [5,"ee",21,34] + - id: 10 + desc: 使用两个pk + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - [6,"ee",21,33,1.4,2.4,1590738995000,"2020-05-04"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 
float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + rows: + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"aa",20,96] + - [5,"ee",21,34] + - [6,"ee",21,67] + - id: 11 + desc: 样本表和union表都使用子查询 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM (select * from {0}) WINDOW w1 AS (UNION (select * from {1}) PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 12 + desc: union多表,其中一个子查询 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + 
indexs: ["index1:c3:c7"] + rows: + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION (select * from {1}),{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,126] + - [5,"dd",20,129] + - [6,"ee",21,34] + - id: 13 + desc: 样本表不进入window + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + + - id: 14-1 + desc: WINDOW UNION 子查询, column cast 和 const cast子查询, string cast as date + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id 
int","c1 string","c3 int","c4str string","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2, "bb", 20, "31", 1.2, 2.2, 1590738991000] + - [3, "cc", 20, "32", 1.3, 2.3, 1590738992000] + sql: | + SELECT id, c1, c3, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (UNION (select id, c1, c3, bigint(c4str) as c4, c5, c6, c7, date("2020-10-01") as c8 from {1}) + PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, "2020-05-01", 2, 93] + - [4, "dd", 20, "2020-05-04", 2, 96] + - [5, "ee", 21, "2020-05-05", 1, 34] + - id: 14-2 + desc: WINDOW UNION 子查询, column cast 和 const cast子查询. cast column as partition key + inputs: + - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20.0, 30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20.1, 33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21.2, 34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000] + - [3,"cc",20,32,1.3,2.3,1590738992000] + sql: | + SELECT id, c1, c3, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM (select id, c1, int(c3f) as c3, c4, c5, c6, c7, c8 from {0}) WINDOW + w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1}) + PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, "2020-05-01", 2, 93] + - [4, "dd", 20, "2020-05-04", 
2, 96] + - [5, "ee", 21, "2020-05-05", 1, 34] + - id: 14-3 + desc: WINDOW UNION 子查询, timestamp(string) as window ts + inputs: + - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"] + - [4,"dd",20.1, 33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"] + - [5,"ee",21.2, 34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2, 1590738991000] + - [3,"cc",20,32,1.3,2.3, 1590738992000] + sql: | + SELECT id, c1, c3, c7, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM (select id, c1, int(c3f) as c3, c4, c5, c6, timestamp(c7str) as c7, c8 from {0}) WINDOW + w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1}) + PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93] + - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96] + - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34] + - id: 14-4 + desc: WINDOW UNION 子查询, cast另一种写法 cast(column as timestamp) as window ts + inputs: + - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"] + - [4,"dd",20.1, 33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"] + - [5,"ee",21.2, 34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2, 1590738991000] + - 
[3,"cc",20,32,1.3,2.3, 1590738992000] + sql: | + SELECT id, c1, c3, c7, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM (select id, c1, cast(c3f as int) as c3, c4, c5, c6, cast(c7str as timestamp) as c7, c8 from {0}) WINDOW + w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, cast("2020-10-01" as date) as c8 from {1}) + PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93] + - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96] + - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34] + - id: 16 + desc: 主表window 添加 INSTANCE_NOT_IN_WINDOW 没有明显错误日志 + db: db_wzx + sql: | + select + c1, + min(c1) over table_1_s2_t1 as table_1_c1_9, + min(c2) over table_1_s2_t1 as table_1_c2_10, + identity(case when lag(d1, 1) != null then distinct_count(d1) else null end) over table_1_s2_t1 as table_1_d1_11, + identity(case when lag(d2, 1) != null then distinct_count(d2) else null end) over table_1_s2_t1 as table_1_d2_12, + identity(case when lag(s1, 1) != null then distinct_count(s1) else null end) over table_1_s2_t1 as table_1_s1_13 + from + {0} as main + window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + inputs: + - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] + rows: + - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1"] + rows: + - ["1", "2", 
1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + expect: + order: c1 + columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] + rows: + - [1, 1, 2, NULL, NULL, NULL] + + - id: 16-2 + desc: 主表window 添加 INSTANCE_NOT_IN_WINDOW 没有明显错误日志 case when写法优化 + db: db_wzx + sql: | + select + c1, + min(c1) over table_1_s2_t1 as table_1_c1_9, + min(c2) over table_1_s2_t1 as table_1_c2_10, + case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11, + case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12, + case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13 + from + {0} as main + window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + inputs: + - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] + rows: + - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai 
string", "kn string", "ks string"] + indexs: ["index1:s2:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + expect: + order: c1 + columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] + rows: + - [1, 1, 2, NULL, NULL, NULL] + - id: 17 + desc: 两个索引不一致的表union + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7","index2:c1:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 18 + desc: 主表ts都大于副表的 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738995000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 19 + desc: 主表ts都小于副表的 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738991000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738992000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738993000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738994000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,63] + - [5,"ee",21,34] + - id: 20 + desc: 主表副表ts有交集 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id 
int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 21 + desc: 主表和副表分片在同一节点上 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + replicaNum: 3 + partitionNum: 1 + distribution: + - leader: "{tb_endpoint_1}" + followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + replicaNum: 3 + partitionNum: 1 + distribution: + - leader: "{tb_endpoint_1}" + followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 21-2 + desc: 主表和副表分片在不同的节点上 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 22 + desc: 两张副表,一张和主表在同一节点,另一张不在 + db: db_wzx + sql: | + select + c1, + min(c1) over table_1_s2_t1 as table_1_c1_9, + min(c2) over table_1_s2_t1 as table_1_c2_10, + case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11, + case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12, + case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13 + from + {0} as main + window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + inputs: + - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", 
"ks string"] + indexs: ["index1:s2:t1"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + expect: + order: c1 + columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] + rows: + - [1, 1, 2, NULL, NULL, NULL] \ No newline at end of file diff --git a/cases/function/window/test_window_union_cluster_thousand.yaml b/cases/function/window/test_window_union_cluster_thousand.yaml new file mode 100644 index 00000000000..432927ea744 --- /dev/null +++ b/cases/function/window/test_window_union_cluster_thousand.yaml @@ -0,0 +1,1042 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + + +db: test_zw +debugs: [] +cases: + - id: 0 + desc: 正常union + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,90] + - [4,"dd",20,96] + - [5,"ee",21,34] \ No newline at end of file From d38bf099c832bbcb4783e3c9fd9473b8014e5897 Mon Sep 17 00:00:00 2001 From: wangkaidong Date: Mon, 20 Jun 2022 14:09:57 +0800 Subject: [PATCH 007/172] v0.5.0 long window and disk table --- cases/function/disk_table/disk_table.yaml | 366 ++++++++++++++++++ cases/function/long_window/long_window.yaml | 331 ++++++++++++++++ .../java_sdk_test/common/FedbTest.java | 12 +- .../java_sdk_test/util/FesqlUtil.java | 131 ++++++- .../java_sdk_test/cluster/v030/DMLTest.java | 7 + .../java_sdk_test/cluster/v040/OutInTest.java | 9 + .../cluster/v050/DiskTableTest.java | 21 + .../cluster/v230/FunctionTest.java | 1 + .../cluster/v230/WindowTest.java | 22 ++ .../standalone/v030/DDLTest.java | 6 +- .../standalone/v030/DeploymentTest.java | 4 +- .../standalone/v030/OutInTest.java | 1 + .../openmldb-sdk-test/test_suite/test_all.xml | 2 +- .../openmldb/test_common/model/Table.java | 13 +- .../case_test/sdk/SQLCaseTest.java | 31 +- 15 files changed, 929 
insertions(+), 28 deletions(-) create mode 100644 cases/function/disk_table/disk_table.yaml create mode 100644 cases/function/long_window/long_window.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java diff --git a/cases/function/disk_table/disk_table.yaml b/cases/function/disk_table/disk_table.yaml new file mode 100644 index 00000000000..4da51b27aec --- /dev/null +++ b/cases/function/disk_table/disk_table.yaml @@ -0,0 +1,366 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: ['HDD 插入索引和ts 一样的数据'] +cases: + - + id: 0 + desc: 创建SSD表,插入多条数据,查询 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 1 + desc: 创建HDD表,插入多条数据,查询 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + + - + id: 2 + desc: ssd和内存表,join + mode: cluster-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 3 + desc: hdd和内存表,join + mode: cluster-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 
1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 4 + desc: 内存表和ssd,join + mode: cluster-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 5 + desc: 内存表和hdd,join + mode: cluster-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 
21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 6 + desc: hdd和ssd,join + mode: cluster-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 7 + desc: hdd和ssd,join + mode: cluster-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {1}.c1,{1}.c2,{2}.c3,{0}.c4 from {1} last join {2} ORDER BY hdd.c3 on ssd.c1=hdd.c1 last join mem on ssd.c1 = mem.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + + - id: 8 + desc: ssd union 内存表 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + 
indexs: ["index1:c3:c7"] + storage: SSD + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 9 + desc: hdd union 内存表 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: HDD + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 10 + desc: 内存表 union ssd + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - 
[5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: SSD + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 11 + desc: 内存表 union hdd + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: HDD + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 12 + desc: SSD 插入索引和ts 一样的数据 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - id: 12 + desc: HDD 插入索引和ts 一样的数据 + inputs: + - + columns : ["c1 
string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - id: 13 + desc: storage_mode=其他字符 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: hdp + rows: + - ["aa", 2, 3, 1590738989000] + sql: select * from {0}; + expect: + success: false \ No newline at end of file diff --git a/cases/function/long_window/long_window.yaml b/cases/function/long_window/long_window.yaml new file mode 100644 index 00000000000..97398b10518 --- /dev/null +++ b/cases/function/long_window/long_window.yaml @@ -0,0 +1,331 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: ["options(long_window='w1:2d')"] +cases: + - + id: 0 + desc: options(long_window='w1:2') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 1 + desc: options(long_window='w1:2d') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7::latest"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 2 + desc: options(long_window='w1:2h') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - 
[5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 3 + desc: options(long_window='w1:2m') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2m') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 4 + desc: options(long_window='w1:2s') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2s') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 5 + desc: avg算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: 
["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, avg(c4) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 6 + desc: min算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2d') SELECT id, c1, min(c4) OVER w1 as w1_c4_min FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 7 + desc: max算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2h') SELECT id, c1, max(c4) OVER w1 as w1_c4_max FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 
ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 8 + desc: count算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2m') SELECT id, c1, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 9 + desc: 相同的PARTITION BY和ORDER BY,长窗口和短窗口可合并 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, avg(c4) OVER w1 as w1_c4_avg from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 10 + desc: 相同的PARTITION BY和ORDER BY,长窗口之间可合并 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2,w2:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 11 + desc: 相同的PARTITION BY和ORDER BY,-短窗口之间可合并(三个窗口 一个长窗口,俩个短窗口) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg, count(c3) OVER w3 as w3_c3_count from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 12 + desc: 不同的PARTITION BY和ORDER BY,长窗口和短窗口混合-不可合并窗口 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - 
[4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c5) OVER w1 as w1_c5_sum, + avg(c5) OVER w2 as w2_c5_avg from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 13 + desc: 窗口名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w9090:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 14 + desc: options(long_window='w1:2y') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w9090:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 15 + desc: options格式错误 + inputs: + - + 
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:100') SELECT id, c1, avg(c5) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index 048b2db2f56..54a3b7d8bbe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -54,14 +54,14 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi }else{ FedbGlobalVar.mainInfo = FEDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/wangkaidong/fedb-auto-test/tmp") - .fedbPath("/home/wangkaidong/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:10018") + .basePath("/home/wangkaidong/openmldb-auto-test/tmp") + .fedbPath("/home/wangkaidong/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30016") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10023", "172.24.4.55:10024")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10020", 
"172.24.4.55:10021", "172.24.4.55:10022")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10025")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30026", "172.24.4.55:30027")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30023", "172.24.4.55:30024", "172.24.4.55:30025")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30028")) .build(); FedbGlobalVar.env = "cluster"; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java index 18f63b48126..620bcd1b6c8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java @@ -21,6 +21,9 @@ import com._4paradigm.openmldb.Schema; import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBColumn; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBIndex; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBSchema; import com._4paradigm.openmldb.jdbc.CallablePreparedStatement; import com._4paradigm.openmldb.jdbc.SQLResultSet; import com._4paradigm.openmldb.sdk.QueryFuture; @@ -252,7 +255,7 @@ public static FesqlResult sqls(SqlExecutor executor, String dbName, List public static FesqlResult sqlRequestMode(SqlExecutor executor, String dbName, Boolean need_insert_request_row, String sql, InputDesc input) throws SQLException { FesqlResult fesqlResult = null; - if (sql.toLowerCase().startsWith("select")) { + if (sql.toLowerCase().startsWith("select")||sql.toLowerCase().startsWith("deploy")) { fesqlResult = 
selectRequestModeWithPreparedStatement(executor, dbName, need_insert_request_row, sql, input); } else { logger.error("unsupport sql: {}", sql); @@ -290,6 +293,8 @@ public static FesqlResult sql(SqlExecutor executor, String dbName, String sql) { FesqlResult fesqlResult = null; if (sql.startsWith("create database") || sql.startsWith("drop database")) { fesqlResult = db(executor, sql); + }else if(sql.startsWith("CREATE INDEX")||sql.startsWith("create index")){ + fesqlResult = createIndex(executor, dbName, sql); }else if (sql.startsWith("create") || sql.startsWith("CREATE") || sql.startsWith("DROP")|| sql.startsWith("drop")) { fesqlResult = ddl(executor, dbName, sql); } else if (sql.startsWith("insert")||sql.startsWith("INSERT")) { @@ -298,12 +303,39 @@ public static FesqlResult sql(SqlExecutor executor, String dbName, String sql) { fesqlResult = showDeploys(executor,dbName,sql); }else if(sql.startsWith("show deployment")){ fesqlResult = deploy(executor, dbName, sql); + }else if(sql.startsWith("desc ")){ + fesqlResult = desc(executor,dbName,sql); + }else if(sql.contains("outfile")){ + fesqlResult = selectInto(executor, dbName, sql); }else { fesqlResult = select(executor, dbName, sql); } return fesqlResult; } + public static FesqlResult selectInto(SqlExecutor executor,String dbName,String outSql){ + if (outSql.isEmpty()){ + return null; + } + logger.info("select into:{}",outSql); + FesqlResult fesqlResult = new FesqlResult(); + ResultSet rawRs = executor.executeSQL(dbName, outSql); + if (rawRs == null) { + fesqlResult.setOk(false); + fesqlResult.setMsg("executeSQL fail, result is null"); + } else if (rawRs instanceof SQLResultSet){ + try { + SQLResultSet rs = (SQLResultSet)rawRs; + fesqlResult.setOk(true); + } catch (Exception e) { + fesqlResult.setOk(false); + fesqlResult.setMsg(e.getMessage()); + } + } + logger.info("select result:{} \n", fesqlResult); + return fesqlResult; + } + public static FesqlResult deploy(SqlExecutor executor,String dbName,String 
showdeploySql){ if (showdeploySql.isEmpty()){ return null; @@ -376,6 +408,18 @@ private static String convertRestultSetToListDeploy(SQLResultSet rs) throws SQLE return string; } + private static List convertRestultSetToListDesc(SQLResultSet rs) throws SQLException { + List res = new ArrayList<>(); + while (rs.next()) { + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + String string=String.valueOf(getColumnData(rs, i)); + res.add(string); + } + } + return res; + } + private static OpenmldbDeployment parseDeployment(List lines){ OpenmldbDeployment deployment = new OpenmldbDeployment(); List inColumns = new ArrayList<>(); @@ -414,6 +458,91 @@ private static OpenmldbDeployment parseDeployment(List lines){ return deployment; } + public static FesqlResult desc(SqlExecutor executor,String dbName,String descSql){ + if (descSql.isEmpty()){ + return null; + } + logger.info("desc:{}",descSql); + FesqlResult fesqlResult = new FesqlResult(); + ResultSet rawRs = executor.executeSQL(dbName, descSql); + + if (rawRs == null) { + fesqlResult.setOk(false); + fesqlResult.setMsg("executeSQL fail, result is null"); + } else if (rawRs instanceof SQLResultSet){ + try { + SQLResultSet rs = (SQLResultSet)rawRs; + JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + fesqlResult.setOk(true); + String deployStr = convertRestultSetToListDeploy(rs); + List listDesc = convertRestultSetToListDesc(rs); + String[] strings = deployStr.split("\n"); + List stringList = Arrays.asList(strings); + OpenMLDBSchema openMLDBSchema = parseSchema(stringList); + fesqlResult.setSchema(openMLDBSchema); + } catch (Exception e) { + fesqlResult.setOk(false); + fesqlResult.setMsg(e.getMessage()); + } + } + + return fesqlResult; + } + + public static OpenMLDBSchema parseSchema(List lines){ + OpenMLDBSchema schema = new OpenMLDBSchema(); + List cols = new ArrayList<>(); + List indexs = new ArrayList<>(); + Iterator it = lines.iterator(); +// while(it.hasNext()){ +// String 
line = it.next(); +// if(line.contains("ttl_type")) break; +// if(line.startsWith("#")||line.startsWith("-"))continue; +// OpenMLDBColumn col = new OpenMLDBColumn(); +// String[] infos = line.split("\\s+"); +// col.setId(Integer.parseInt(infos[0])); +// col.setFieldName(infos[1]); +// col.setFieldType(infos[2]); +// col.setNullable(infos[3].equals("NO")?false:true); +// cols.add(col); +// it.remove(); +// } + while(it.hasNext()){ + String line = it.next().trim(); + if(line.startsWith("#")||line.startsWith("-"))continue; + OpenMLDBIndex index = new OpenMLDBIndex(); + String[] infos = line.split("\\s+"); + index.setId(Integer.parseInt(infos[0])); + index.setIndexName(infos[1]); + index.setKeys(Arrays.asList(infos[2].split("\\|"))); + index.setTs(infos[3]); + index.setTtl(infos[4]); + index.setTtlType(infos[5]); + indexs.add(index); + //it.remove(); + } + schema.setIndexs(indexs); + //schema.setColumns(cols); + return schema; + } + + public static FesqlResult createIndex(SqlExecutor executor, String dbName, String sql) { + if (sql.isEmpty()) { + return null; + } + logger.info("ddl sql:{}", sql); + FesqlResult fesqlResult = new FesqlResult(); + boolean createOk = false; + try { + createOk = executor.getStatement().execute(sql); + Thread.sleep(10000); + } catch (Exception e) { + e.printStackTrace(); + } + fesqlResult.setOk(createOk); + logger.info("ddl result:{}", fesqlResult); + return fesqlResult; + } public static FesqlResult insert(SqlExecutor executor, String dbName, String insertSql) { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java index b6e5371bdb5..312b88e4239 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java @@ -47,4 +47,11 @@ public void testMultiInsert(SQLCase testCase){ public void testMultiInsertByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/dml/multi_insert.yaml") + @Story("multi-insert") + public void testMultiInsertBySDK(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java index 44144462c1d..889400f75c8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java @@ -27,4 +27,13 @@ public class OutInTest extends FedbTest { public void testOutInByOffline(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } + + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/out_in/test_out_in.yaml") + @Story("online") + public void testOutInByOnline(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + } \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java new file mode 100644 index 00000000000..bab74200fc9 --- 
/dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java @@ -0,0 +1,21 @@ +package com._4paradigm.openmldb.java_sdk_test.cluster.v050; + +import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Story; +import lombok.extern.slf4j.Slf4j; +import org.testng.annotations.Test; + +@Slf4j +public class DiskTableTest extends FedbTest { + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + @Story("Disk-Table") + public void testDiskTable(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java index 2777386e9df..a4d2756fd22 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java @@ -52,6 +52,7 @@ public void testFunctionRequestMode(SQLCase testCase) throws Exception { public void testFunctionRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } +// 146-157有问题 @Story("requestWithSpAysn") @Test(dataProvider = "getCase") @Yaml(filePaths = "function/function/") diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java index ee4acce1407..3aa2af89cba 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java @@ -66,4 +66,26 @@ public void testWindowRequestModeWithSp(SQLCase testCase) throws Exception { public void testWindowRequestModeWithSpAsync(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } + + + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/window/test_window_union_cluster.yaml"}) + public void testWindowBatch2(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); + } + + @Story("request") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/window/test_window_union_cluster_thousand.yaml"}) + public void testWindowRequestMode2(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); + } + + @Story("requestWithSp") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/window/test_window_union_cluster.yaml"}) + public void testWindowRequestModeWithSp2(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java index 06315a8a1aa..ac9cac4d656 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java @@ -71,6 +71,7 @@ public void testCreateNoIndex(SQLCase testCase){ //SDK版本 + //all-pass @Test(dataProvider = "getCase") @Yaml(filePaths = "function/ddl/test_create.yaml") @Story("create") @@ -86,7 +87,7 @@ public void testTTLSDK(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } - //有问题 + //pass 单机 @Test(dataProvider = "getCase") @Yaml(filePaths = "function/ddl/test_options.yaml") @Story("options") @@ -94,7 +95,7 @@ public void testOptionsSDK(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } - //有问题 + //all pass @Test(dataProvider = "getCase") @Yaml(filePaths = "function/ddl/test_create_index.yaml") @Story("create_index") @@ -102,6 +103,7 @@ public void testCreateIndexSDK(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } + //all pass @Test(dataProvider = "getCase") @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") @Story("create_no_index") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java index ffa1c8b1e1c..c85f9ba8753 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DeploymentTest.java @@ -61,7 +61,7 @@ public void testCreateSDK(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } - //0有问题 + //all pass @Test(dataProvider = "getCase") @Yaml(filePaths = "function/deploy/test_show_deploy.yaml") @Story("show") @@ -69,7 +69,7 @@ public void testShowSDK(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } - //0 有问题 + //all pass @Test(dataProvider = "getCase") @Yaml(filePaths = "function/deploy/test_drop_deploy.yaml") @Story("drop") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java index eacabb8fb27..f267573ba3a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java @@ -21,6 +21,7 @@ public void testOutIn(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kStandaloneCLI).run(); } + //11 17 18没有pass @Test(dataProvider = "getCase") @Yaml(filePaths = "function/out_in/test_out_in.yaml") @Story("Out-In") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_all.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_all.xml index d8269507a0f..9709ef4e8a0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_all.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_all.xml @@ -2,7 +2,7 @@ - + diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java index 50aaa493440..ae50b2016c4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java @@ -48,6 +48,7 @@ public class Table implements Serializable{ private int repeat = 1; private int replicaNum = 1; private int partitionNum = 1; + private String storage; private List distribution; private List common_column_indices; @@ -68,7 +69,7 @@ public String extractCreate() { if (!StringUtils.isEmpty(create)) { return create; } - return buildCreateSQLFromColumnsIndexs(name, getColumns(), getIndexs(), replicaNum,partitionNum,distribution); + return buildCreateSQLFromColumnsIndexs(name, getColumns(), getIndexs(), replicaNum,partitionNum,distribution,storage); } // public String extractCreate() { @@ -405,7 +406,7 @@ public static String buildInsertSQLWithPrepared(String name, List column } public static String buildCreateSQLFromColumnsIndexs(String name, List columns, List indexs, - int replicaNum,int partitionNum,List distribution) { + int replicaNum,int partitionNum,List distribution,String storage) { if (CollectionUtils.isEmpty(columns)) { return ""; } @@ -459,7 +460,13 @@ public static String buildCreateSQLFromColumnsIndexs(String name, List c } distributionStr.deleteCharAt(distributionStr.length()-1).append("]"); } - String option = String.format("options(partitionnum=%s,replicanum=%s%s)",partitionNum,replicaNum,distributionStr); + String option = null; + if(StringUtils.isNotEmpty(storage)){ + option = 
String.format("options(partitionnum=%s,replicanum=%s%s,storage_mode=\"%s\")",partitionNum,replicaNum,distributionStr,storage); + }else { + option = String.format("options(partitionnum=%s,replicanum=%s%s)",partitionNum,replicaNum,distributionStr); + } + //String option = String.format("options(partitionnum=%s,replicanum=%s%s)",partitionNum,replicaNum,distributionStr); sql = sql+option+";"; // if (replicaNum == 1) { // sql += ");"; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/test/java/com/_4paradigm/openmldb/test_common/case_test/sdk/SQLCaseTest.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/test/java/com/_4paradigm/openmldb/test_common/case_test/sdk/SQLCaseTest.java index 465039216f0..f19813ecf77 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/test/java/com/_4paradigm/openmldb/test_common/case_test/sdk/SQLCaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/test/java/com/_4paradigm/openmldb/test_common/case_test/sdk/SQLCaseTest.java @@ -40,19 +40,24 @@ public void testSqlFormatAuto() { @Test public void testCreateBuilder() { - Assert.assertEquals(Table.buildCreateSQLFromColumnsIndexs("auto_t1", +// Assert.assertEquals(Table.buildCreateSQLFromColumnsIndexs("auto_t1", +// Lists.newArrayList("c1 string", "c2 bigint", "c3 int", "c4 float", +// "c5 timestamp"), Lists.newArrayList("index1:c1:c5", "index2:c1|c2:c5:365d", +// "index3:c1:c5:1000:absolute"), 1,1,null,"SSD"), +// "create table auto_t1(\n" + +// "c1 string,\n" + +// "c2 bigint,\n" + +// "c3 int,\n" + +// "c4 float,\n" + +// "c5 timestamp,\n" + +// "index(key=(c1),ts=c5),\n" + +// "index(key=(c1,c2),ts=c5,ttl=365d),\n" + +// "index(key=(c1),ts=c5,ttl=1000,ttl_type=absolute)" + +// ");"); + + String sql = Table.buildCreateSQLFromColumnsIndexs("auto_t1", Lists.newArrayList("c1 string", "c2 bigint", "c3 int", "c4 float", - "c5 timestamp"), Lists.newArrayList("index1:c1:c5", 
"index2:c1|c2:c5:365d", - "index3:c1:c5:1000:absolute"), 1,1,null), - "create table auto_t1(\n" + - "c1 string,\n" + - "c2 bigint,\n" + - "c3 int,\n" + - "c4 float,\n" + - "c5 timestamp,\n" + - "index(key=(c1),ts=c5),\n" + - "index(key=(c1,c2),ts=c5,ttl=365d),\n" + - "index(key=(c1),ts=c5,ttl=1000,ttl_type=absolute)" + - ");"); + "c5 timestamp"), Lists.newArrayList("index1:c1:c5"), 1, 1, null, null); + System.out.println(sql); } } From b748b21d101a449fbf9dd42fc3c4e14ba7c98cdf Mon Sep 17 00:00:00 2001 From: wangkaidong Date: Mon, 20 Jun 2022 14:13:17 +0800 Subject: [PATCH 008/172] python deployment --- .../python-sdk-test/entity/fedb_result.py | 1 + .../entity/openmldb_deployment.py | 25 +++++++ .../standalone/test_standalone_deploy.py | 10 ++- .../python-sdk-test/util/fedb_util.py | 72 ++++++++++++++++--- 4 files changed, 98 insertions(+), 10 deletions(-) create mode 100644 test/integration-test/python-sdk-test/entity/openmldb_deployment.py diff --git a/test/integration-test/python-sdk-test/entity/fedb_result.py b/test/integration-test/python-sdk-test/entity/fedb_result.py index 1b5a3cacd74..d0fd960c215 100644 --- a/test/integration-test/python-sdk-test/entity/fedb_result.py +++ b/test/integration-test/python-sdk-test/entity/fedb_result.py @@ -22,6 +22,7 @@ def __init__(self): self.resultSchema = None self.msg = None self.rs = None + self.deployment = None def __str__(self): resultStr = "FesqlResult{ok=" + str(self.ok) + ", count=" + str(self.count) + ", msg=" + str(self.msg) + "}" diff --git a/test/integration-test/python-sdk-test/entity/openmldb_deployment.py b/test/integration-test/python-sdk-test/entity/openmldb_deployment.py new file mode 100644 index 00000000000..888a9fe98a1 --- /dev/null +++ b/test/integration-test/python-sdk-test/entity/openmldb_deployment.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +class OpenmldbDeployment(): + + def __init__(self): + self.dbName = None + self.name = None + self.sql = None + self.inColumns = None + self.outColumns = None + diff --git a/test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py b/test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py index 2b6aeaddee7..f37d85e5799 100644 --- a/test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py +++ b/test/integration-test/python-sdk-test/standalone/test_standalone_deploy.py @@ -24,7 +24,8 @@ log = LogManager('python-sdk-test').get_logger_and_add_handlers() -#都不行 + +# 都不行 class TestStandaloneDeploy(StandaloneTest): @pytest.mark.parametrize("testCase", getCases(["/function/deploy/test_create_deploy.yaml"])) @@ -38,5 +39,12 @@ def test_create(self, testCase): @allure.feature("deploy") @allure.story("show") def test_show(self, testCase): + print(testCase) + fedb_executor.build(self.connect, testCase).run() + + @pytest.mark.parametrize("testCase", getCases(["/function/deploy/test_drop_deploy.yaml"])) + @allure.feature("deploy") + @allure.story("drop") + def test_drop(self, testCase): print(testCase) fedb_executor.build(self.connect, testCase).run() \ No newline at end of file diff --git a/test/integration-test/python-sdk-test/util/fedb_util.py b/test/integration-test/python-sdk-test/util/fedb_util.py index 6da87e00200..5739c4c039d 100644 --- a/test/integration-test/python-sdk-test/util/fedb_util.py +++ b/test/integration-test/python-sdk-test/util/fedb_util.py @@ -88,6 +88,7 @@ def 
sqls(executor, dbName: str, sqls: list): def sql(executor, dbName: str, sql: str): + #useDB(executor,dbName) # fedbResult = None if sql.startswith("create") or sql.startswith("drop"): fedbResult = ddl(executor, dbName, sql) @@ -95,10 +96,62 @@ def sql(executor, dbName: str, sql: str): fedbResult = insert(executor, dbName, sql) elif sql.startswith("load"): fedbResult = load(executor,sql) + elif sql.startswith("deploy"): + fedbResult = deploy(executor, dbName, sql) + elif sql.__contains__("outfile"): + fedbResult = outfile(executor, dbName, sql) + # elif sql.startswith("show deployment"): + # fedbResult = showDeployment(executor,dbName,sql) else: fedbResult = select(executor, dbName, sql) return fedbResult +def outfile(executor, dbName: str, sql: str): + log.info("outfile sql:"+sql) + fedbResult = FedbResult() + try: + executor.execute(sql) + time.sleep(4) + fedbResult.ok = True + fedbResult.msg = "ok" + except Exception as e: + log.info("select into exception is {}".format(e)) + fedbResult.ok = False + fedbResult.msg = str(e) + log.info("select into result:" + str(fedbResult)) + return fedbResult + +def useDB(executor,dbName:str): + sql = "use {};".format(dbName) + log.info("use sql:"+sql) + executor.execute(sql) + +def deploy(executor, dbName: str, sql: str): + useDB(executor,dbName) + log.info("deploy sql:"+sql) + fedbResult = FedbResult() + executor.execute(sql) + fedbResult.ok = True + fedbResult.msg = "ok" + +def showDeployment(executor, dbName: str, sql: str): + useDB(executor,dbName) + log.info("show deployment sql:" + sql) + fedbResult = FedbResult() + try: + rs = executor.execute(sql) + fedbResult.ok = True + fedbResult.msg = "ok" + fedbResult.rs = rs + fedbResult.count = rs.rowcount + #fedbResult.result = rs.fetchall() + fedbResult.result = convertRestultSetToListRS(rs) + except Exception as e: + log.info("select exception is {}".format(e)) + fedbResult.ok = False + fedbResult.msg = str(e) + log.info("select result:" + str(fedbResult)) + return fedbResult 
def selectRequestMode(executor, dbName: str, selectSql: str, input): if selectSql is None or len(selectSql) == 0: @@ -163,6 +216,7 @@ def sqlRequestMode(executor, dbName: str, sql: str, input): def insert(executor, dbName: str, sql: str): + useDB(executor,dbName) log.info("insert sql:" + sql) fesqlResult = FedbResult() try: @@ -295,8 +349,8 @@ def select(executor, dbName: str, sql: str): fedbResult.msg = "ok" fedbResult.rs = rs fedbResult.count = rs.rowcount - fedbResult.result = rs.fetchall() - #fedbResult.result = convertRestultSetToListRS(rs) + #fedbResult.result = rs.fetchall() + fedbResult.result = convertRestultSetToListRS(rs) except Exception as e: log.info("select exception is {}".format(e)) fedbResult.ok = False @@ -341,13 +395,13 @@ def createAndInsert(executor, dbName, inputs, requestMode: bool = False): dbnames.add(dbName) fedbResult = FedbResult() if inputs != None and len(inputs) > 0: - for index, input in enumerate(inputs): - if input.__contains__('db') == True and dbnames.__contains__(input.get('db')) == False: - db = input.get('db') - log.info("db:" + db) - createDB(executor,db) - dbnames.add(db) - log.info("create input db, dbName:"+db) + # for index, input in enumerate(inputs): + # if input.__contains__('db') == True and dbnames.__contains__(input.get('db')) == False: + # db = input.get('db') + # log.info("db:" + db) + # createDB(executor,db) + # dbnames.add(db) + # log.info("create input db, dbName:"+db) for index, input in enumerate(inputs): From 396bf6fc141228208c07aeb9ff7b9a59366688ce Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 23 Jun 2022 10:08:49 +0800 Subject: [PATCH 009/172] Changes --- cases/debug/diff-debug-myhug.yaml | 130 ++++++++++++++++++ .../http_test/common/ClusterTest.java | 8 +- .../openmldb-sdk-test/pom.xml | 4 +- .../java_sdk_test/common/FedbTest.java | 8 +- .../executor/BaseSQLExecutor.java | 2 +- .../executor/StoredProcedureSQLExecutor.java | 2 +- .../java_sdk_test/temp/DebugTest.java | 4 +- 7 files changed, 144 
insertions(+), 14 deletions(-) create mode 100644 cases/debug/diff-debug-myhug.yaml diff --git a/cases/debug/diff-debug-myhug.yaml b/cases/debug/diff-debug-myhug.yaml new file mode 100644 index 00000000000..00b9ba5599d --- /dev/null +++ b/cases/debug/diff-debug-myhug.yaml @@ -0,0 +1,130 @@ +db: test_zw1 +debugs: [] +cases: + - + id: 0 + desc: diff-myhug + inputs: + - + name: flattenRequest + columns: ["reqId string","eventTime timestamp","index1 string","uUserId string","zUserId string","fRequestId string","fDisplayRank double","fSessionId string","nRoomUserNum double","nRoomInLm double","nRoomInGame double","nRequestTime timestamp","zSex string","zPhoneType string","zLongitude double","zLatitude double","zPosition string","zHome string","zChannel string","zAge double","zHasCreatedGroup string","zRegTime timestamp","zFaceScore double","zFansNum double","zFollowNum double","zGainNum double","zSGiftNum double","zSWihsperNum double","zSChatMsgNum double","zLiveAvgLength double","zLiveFrequency double","zLiveDawn double","zLiveMorning double","zLiveAfternoon double","zLiveEvening double","zMaxRGiftNumOneUser double","zRGiftUserNum double","zLiveMsgNum double","zLiveDisharmony double","zLiveShareNum double","zSmallGiftNum double","zBigGiftNum double","uSex string","uPhoneType string","uLongitude double","uLatitude double","uPosition string","uHome string","uChannel string","uAge double","uHasJoinedGroup string","uRegTime timestamp","uFirstChargeNum double","uLatestChargeTime timestamp","uRemainDiamondNum double","uFansNum double","uFollowNum double","uGainNum double","uSGiftNum double","uSWihsperNum double","uSChatMsgNum double","uLiveSMsgNum double","uHasBeenBanned double","uSMsgFiltered double","uWatchDawn double","uWatchMorning double","uWatchAfternoon double","uWatchEvening double","uWatchAvgLength double","uEnterRoomFrequency double","uTopThreeNum double","uWatchSameCity double","uPlayGame string","uLive double","uLmNum double","uSBigGiftNum double","uSSmallGiftNum 
double","uRGiftUserNum double","uWatchTopList int","split_id int"] + create: | + CREATE TABLE IF NOT EXISTS flattenRequest( + reqId string, + eventTime timestamp, + index1 string, + uUserId string, + zUserId string, + fRequestId string, + fDisplayRank double, + fSessionId string, + nRoomUserNum double, + nRoomInLm double, + nRoomInGame double, + nRequestTime timestamp, + zSex string, + zPhoneType string, + zLongitude double, + zLatitude double, + zPosition string, + zHome string, + zChannel string, + zAge double, + zHasCreatedGroup string, + zRegTime timestamp, + zFaceScore double, + zFansNum double, + zFollowNum double, + zGainNum double, + zSGiftNum double, + zSWihsperNum double, + zSChatMsgNum double, + zLiveAvgLength double, + zLiveFrequency double, + zLiveDawn double, + zLiveMorning double, + zLiveAfternoon double, + zLiveEvening double, + zMaxRGiftNumOneUser double, + zRGiftUserNum double, + zLiveMsgNum double, + zLiveDisharmony double, + zLiveShareNum double, + zSmallGiftNum double, + zBigGiftNum double, + uSex string, + uPhoneType string, + uLongitude double, + uLatitude double, + uPosition string, + uHome string, + uChannel string, + uAge double, + uHasJoinedGroup string, + uRegTime timestamp, + uFirstChargeNum double, + uLatestChargeTime timestamp, + uRemainDiamondNum double, + uFansNum double, + uFollowNum double, + uGainNum double, + uSGiftNum double, + uSWihsperNum double, + uSChatMsgNum double, + uLiveSMsgNum double, + uHasBeenBanned double, + uSMsgFiltered double, + uWatchDawn double, + uWatchMorning double, + uWatchAfternoon double, + uWatchEvening double, + uWatchAvgLength double, + uEnterRoomFrequency double, + uTopThreeNum double, + uWatchSameCity double, + uPlayGame string, + uLive double, + uLmNum double, + uSBigGiftNum double, + uSSmallGiftNum double, + uRGiftUserNum double, + uWatchTopList int, + split_id int, + index(key=(uHasJoinedGroup), ttl=0m, ttl_type=absolute, ts=`eventTime`), + index(key=(uPlayGame), ttl=0m, ttl_type=absolute, 
ts=`eventTime`), + index(key=(uSex), ttl=(0m, 0), ttl_type=absandlat, ts=`eventTime`), + index(key=(uUserId), ttl=(0m, 0), ttl_type=absandlat, ts=`eventTime`), + index(key=(zChannel), ttl=0m, ttl_type=absolute, ts=`eventTime`), + index(key=(zUserId), ttl=0m, ttl_type=absolute, ts=`eventTime`) + ); + rows: + - ['1000013',1494076990000,'1000013','42856512','33788164','31318526',116.0,'239113725',6.0,0.0,0.0,1494076990000,'2','iPhone8,2',120.6397,31.257472999999997,'中国江苏省苏州市','','app_store',25.0,'0',1458401107000,1.0,60.0,10.0,0.0,0.0,1.0,5.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,'1','',0.0,0.0,'','','',0.0,'0',null,0.0,null,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,'0',0.0,0.0,0.0,0.0,0.0,null,1] + - + name: bo_hislabel + columns: ["ingestionTime timestamp","zUserId string","uUserId string","nRequestTime timestamp","fWatchedTimeLen double"] + create: | + CREATE TABLE IF NOT EXISTS bo_hislabel( + ingestionTime timestamp, + zUserId string, + uUserId string, + nRequestTime timestamp, + fWatchedTimeLen double, + index(key=(zUserId), ttl=0m, ttl_type=absolute, ts=`ingestionTime`) + ); + rows: + - [1494076376000,'33788164','42856512',1494076376000,2.0] + - [1494076990000,'33788164','42856512',1494076990000,1.0] + sql: | + select + reqId as reqId_75, + max(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s_100 as bo_hislabel_fWatchedTimeLen_multi_max_74, + avg(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s_100 as bo_hislabel_fWatchedTimeLen_multi_avg_75 + from + ( + select `eventTime` as `ingestionTime`, `zUserId` as `zUserId`, `uUserId` as `uUserId`, timestamp('2019-07-18 09:20:20') as `nRequestTime`, double(0) as `fWatchedTimeLen`, reqId from `flattenRequest` + ) + window bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s_100 as ( + UNION (select `ingestionTime`, `zUserId`, `uUserId`, `nRequestTime`, `fWatchedTimeLen`, '' as reqId from `bo_hislabel`) + partition by 
`zUserId`,`uUserId` order by `ingestionTime` rows_range between 172800999 preceding and 1s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW); + expect: + success: true \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java index 5a5d377f802..1a6d7b334ce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java @@ -67,12 +67,12 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers .deployType(OpenMLDBDeployType.CLUSTER) .basePath("/home/zhaowei01/fedb-auto-test/tmp") .fedbPath("/home/zhaowei01/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:10000") + .zk_cluster("172.24.4.55:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10004", "172.24.4.55:10005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10001", "172.24.4.55:10002", "172.24.4.55:10003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10006")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) .build(); FedbGlobalVar.env = "cluster"; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml index fde0a440c5a..0cccc1eed76 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml @@ -16,8 +16,8 @@ 8 UTF-8 - 0.5.0-SNAPSHOT - 0.5.0-macos-SNAPSHOT + 0.5.0 + 0.5.0-macos test_suite/test_tmp.xml 1.8.9 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index af540ba557d..3f8284b1da4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -56,12 +56,12 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .deployType(OpenMLDBDeployType.CLUSTER) .basePath("/home/zhaowei01/openmldb-auto-test/tmp") .fedbPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:10000") + .zk_cluster("172.24.4.55:30008") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10004", "172.24.4.55:10005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10001", "172.24.4.55:10002", "172.24.4.55:10003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10006")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) .build(); FedbGlobalVar.env = "cluster"; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index 
09fcf3edc94..86de4576996 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -132,7 +132,7 @@ public void tearDown(String version,SqlExecutor executor) { if(table.isDrop()) { String drop = "drop table " + table.getName() + ";"; String tableDBName = table.getDb().isEmpty() ? dbName : table.getDb(); - FesqlUtil.ddl(executor, tableDBName, drop); +// FesqlUtil.ddl(executor, tableDBName, drop); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 3c1a5c3b92f..87263e42ff8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -111,7 +111,7 @@ public void tearDown(String version,SqlExecutor executor) { } for (String spName : spNames) { String drop = "drop procedure " + spName + ";"; - FesqlUtil.ddl(executor, dbName, drop); +// FesqlUtil.ddl(executor, dbName, drop); } super.tearDown(version,executor); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java index e7d884b19bf..f6b1515c6c2 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java @@ -45,14 +45,14 @@ public void testSelect(SQLCase testCase) throws Exception { } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"debug/diff-debug-bank.yaml"}) + @Yaml(filePaths = {"debug/diff-debug-myhug.yaml"}) public void testSelectRequestMode(SQLCase testCase) throws Exception { FesqlUtil.setOnline(executor); ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + @Yaml(filePaths = {"debug/diff-debug-myhug.yaml"}) public void testSelectRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } From bfd9dda542134e6d38e269ae4bf19e949ab13a99 Mon Sep 17 00:00:00 2001 From: wangkaidong Date: Thu, 23 Jun 2022 19:10:06 +0800 Subject: [PATCH 010/172] high availability disk table insert --- .../high_availability/HighDiskTableTest.java | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/high_availability/HighDiskTableTest.java diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/high_availability/HighDiskTableTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/high_availability/HighDiskTableTest.java new file mode 100644 index 00000000000..63f1ca2e300 --- /dev/null +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/high_availability/HighDiskTableTest.java @@ -0,0 +1,135 @@ +package com._4paradigm.openmldb.java_sdk_test.cluster.high_availability; + +import com._4paradigm.openmldb.jdbc.SQLResultSet; +import com._4paradigm.openmldb.sdk.SdkOption; +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.sdk.impl.SqlClusterExecutor; +import lombok.extern.slf4j.Slf4j; +import org.testng.annotations.Test; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +@Slf4j +public class HighDiskTableTest { + + @Test + public void test1() throws Exception { + SdkOption option = new SdkOption(); + option.setZkPath("/openmldb"); + option.setZkCluster("172.24.4.55:30030"); + SqlExecutor router = null; + try { + router = new SqlClusterExecutor(option); + } catch (Exception e) { + System.out.println(e.getMessage()); + } + + Statement statement = router.getStatement(); + statement.execute("set @@SESSION.execute_mode='online';"); + statement.execute("use test_zw"); + statement.execute("create table table1 (\n" + + " id int,\n" + + " c1 string,\n" + + " c3 int,\n" + + " c4 bigint,\n" + + " c5 float,\n" + + " c6 double,\n" + + " c7 timestamp,\n" + + " c8 date,\n" + + " index(key=c1,ts=c7,ttl=60m,ttl_type=ABSOLUTE )\n" + + ")options(partitionnum = 1,replicanum = 1,storage_mode=\"SSD\");"); + + insert10000(statement,"table1",1000*60*60*24L); + ResultSet resultSet = statement.executeQuery("select * from table1"); + SQLResultSet rs = (SQLResultSet)resultSet; + List> result = convertRestultSetToList(rs); + System.out.println(result.size()); + result.forEach(s-> System.out.println(s)); + System.out.println(result.size()); + statement.execute("DROP TABLE table1"); + } + + /** + * + * @param statement + * @param tableName + * 
@param lastTime 1000*秒*分钟*小时 + * @throws Exception + */ + public static void insert10000(Statement statement,String tableName,Long lastTime) throws Exception { + + long startTime = new Date().getTime(); + + int i = 0; + while (true){ + long time = new Date().getTime(); +// String sql = String.format("insert into %s values('bb',%d,%d,%d);",tableName,i,i+1,time); + String sql = String.format("insert into %s values (%d,\"aa\",%d,30,1.1,2.1,%d,\"2020-05-01\");",tableName,i,i+1,time); + System.out.println(sql); + statement.execute(sql); + Thread.sleep(1000); + if(timestartTime+lastTime-1000){ + break; + } + i++; + } + log.info("stop stop stop"); + } + + + private static List> convertRestultSetToList(SQLResultSet rs) throws SQLException { + List> result = new ArrayList<>(); + while (rs.next()) { + List list = new ArrayList(); + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + list.add(getColumnData(rs, i)); + } + result.add(list); + } + return result; + } + + public static Object getColumnData(SQLResultSet rs, int index) throws SQLException { + Object obj = null; + int columnType = rs.getMetaData().getColumnType(index + 1); + if (rs.getNString(index + 1) == null) { + log.info("rs is null"); + return null; + } + if (columnType == Types.BOOLEAN) { + obj = rs.getBoolean(index + 1); + } else if (columnType == Types.DATE) { + try { +// obj = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") +// .parse(rs.getNString(index + 1) + " 00:00:00").getTime()); + obj = rs.getDate(index + 1); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + } else if (columnType == Types.DOUBLE) { + obj = rs.getDouble(index + 1); + } else if (columnType == Types.FLOAT) { + obj = rs.getFloat(index + 1); + } else if (columnType == Types.SMALLINT) { + obj = rs.getShort(index + 1); + } else if (columnType == Types.INTEGER) { + obj = rs.getInt(index + 1); + } else if (columnType == Types.BIGINT) { + obj = rs.getLong(index + 1); + } else 
if (columnType == Types.VARCHAR) { + obj = rs.getString(index + 1); + log.info("conver string data {}", obj); + } else if (columnType == Types.TIMESTAMP) { + obj = rs.getTimestamp(index + 1); + } + return obj; + } +} From 9f390d6fa4a4be7c6dbbd77bc85df5a1b1ade858 Mon Sep 17 00:00:00 2001 From: wangkaidong Date: Mon, 27 Jun 2022 15:50:09 +0800 Subject: [PATCH 011/172] last code --- cases/function/disk_table/disk_table.yaml | 151 ++++++++++++++++-- cases/function/long_window/long_window.yaml | 48 ++++-- .../test_multiple_databases.yaml | 10 +- .../java_sdk_test/common/FedbTest.java | 14 +- .../java_sdk_test/common/StandaloneTest.java | 8 +- .../executor/ExecutorFactory.java | 10 +- .../executor/RequestQuerySQLExecutor.java | 24 +-- .../java_sdk_test/util/FesqlUtil.java | 9 ++ .../cluster/v050/DiskTableTest.java | 16 ++ .../cluster/v230/WindowTest.java | 8 + .../standalone/v050/DiskTableTest.java | 36 +++++ 11 files changed, 279 insertions(+), 55 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v050/DiskTableTest.java diff --git a/cases/function/disk_table/disk_table.yaml b/cases/function/disk_table/disk_table.yaml index 4da51b27aec..809d4ccd535 100644 --- a/cases/function/disk_table/disk_table.yaml +++ b/cases/function/disk_table/disk_table.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: ['HDD 插入索引和ts 一样的数据'] +debugs: ['创建磁盘表,有两个索引,分别为latest和absolute,insert=10 ,where条件'] cases: - id: 0 @@ -74,7 +74,7 @@ cases: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] - ["cc", 41, 51, 1590738991000] - sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: @@ -101,7 +101,7 @@ cases: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] - ["cc", 41, 51, 1590738991000] - sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: @@ -128,7 +128,7 @@ cases: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] - ["cc", 41, 51, 1590738991000] - sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: @@ -155,7 +155,7 @@ cases: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] - ["cc", 41, 51, 1590738991000] - sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: @@ -183,7 +183,7 @@ cases: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] - ["cc", 41, 51, 1590738991000] - sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {0}.c3 on {0}.c1={1}.c1; + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on 
{0}.c1={1}.c1; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: @@ -218,7 +218,7 @@ cases: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] - ["cc", 41, 51, 1590738991000] - sql: select {1}.c1,{1}.c2,{2}.c3,{0}.c4 from {1} last join {2} ORDER BY hdd.c3 on ssd.c1=hdd.c1 last join mem on ssd.c1 = mem.c1; + sql: select {1}.c1,{1}.c2,{2}.c3,{0}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} on {0}.c1 = {2}.c1; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: @@ -329,10 +329,12 @@ cases: columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] storage: SSD - rows: - - ["aa", 2, 3, 1590738989000] - - ["aa", 2, 3, 1590738989000] - sql: select * from {0}; +# rows: +# - ["aa", 2, 3, 1590738989000] + sqls: + - insert into {0} values("aa", 2, 3, 1590738989000) + - insert into {0} values("aa", 2, 3, 1590738989000) + - select * from {0}; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: @@ -359,8 +361,129 @@ cases: columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] storage: hdp - rows: - - ["aa", 2, 3, 1590738989000] +# rows: +# - ["aa", 2, 3, 1590738989000] sql: select * from {0}; expect: - success: false \ No newline at end of file + success: false + + + - id: 14 + desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:4:latest"] + storage: SSD + rows: + - ["bb", 2, 3, "{currentTime}-600001"] + - ["bb", 4, 5, "{currentTime}-600002"] + - ["bb", 6, 7, "{currentTime}-600003"] + - ["bb", 8, 9, "{currentTime}-600004"] + - ["bb", 10, 11, "{currentTime}-600005"] + - ["bb", 12, 13, "{currentTime}-600006"] + - ["bb", 14, 15, "{currentTime}-600007"] + - ["bb", 16, 17, "{currentTime}-600008"] + - ["bb", 18, 19, "{currentTime}-600009"] + - ["bb", 20, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0}; + expect: + columns: 
["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 3] + - ["bb", 4, 5] + - ["bb", 6, 7] + - ["bb", 8, 9] + + - id: 15 + desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 4, 5, "{currentTime}-200"] + - ["bb", 6, 7, "{currentTime}-599000"] + - ["bb", 8, 9, "{currentTime}-600000"] + - ["bb", 10, 11, "{currentTime}-600005"] + - ["bb", 12, 13, "{currentTime}-600006"] + - ["bb", 14, 15, "{currentTime}-600007"] + - ["bb", 16, 17, "{currentTime}-600008"] + - ["bb", 18, 19, "{currentTime}-600009"] + - ["bb", 20, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 3] + - ["bb", 4, 5] + - ["bb", 6, 7] + + - id: 16 + desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 2, 5, "{currentTime}-200"] + - ["bb", 2, 7, "{currentTime}-59"] + - ["bb", 2, 9, "{currentTime}-600"] + - ["bb", 2, 11, "{currentTime}-602"] + - ["bb", 2, 13, "{currentTime}-600006"] + - ["bb", 2, 15, "{currentTime}-600007"] + - ["bb", 2, 17, "{currentTime}-600008"] + - ["bb", 2, 19, "{currentTime}-600009"] + - ["bb", 2, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 7] + - ["bb", 2, 3] + - ["bb", 2, 5] + - ["bb", 2, 9] + + - id: 17 + desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 ,where条件 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 2, 5, "{currentTime}-200"] + - ["bb", 2, 7, 
"{currentTime}-59"] + - ["bb", 2, 9, "{currentTime}-600"] + - ["bb", 2, 11, "{currentTime}-602"] + - ["bb", 2, 13, "{currentTime}-600006"] + - ["bb", 2, 15, "{currentTime}-600007"] + - ["bb", 2, 17, "{currentTime}-600008"] + - ["bb", 2, 19, "{currentTime}-600009"] + - ["bb", 2, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0} where c1 = "bb"; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 7] + - ["bb", 2, 3] + - ["bb", 2, 5] + - ["bb", 2, 9] + - ["bb", 2, 11] + +# - id: 16 +# desc: 数据过期类型长时间 +# inputs: +# - +# columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] +# indexs: ["index1:c1:c4:10000h"] +# storage: hdp +# rows: +# - ["aa", 2, 3, 1590738989000] +# sql: select * from {0}; +# expect: \ No newline at end of file diff --git a/cases/function/long_window/long_window.yaml b/cases/function/long_window/long_window.yaml index 97398b10518..7344aca2cce 100644 --- a/cases/function/long_window/long_window.yaml +++ b/cases/function/long_window/long_window.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: ["options(long_window='w1:2d')"] +debugs: ["options(long_window='w1:2h')"] cases: - id: 0 @@ -59,19 +59,45 @@ cases: inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + indexs: ["index1:c1:c7::latest"] +# rows: +# - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] +# - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] +# - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] +# - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] +# - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] dataProvider: - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + sqls: + - deploy deploy_{0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + - show deployment deploy_{0}; expect: - success: true + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY {0} SELECT + id, + c1, + sum(c4) OVER (w1) AS w1_c4_sum + FROM + {0} + WINDOW w1 AS (PARTITION BY {0}.c1 + ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) + ; + inColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,c3,kInt32,NO + - 4,c4,kInt64,NO + - 5,c5,kFloat,NO + - 6,c6,kDouble,NO + - 7,c7,kTimestamp,NO + - 8,c8,kDate,NO + outColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,w1_c4_sum,kInt64,NO - id: 3 desc: options(long_window='w1:2m') diff --git a/cases/function/multiple_databases/test_multiple_databases.yaml 
b/cases/function/multiple_databases/test_multiple_databases.yaml index 9145e2219f0..3c0590828c8 100644 --- a/cases/function/multiple_databases/test_multiple_databases.yaml +++ b/cases/function/multiple_databases/test_multiple_databases.yaml @@ -32,7 +32,7 @@ cases: - [ "aa",2,13,1590738989000 ] - [ "bb",21,131,1590738990000 ] - [ "cc",41,151,1590738992000 ] - sql: select {0}.c1,{0}.c2,db2.{1}.c3,db2.{1}.c4 from {0} last join db2.{1} ORDER BY db2.{1}.c3 on {0}.c1=db2.{1}.c1; + sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1; expect: order: c1 columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] @@ -113,7 +113,7 @@ cases: success: false - id: 4 desc: 全部使用默认库 - db: db + db: test_zw inputs: - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] indexs: [ "index1:c1:c4" ] @@ -137,7 +137,7 @@ cases: - [ "cc",41,151,1590738992000 ] - id: 5 desc: 指定当前库查询 - db: db + db: test_zw inputs: - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] indexs: [ "index1:c1:c4" ] @@ -151,7 +151,7 @@ cases: - [ "aa",2,13,1590738989000 ] - [ "bb",21,131,1590738990000 ] - [ "cc",41,151,1590738992000 ] - sql: select db.{0}.c1,db.{0}.c2,db.{1}.c3,db.{1}.c4 from db.{0} last join db.{1} ORDER BY db.{1}.c3 on db.{0}.c1=db.{1}.c1; + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1; expect: order: c1 columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] @@ -161,7 +161,7 @@ cases: - [ "cc",41,151,1590738992000 ] - id: 6 desc: 查询使用其他库 - db: db + db: test_zw inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index 54a3b7d8bbe..a5c840b4107 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -29,6 +29,8 @@ import org.testng.annotations.Optional; import org.testng.annotations.Parameters; +import java.sql.Statement; + /** * @author zhaowei * @date 2020/6/11 2:02 PM @@ -56,12 +58,12 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .deployType(OpenMLDBDeployType.CLUSTER) .basePath("/home/wangkaidong/openmldb-auto-test/tmp") .fedbPath("/home/wangkaidong/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30016") + .zk_cluster("172.24.4.55:30030") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30026", "172.24.4.55:30027")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30023", "172.24.4.55:30024", "172.24.4.55:30025")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30028")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30034", "172.24.4.55:30035")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30031", "172.24.4.55:30032", "172.24.4.55:30033")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30036")) .build(); FedbGlobalVar.env = "cluster"; @@ -74,5 +76,9 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi FedbClient fesqlClient = new FedbClient(FedbGlobalVar.mainInfo); executor = fesqlClient.getExecutor(); log.info("executor:{}",executor); + //todo + + Statement statement = executor.getStatement(); + statement.execute("SET @@execute_mode='online';"); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index ec26084b82e..ed92cefeffe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -51,11 +51,11 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .basePath("/home/wangkaidong/fedb-auto-test/standalone") .fedbPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10027")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10028")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10029")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30016")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30017")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30018")) .host("172.24.4.55") - .port(10027) + .port(30016) .build(); } String caseEnv = System.getProperty("caseEnv"); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java index 7a72a6de384..451ad72426f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java @@ -107,11 +107,11 @@ private static BaseSQLExecutor getDDLExecutor(SqlExecutor sqlExecutor, SQLCase f return executor; } private static BaseSQLExecutor 
getFeBatchQueryExecutor(SqlExecutor sqlExecutor, SQLCase fesqlCase, SQLCaseType type) { - if (FedbConfig.isCluster()) { - log.info("cluster unsupport batch query mode"); - reportLog.info("cluster unsupport batch query mode"); - return new NullExecutor(sqlExecutor, fesqlCase, type); - } +// if (FedbConfig.isCluster()) { +// log.info("cluster unsupport batch query mode"); +// reportLog.info("cluster unsupport batch query mode"); +// return new NullExecutor(sqlExecutor, fesqlCase, type); +// } BaseSQLExecutor executor = null; executor = new BatchSQLExecutor(sqlExecutor, fesqlCase, type); return executor; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index d8bfb58cbad..3311c66313d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -55,18 +55,18 @@ public FesqlResult execute(String version, SqlExecutor executor) { logger.info("version:{} execute begin",version); FesqlResult fesqlResult = null; try { - // List sqls = fesqlCase.getSqls(); - // if (sqls != null && sqls.size() > 0) { - // for (String sql : sqls) { - // // log.info("sql:{}", sql); - // if(MapUtils.isNotEmpty(fedbInfoMap)) { - // sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); - // }else { - // sql = FesqlUtil.formatSql(sql, tableNames); - // } - // fesqlResult = FesqlUtil.sql(executor, dbName, sql); - // } - // } + List sqls = fesqlCase.getSqls(); + if (sqls != null && sqls.size() > 0) { + for (String sql : sqls) { + // log.info("sql:{}", sql); + 
if(MapUtils.isNotEmpty(fedbInfoMap)) { + sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + }else { + sql = FesqlUtil.formatSql(sql, tableNames); + } + fesqlResult = FesqlUtil.sql(executor, dbName, sql); + } + } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java index 620bcd1b6c8..f1698c6842c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java @@ -1555,4 +1555,13 @@ public static void useDB(SqlExecutor executor,String dbName){ } } } + + public static void setOnline(SqlExecutor sqlExecutor){ + Statement statement = sqlExecutor.getStatement(); + try { + statement.execute("SET @@execute_mode='online';"); + } catch (SQLException e) { + e.printStackTrace(); + } + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java index bab74200fc9..3eb64ea9d95 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java @@ -18,4 +18,20 @@ public class DiskTableTest extends FedbTest { public void testDiskTable(SQLCase testCase){ 
ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); } + + //all pass + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + @Story("Disk-Table") + public void testDiskTable2(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + @Story("Disk-Table") + public void testDiskTable3(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java index 3aa2af89cba..5f46d870ba8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java @@ -88,4 +88,12 @@ public void testWindowRequestMode2(SQLCase testCase) throws Exception { public void testWindowRequestModeWithSp2(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } + + //暂时不支持 + @Story("requestWithSp") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/window/test_window_union_cluster.yaml"}) + public void testWindowCLI(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kClusterCLI).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v050/DiskTableTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v050/DiskTableTest.java new file mode 100644 index 00000000000..c4fb8e4a4b8 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v050/DiskTableTest.java @@ -0,0 +1,36 @@ +package com._4paradigm.openmldb.java_sdk_test.standalone.v050; + +import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; +import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Story; +import org.testng.annotations.Test; + +public class DiskTableTest extends StandaloneTest { + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + @Story("Disk-Table") + public void testDiskTable(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); + } + + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + @Story("Disk-Table") + public void testDiskTable2(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + @Story("Disk-Table") + public void testDiskTable3(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } + +} From 65ed7a8346108fd769f1da0dc3583a838dfe401a Mon Sep 17 00:00:00 2001 From: wangkaidong Date: Mon, 27 Jun 2022 16:16:14 +0800 Subject: [PATCH 012/172] last code, longwindowTest --- .../cluster/v050/LongWindowTest.java | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 
test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java new file mode 100644 index 00000000000..e5b97430abb --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java @@ -0,0 +1,29 @@ +package com._4paradigm.openmldb.java_sdk_test.cluster.v050; + +import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Story; +import lombok.extern.slf4j.Slf4j; +import org.testng.annotations.Test; + +@Slf4j +public class LongWindowTest extends FedbTest{ + + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/long_window/long_window.yaml") + @Story("Out-In") + public void testLongWindow1(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/long_window/long_window.yaml") + @Story("Out-In") + public void testLongWindow2(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); + } +} From 7924f6a5756e11358799f03d35672e5f68360d96 Mon Sep 17 00:00:00 2001 From: wangkaidong Date: Mon, 4 Jul 2022 16:29:42 +0800 Subject: [PATCH 013/172] last code, longwindowTest --- .../common/StandaloneClient.java | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 
test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java new file mode 100644 index 00000000000..8f9a96973c2 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java @@ -0,0 +1,34 @@ +package com._4paradigm.openmldb.java_sdk_test.common; + +import com._4paradigm.openmldb.sdk.SdkOption; +import com._4paradigm.openmldb.sdk.SqlException; +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.sdk.impl.SqlClusterExecutor; +import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@Data +@Slf4j +public class StandaloneClient { + + private SqlExecutor executor; + + public StandaloneClient(String host, Integer port){ + SdkOption option = new SdkOption(); + option.setHost(host); + option.setPort(port); + option.setClusterMode(false); + option.setSessionTimeout(10000); + option.setRequestTimeout(60000); + log.info("host {}, port {}", option.getHost(), option.getPort()); + try { + executor = new SqlClusterExecutor(option); + } catch (SqlException e) { + e.printStackTrace(); + } + } + public StandaloneClient(FEDBInfo fedbInfo){ + this(fedbInfo.getHost(),fedbInfo.getPort()); + } +} From bd7077e3e63cbbbd69fb5535996677863b52b726 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 4 Jul 2022 16:37:07 +0800 Subject: [PATCH 014/172] Changes --- .../openmldb/java_sdk_test/common/FedbTest.java | 2 +- .../_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index 6b4ee43326c..28e2a97b7e0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -58,7 +58,7 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .deployType(OpenMLDBDeployType.CLUSTER) .basePath("/home/zhaowei01/openmldb-auto-test/tmp") .fedbPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30008") + .zk_cluster("172.24.4.55:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java index f6b1515c6c2..c0466d62411 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java @@ -62,4 +62,12 @@ public void testSelectRequestModeWithSp(SQLCase testCase) throws Exception { public void testSelectRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } + + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = 
{"function/select/test_select_sample.yaml"}) + @Step("{testCase.desc}") + public void testSelect2(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); + } } From 619a83e7a06c7a29800b0208a280a24663483e77 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 5 Jul 2022 09:36:42 +0800 Subject: [PATCH 015/172] add openmldb deploy --- .../openmldb-deploy/pom.xml | 70 +++ .../bean/OpenMLDBDeployType.java | 6 + .../qa/openmldb_deploy/bean/OpenMLDBInfo.java | 63 +++ .../common/OpenMLDBDeploy.java | 456 ++++++++++++++++++ .../conf/OpenMLDBDeployConfig.java | 51 ++ .../qa/openmldb_deploy/util/DeployUtil.java | 45 ++ .../qa/openmldb_deploy/util/FedbTool.java | 132 +++++ .../util/OpenMLDBCommandUtil.java | 35 ++ .../src/main/resources/command.properties | 6 + .../src/main/resources/deploy.properties | 24 + .../src/main/resources/log4j.properties | 51 ++ .../qa/openmldb_deploy/test/TmpDeploy.java | 20 + .../test/TmpDeploySingleNodeCluster.java | 20 + .../test/TmpDeployStandalone.java | 18 + .../test-suite/test_deploy-standalone.xml | 14 + .../test-suite/test_deploy.xml | 14 + .../test-suite/test_deploy_single_node.xml | 15 + .../test-suite/test_deploy_tmp2.xml | 15 + .../java_sdk_test/temp/DebugTest.java | 2 +- .../openmldb-test-java/pom.xml | 1 + test/test-tool/command-tool/pom.xml | 2 +- .../command_tool/common/LinuxUtil.java | 6 + 22 files changed, 1064 insertions(+), 2 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java create mode 
100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java create mode 100755 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties create mode 100755 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/log4j.properties create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml new file mode 100644 index 
00000000000..ba43c16d75a --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml @@ -0,0 +1,70 @@ + + + + openmldb-test-java + com.4paradigm.openmldb + 0.1.0-SNAPSHOT + + 4.0.0 + + openmldb-deploy + + + 8 + 8 + + + + + com.4paradigm.openmldb.test-tool + command-tool + 1.0-SNAPSHOT + + + org.projectlombok + lombok + 1.18.20 + provided + + + com.google.guava + guava + 29.0-jre + + + org.apache.commons + commons-lang3 + 3.4 + + + commons-io + commons-io + 2.7 + + + org.testng + testng + 6.14.3 + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20.1 + + false + 1 + + ${suite} + + always + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java new file mode 100644 index 00000000000..fc9b592088b --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java @@ -0,0 +1,6 @@ +package com._4paradigm.qa.openmldb_deploy.bean; + +public enum OpenMLDBDeployType { + CLUSTER, + STANDALONE +} diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java new file mode 100644 index 00000000000..7e47b724ddd --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.qa.openmldb_deploy.bean; + +import lombok.Builder; +import lombok.Data; + +import java.util.ArrayList; +import java.util.List; + +/** + * @author zhaowei + * @date 2021/2/7 12:10 PM + */ +@Data +@Builder +public class OpenMLDBInfo { + private OpenMLDBDeployType deployType; + private String host; + private int port; + private String basePath; + private String fedbPath; + private String zk_cluster; + private String zk_root_path; + private int nsNum; + private List nsEndpoints = new ArrayList<>(); + private List nsNames = new ArrayList<>(); + private int tabletNum; + private List tabletEndpoints = new ArrayList<>(); + private List tabletNames = new ArrayList<>(); + private int blobServerNum; + private List blobServerEndpoints = new ArrayList<>(); + private List blobServerNames = new ArrayList<>(); + private int blobProxyNum; + private List blobProxyEndpoints = new ArrayList<>(); + private List blobProxyNames = new ArrayList<>(); + private List apiServerEndpoints = new ArrayList<>(); + private List apiServerNames = new ArrayList<>(); + private List taskManagerEndpoints = new ArrayList<>(); + private String runCommand; + + public String getRunCommand(){ + if(deployType==OpenMLDBDeployType.CLUSTER) { + return fedbPath + " --zk_cluster=" + zk_cluster + " --zk_root_path=" + zk_root_path + " --role=sql_client"; + }else{ + return fedbPath + " --host=" + host + " --port=" + port; + } + } +} diff --git 
a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java new file mode 100644 index 00000000000..2fa42c42649 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -0,0 +1,456 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.qa.openmldb_deploy.common; + + +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.conf.OpenMLDBDeployConfig; +import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; +import com._4paradigm.qa.openmldb_deploy.util.FedbTool; +import com._4paradigm.qa.openmldb_deploy.util.OpenMLDBCommandUtil; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import com._4paradigm.test_tool.command_tool.common.LinuxUtil; +import com.google.common.collect.Lists; +import lombok.Setter; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; + +import java.io.File; +import java.util.List; + +@Slf4j +@Setter +public class OpenMLDBDeploy { + private String installPath; + private String version; + private String openMLDBUrl; + private String openMLDBName; + private String openMLDBPath; + private boolean useName; + private boolean isCluster = true; + private String sparkMaster = "local"; + private String batchJobJarPath; + private String sparkYarnJars = ""; + private String offlineDataPrefix = "file:///tmp/openmldb_offline_storage/"; + private String nameNodeUri = "172.27.12.215:8020"; + + public static final int SLEEP_TIME = 10*1000; + + public OpenMLDBDeploy(String version){ + this.version = version; + this.openMLDBUrl = OpenMLDBDeployConfig.getUrl(version); + } + public OpenMLDBInfo deployFEDBByStandalone(){ + String testPath = DeployUtil.getTestPath(version); + if(StringUtils.isNotEmpty(installPath)){ + testPath = installPath+"/"+version; + } + String ip = LinuxUtil.getLocalIP(); + File file = new File(testPath); + if(!file.exists()){ + file.mkdirs(); + } + downloadFEDB(testPath); + OpenMLDBInfo fedbInfo = deployStandalone(testPath,ip); + log.info("openmldb-info:"+fedbInfo); + return fedbInfo; + } + public OpenMLDBInfo deployFEDB(int ns, int tablet){ + return deployFEDB(null,ns,tablet); + } + 
public OpenMLDBInfo deployFEDB(String clusterName, int ns, int tablet){ + OpenMLDBInfo.OpenMLDBInfoBuilder builder = OpenMLDBInfo.builder(); + builder.deployType(OpenMLDBDeployType.CLUSTER); + String testPath = DeployUtil.getTestPath(version); + if(StringUtils.isNotEmpty(installPath)){ + testPath = installPath+"/"+version; + } + if(StringUtils.isNotEmpty(clusterName)) { + testPath = testPath + "/" + clusterName; + } + builder.nsNum(ns).tabletNum(tablet).basePath(testPath); + String ip = LinuxUtil.hostnameI(); + File file = new File(testPath); + if(!file.exists()){ + file.mkdirs(); + } + int zkPort = deployZK(testPath); + downloadFEDB(testPath); + String zk_point = ip+":"+zkPort; + builder.zk_cluster(zk_point).zk_root_path("/openmldb"); + builder.nsEndpoints(Lists.newArrayList()).nsNames(Lists.newArrayList()); + builder.tabletEndpoints(Lists.newArrayList()).tabletNames(Lists.newArrayList()); + builder.apiServerEndpoints(Lists.newArrayList()).apiServerNames(Lists.newArrayList()); + builder.taskManagerEndpoints(Lists.newArrayList()); + builder.fedbPath(testPath+"/openmldb-ns-1/bin/openmldb"); + OpenMLDBInfo fedbInfo = builder.build(); + for(int i=1;i<=tablet;i++) { + int tablet_port ; + if(useName){ + String tabletName = clusterName+"-tablet-"+i; + tablet_port = deployTablet(testPath,null, i, zk_point,tabletName); + fedbInfo.getTabletNames().add(tabletName); + }else { + tablet_port = deployTablet(testPath, ip, i, zk_point,null); + } + fedbInfo.getTabletEndpoints().add(ip+":"+tablet_port); + FedbTool.sleep(SLEEP_TIME); + } + for(int i=1;i<=ns;i++){ + int ns_port; + if(useName){ + String nsName = clusterName+"-ns-"+i; + ns_port = deployNS(testPath,null, i, zk_point,nsName); + fedbInfo.getNsNames().add(nsName); + }else { + ns_port = deployNS(testPath, ip, i, zk_point,null); + } + fedbInfo.getNsEndpoints().add(ip+":"+ns_port); + FedbTool.sleep(SLEEP_TIME); + } + + for(int i=1;i<=1;i++) { + int apiserver_port ; + if(useName){ + String apiserverName = 
clusterName+"-apiserver-"+i; + apiserver_port = deployApiserver(testPath,null, i, zk_point,apiserverName); + fedbInfo.getApiServerNames().add(apiserverName); + }else { + apiserver_port = deployApiserver(testPath, ip, i, zk_point,null); + } + fedbInfo.getApiServerEndpoints().add(ip+":"+apiserver_port); + FedbTool.sleep(SLEEP_TIME); + } + if(version.equals("tmp")||version.compareTo("0.4.0")>=0) { + for (int i = 1; i <= 1; i++) { + int task_manager_port = deployTaskManager(testPath, ip, i, zk_point); + fedbInfo.getTaskManagerEndpoints().add(ip + ":" + task_manager_port); + } + } + log.info("openmldb-info:"+fedbInfo); + return fedbInfo; + } + + private void downloadFEDB(String testPath){ + try { + String command; + if(openMLDBUrl.startsWith("http")) { + command = "wget -P " + testPath + " -q " + openMLDBUrl; + }else{ + command = "cp -r " + openMLDBUrl +" "+ testPath; + } + ExecutorUtil.run(command); + String packageName = openMLDBUrl.substring(openMLDBUrl.lastIndexOf("/") + 1); + command = "ls " + testPath + " | grep "+packageName; + List result = ExecutorUtil.run(command); + String tarName = result.get(0); + command = "tar -zxvf " + testPath + "/"+tarName+" -C "+testPath; + ExecutorUtil.run(command); + command = "ls " + testPath + " | grep openmldb | grep -v .tar.gz"; + result = ExecutorUtil.run(command); + if (result != null && result.size() > 0) { + openMLDBName = result.get(0); + log.info("FEDB下载成功:{}", openMLDBName); + }else{ + throw new RuntimeException("FEDB下载失败"); + } + }catch (Exception e){ + e.printStackTrace(); + } + } + public int deployZK(String testPath){ + try { + int port = LinuxUtil.getNoUsedPort(); + String[] commands = { + "wget -P "+testPath+" "+ OpenMLDBDeployConfig.getZKUrl(version), + "tar -zxvf "+testPath+"/zookeeper-3.4.14.tar.gz -C "+testPath, + "cp "+testPath+"/zookeeper-3.4.14/conf/zoo_sample.cfg "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", + "sed -i 's#dataDir=/tmp/zookeeper#dataDir="+testPath+"/data#' 
"+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", + "sed -i 's#clientPort=2181#clientPort="+port+"#' "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", + "sh "+testPath+"/zookeeper-3.4.14/bin/zkServer.sh start" + }; + for(String command:commands){ + ExecutorUtil.run(command); + } + boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); + if(used){ + log.info("zk部署成功,port:"+port); + return port; + } + }catch (Exception e){ + e.printStackTrace(); + } + throw new RuntimeException("zk部署失败"); + } + + public int deployNS(String testPath, String ip, int index, String zk_endpoint, String name){ + try { + int port = LinuxUtil.getNoUsedPort(); + String ns_name = "/openmldb-ns-"+index; + List commands = Lists.newArrayList( + "cp -r " + testPath + "/" + openMLDBName + " " + testPath + ns_name, + "sed -i 's#--zk_cluster=.*#--zk_cluster=" + zk_endpoint + "#' " + testPath + ns_name + "/conf/nameserver.flags", + "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", + "sed -i 's@#--zk_cluster=.*@--zk_cluster=" + zk_endpoint + "@' " + testPath + ns_name + "/conf/nameserver.flags", + "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", + "sed -i 's@--tablet=.*@#--tablet=127.0.0.1:9921@' "+testPath+ns_name+"/conf/nameserver.flags", + "echo '--request_timeout_ms=60000' >> " + testPath + ns_name + "/conf/nameserver.flags" + ); + if(useName){ + commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + ns_name + "/conf/nameserver.flags"); + commands.add("echo '--use_name=true' >> " + testPath + ns_name + "/conf/nameserver.flags"); + commands.add("echo '--port=" + port + "' >> " + testPath + ns_name + "/conf/nameserver.flags"); + if(name!=null){ + commands.add("mkdir -p " + testPath + ns_name + "/data"); + commands.add("echo " + name + " >> " + testPath + ns_name + "/data/name.txt"); + } + }else{ + String ip_port = ip+":"+port; + commands.add("sed -i 's#--endpoint=.*#--endpoint=" + ip_port + "#' " + 
testPath + ns_name + "/conf/nameserver.flags"); + } + if(isCluster){ + commands.add("sed -i 's@#--enable_distsql=.*@--enable_distsql=true@' " + testPath + ns_name + "/conf/nameserver.flags"); + // commands.add("echo '--enable_distsql=true' >> " + testPath + ns_name + "/conf/nameserver.flags"); + }else{ + commands.add("sed -i 's@#--enable_distsql=.*@--enable_distsql=false@' " + testPath + ns_name + "/conf/nameserver.flags"); + } + commands.forEach(ExecutorUtil::run); + if(StringUtils.isNotEmpty(openMLDBPath)){ + OpenMLDBCommandUtil.cpRtidb(testPath+ns_name, openMLDBPath); + } +// ExecutorUtil.run("sh "+testPath+ns_name+"/bin/start_ns.sh start"); + ExecutorUtil.run("sh "+testPath+ns_name+"/bin/start.sh start nameserver"); + boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); + if(used){ + log.info("ns部署成功,port:"+port); + return port; + } + }catch (Exception e){ + e.printStackTrace(); + } + throw new RuntimeException("ns部署失败"); + } + public int deployTablet(String testPath, String ip, int index, String zk_endpoint, String name){ + try { + int port = LinuxUtil.getNoUsedPort(); + String tablet_name = "/openmldb-tablet-"+index; + List commands = Lists.newArrayList( + "cp -r "+testPath+"/"+ openMLDBName +" "+testPath+tablet_name, + "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i 's@#--make_snapshot_threshold_offset=100000@--make_snapshot_threshold_offset=10@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i 's@--scan_concurrency_limit=16@--scan_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i 's@--put_concurrency_limit=8@--put_concurrency_limit=0@' 
"+testPath+tablet_name+"/conf/tablet.flags", + "sed -i 's@--get_concurrency_limit=16@--get_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", + "echo '--hdd_root_path=./db_hdd' >> "+testPath+tablet_name+"/conf/tablet.flags", + "echo '--recycle_bin_hdd_root_path=./recycle_hdd' >> "+testPath+tablet_name+"/conf/tablet.flags", + "echo '--ssd_root_path=./db_ssd' >> "+testPath+tablet_name+"/conf/tablet.flags", + "echo '--recycle_bin_ssd_root_path=./recycle_ssd' >> "+testPath+tablet_name+"/conf/tablet.flags" + ); + if(useName){ + commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + tablet_name + "/conf/tablet.flags"); + commands.add("echo '--use_name=true' >> " + testPath + tablet_name + "/conf/tablet.flags"); + commands.add("echo '--port=" + port + "' >> " + testPath + tablet_name + "/conf/tablet.flags"); + if(name!=null){ + commands.add("mkdir -p " + testPath + tablet_name + "/data"); + commands.add("echo " + name + " >> " + testPath + tablet_name + "/data/name.txt"); + } + }else{ + String ip_port = ip+":"+port; + commands.add("sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+tablet_name+"/conf/tablet.flags"); + + } + if(isCluster){ + commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + tablet_name + "/conf/tablet.flags"); + }else{ + commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + tablet_name + "/conf/tablet.flags"); + } + commands.forEach(ExecutorUtil::run); + if(StringUtils.isNotEmpty(openMLDBPath)){ + OpenMLDBCommandUtil.cpRtidb(testPath+tablet_name, openMLDBPath); + } + ExecutorUtil.run("sh "+testPath+tablet_name+"/bin/start.sh start tablet"); + boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); + if(used){ + log.info("tablet部署成功,port:"+port); + return port; + } + }catch (Exception e){ + e.printStackTrace(); + } + throw new RuntimeException("tablet部署失败"); + } + public int deployApiserver(String testPath, String ip, int index, String zk_endpoint, String name){ + try 
{ + int port = LinuxUtil.getNoUsedPort(); + String apiserver_name = "/openmldb-apiserver-"+index; + List commands = Lists.newArrayList( + "cp -r "+testPath+"/"+ openMLDBName +" "+testPath+apiserver_name, + "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i 's@--nameserver=.*@#--nameserver=127.0.0.1:6527@' "+testPath+apiserver_name+"/conf/apiserver.flags" + ); + if(useName){ + commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + apiserver_name + "/conf/apiserver.flags"); + commands.add("echo '--use_name=true' >> " + testPath + apiserver_name + "/conf/apiserver.flags"); + commands.add("echo '--port=" + port + "' >> " + testPath + apiserver_name + "/conf/apiserver.flags"); + if(name!=null){ + commands.add("mkdir -p " + testPath + apiserver_name + "/data"); + commands.add("echo " + name + " >> " + testPath + apiserver_name + "/data/name.txt"); + } + }else{ + String ip_port = ip+":"+port; + commands.add("sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+apiserver_name+"/conf/apiserver.flags"); + + } + if(isCluster){ + commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + apiserver_name + "/conf/apiserver.flags"); + }else{ + commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + apiserver_name + "/conf/apiserver.flags"); + } + commands.forEach(ExecutorUtil::run); + if(StringUtils.isNotEmpty(openMLDBPath)){ + OpenMLDBCommandUtil.cpRtidb(testPath+apiserver_name, openMLDBPath); + } + ExecutorUtil.run("sh "+testPath+apiserver_name+"/bin/start.sh start apiserver"); + boolean used = 
LinuxUtil.checkPortIsUsed(port,3000,30); + if(used){ + log.info("apiserver部署成功,port:"+port); + return port; + } + }catch (Exception e){ + e.printStackTrace(); + } + throw new RuntimeException("apiserver部署失败"); + } + + + public String deploySpark(String testPath){ + try { + ExecutorUtil.run("wget -P "+testPath+" -q "+ OpenMLDBDeployConfig.getSparkUrl(version)); + String tarName = ExecutorUtil.run("ls "+ testPath +" | grep spark").get(0); + ExecutorUtil.run("tar -zxvf " + testPath + "/"+tarName+" -C "+testPath); + String sparkHome = ExecutorUtil.run("ls "+ testPath +" | grep spark | grep -v .tgz ").get(0); + String sparkPath = testPath+"/"+sparkHome; + return sparkPath; + }catch (Exception e){ + e.printStackTrace(); + } + throw new RuntimeException("spark 部署失败"); + } + + public int deployTaskManager(String testPath, String ip, int index, String zk_endpoint){ + try { + String sparkHome = deploySpark(testPath); + int port = LinuxUtil.getNoUsedPort(); + String task_manager_name = "/openmldb-task_manager-"+index; + ExecutorUtil.run("cp -r " + testPath + "/" + openMLDBName + " " + testPath + task_manager_name); + if(batchJobJarPath==null) { + String batchJobName = ExecutorUtil.run("ls " + testPath + task_manager_name + "/taskmanager/lib | grep openmldb-batchjob").get(0); + batchJobJarPath = testPath + task_manager_name + "/taskmanager/lib/" + batchJobName; + } + + List commands = Lists.newArrayList( + "sed -i 's#server.host=.*#server.host=" + ip + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", + "sed -i 's#server.port=.*#server.port=" + port + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", + "sed -i 's#zookeeper.cluster=.*#zookeeper.cluster=" + zk_endpoint + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", + "sed -i 's@zookeeper.root_path=.*@zookeeper.root_path=/openmldb@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i 's@spark.master=.*@spark.master=" + sparkMaster + "@' 
"+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i 's@spark.home=.*@spark.home=" + sparkHome + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i 's@batchjob.jar.path=.*@batchjob.jar.path=" + batchJobJarPath + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i 's@spark.yarn.jars=.*@spark.yarn.jars=" + sparkYarnJars + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i 's@offline.data.prefix=.*@offline.data.prefix=" + offlineDataPrefix + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i 's@namenode.uri=.*@namenode.uri=" + nameNodeUri + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties" + ); + commands.forEach(ExecutorUtil::run); + ExecutorUtil.run("sh "+testPath+task_manager_name+"/bin/start.sh start taskmanager"); + boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); + if(used){ + log.info("task manager部署成功,port:"+port); + return port; + } + }catch (Exception e){ + e.printStackTrace(); + } + throw new RuntimeException("task manager部署失败"); + } + + public OpenMLDBInfo deployStandalone(String testPath, String ip){ + try { + int nsPort = LinuxUtil.getNoUsedPort(); + int tabletPort = LinuxUtil.getNoUsedPort(); + int apiServerPort = LinuxUtil.getNoUsedPort(); + String nsEndpoint = ip+":"+nsPort; + String tabletEndpoint = ip+":"+tabletPort; + String apiServerEndpoint = ip+":"+apiServerPort; + String standaloneName = "/openmldb-standalone"; + List commands = Lists.newArrayList( + "cp -r " + testPath + "/" + openMLDBName + " " + testPath + standaloneName, + "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", + "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_nameserver.flags", + "sed -i 's#--endpoint=.*#--endpoint=" + nsEndpoint + "#' " + testPath + standaloneName + 
"/conf/standalone_nameserver.flags", + "sed -i 's@#--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", + "sed -i 's@--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", + "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_tablet.flags", + "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_tablet.flags", + "sed -i 's#--endpoint=.*#--endpoint=" + tabletEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_tablet.flags", + "echo '--hdd_root_path=./db_hdd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", + "echo '--recycle_bin_hdd_root_path=./recycle_hdd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", + "echo '--ssd_root_path=./db_ssd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", + "echo '--recycle_bin_ssd_root_path=./recycle_ssd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", + "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", + "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", + "sed -i 's#--endpoint=.*#--endpoint="+apiServerEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", + "sed -i 's#--nameserver=.*#--nameserver="+nsEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags" + ); + commands.forEach(ExecutorUtil::run); + if(StringUtils.isNotEmpty(openMLDBPath)){ + OpenMLDBCommandUtil.cpRtidb(testPath+standaloneName, openMLDBPath); + } + ExecutorUtil.run("sh "+testPath+standaloneName+"/bin/start-standalone.sh"); + boolean nsOk = LinuxUtil.checkPortIsUsed(nsPort,3000,30); + boolean tabletOk = LinuxUtil.checkPortIsUsed(tabletPort,3000,30); + boolean apiServerOk = 
LinuxUtil.checkPortIsUsed(apiServerPort,3000,30); + if(nsOk&&tabletOk&&apiServerOk){ + log.info(String.format("standalone 部署成功,nsPort:{},tabletPort:{},apiServerPort:{}",nsPort,tabletPort,apiServerPort)); + OpenMLDBInfo fedbInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.STANDALONE) + .fedbPath(testPath+"/openmldb-standalone/bin/openmldb") + .apiServerEndpoints(Lists.newArrayList()) + .basePath(testPath) + .nsEndpoints(Lists.newArrayList(nsEndpoint)) + .nsNum(1) + .host(ip) + .port(nsPort) + .tabletNum(1) + .tabletEndpoints(Lists.newArrayList(tabletEndpoint)) + .apiServerEndpoints(Lists.newArrayList(apiServerEndpoint)) + .build(); + return fedbInfo; + } + }catch (Exception e){ + e.printStackTrace(); + } + throw new RuntimeException("standalone 部署失败"); + } +} + diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java new file mode 100644 index 00000000000..9f7ade0c9db --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java @@ -0,0 +1,51 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.qa.openmldb_deploy.conf; + +import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; +import com._4paradigm.qa.openmldb_deploy.util.FedbTool; +import lombok.extern.slf4j.Slf4j; + +import java.util.Properties; + +/** + * @author zhaowei + * @date 2020/6/11 11:34 AM + */ +@Slf4j +public class OpenMLDBDeployConfig { + + public static final String ZK_URL; + public static final String SPARK_URL; + public static final Properties CONFIG; + + static { + CONFIG = FedbTool.getProperties("deploy.properties"); + ZK_URL = CONFIG.getProperty("zk_url"); + SPARK_URL = CONFIG.getProperty("spark_url"); + } + + public static String getUrl(String version){ + return CONFIG.getProperty(version, DeployUtil.getOpenMLDBUrl(version)); + } + public static String getZKUrl(String version){ + return CONFIG.getProperty(version+"_zk_url", ZK_URL); + } + public static String getSparkUrl(String version){ + return CONFIG.getProperty(version+"_spark_url", SPARK_URL); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java new file mode 100644 index 00000000000..40e53d7b5f8 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java @@ -0,0 +1,45 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.qa.openmldb_deploy.util; + +import com._4paradigm.fe.command.common.LinuxUtil; + +/** + * @author zhaowei + * @date 2021/2/6 9:43 PM + */ +public class DeployUtil { + public static String BASE_PATH ; + public static String getTestPath(String testPath,String version){ + String userHome = LinuxUtil.getHome(); + if(BASE_PATH!=null){ + return userHome+ BASE_PATH; + } + return userHome+"/"+testPath+"/"+ version; + } + public static String getTestPath(String version){ + return getTestPath("openmldb-auto-test",version); + } +// public static String getTestPath(){ +// return getTestPath("fedb-auto-test", FedbGlobalVar.version); +// } + + public static String getOpenMLDBUrl(String version){ + String release_url = "https://github.com/4paradigm/OpenMLDB/releases/download/v%s/openmldb-%s-linux.tar.gz"; + return String.format(release_url,version,version); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java new file mode 100755 index 00000000000..8d522119e0c --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java @@ -0,0 +1,132 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.qa.openmldb_deploy.util; + + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.Assert; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.*; + + +public class FedbTool { + private static final Logger logger = LoggerFactory.getLogger(FedbTool.class); + + public static String getFilePath(String filename) { + return FedbTool.class.getClassLoader().getResource(filename).getFile(); + } + + public static String getCasePath(String yamlCaseDir, String casePath) { + String caseDir = StringUtils.isEmpty(yamlCaseDir) ? FedbTool.rtidbDir().getAbsolutePath() : yamlCaseDir; + Assert.assertNotNull(caseDir); + String caseAbsPath = caseDir + "/cases/" + casePath; + logger.debug("case absolute path: {}", caseAbsPath); + return caseAbsPath; + } + + public static File rtidbDir() { + File directory = new File("."); + directory = directory.getAbsoluteFile(); + while (null != directory) { + if (directory.isDirectory() && "OpenMLDB".equals(directory.getName())) { + break; + } + logger.debug("current directory name {}", directory.getName()); + directory = directory.getParentFile(); + } + + if ("OpenMLDB".equals(directory.getName())) { + return directory; + } else { + return null; + } + } + + public static void sleep(long time) { + try { + Thread.sleep(time); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + public static List getPaths(File directory) { + List list = new ArrayList<>(); + Collection files = FileUtils.listFiles(directory, null, true); + for (File f : files) { + list.add(f.getAbsolutePath()); + } + Collections.sort(list); + return list; + } + + + public static Properties getProperties(String fileName) { + Properties ps = new Properties(); + try { + 
ps.load(FedbTool.class.getClassLoader().getResourceAsStream(fileName)); + } catch (IOException e) { + e.printStackTrace(); + logger.error(e.getMessage()); + } + return ps; + } + + public static String uuid() { + String uuid = UUID.randomUUID().toString().replaceAll("-", ""); + return uuid; + } + + public static void mergeObject(T origin, T destination) { + if (origin == null || destination == null) + return; + if (!origin.getClass().equals(destination.getClass())) + return; + Field[] fields = origin.getClass().getDeclaredFields(); + for (int i = 0; i < fields.length; i++) { + try { + fields[i].setAccessible(true); + Object originValue = fields[i].get(origin); + Object destValue = fields[i].get(destination); + if (null == destValue) { + fields[i].set(destination, originValue); + } + fields[i].setAccessible(false); + } catch (Exception e) { + } + } + } + +} + + + + + + + + + + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java new file mode 100644 index 00000000000..3344962d52e --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java @@ -0,0 +1,35 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.qa.openmldb_deploy.util; + +import com._4paradigm.fe.command.common.LinuxUtil; +import org.testng.Assert; + +/** + * @author zhaowei + * @date 2021/2/7 8:50 AM + */ +public class OpenMLDBCommandUtil { + public static void cpRtidb(String path,String fedbPath){ + boolean ok = LinuxUtil.cp(fedbPath,path+"/bin",path+"/bin/openmldb"); + Assert.assertTrue(ok,"copy conf fail"); + } + public static void cpConf(String path,String confPath){ + boolean ok = LinuxUtil.cp(confPath,path,path+"/conf"); + Assert.assertTrue(ok,"copy conf fail"); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties new file mode 100644 index 00000000000..81f31828e0f --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties @@ -0,0 +1,6 @@ + +#远程执行命令时需要进行配置,本地执行则不需要进行配置 +remote_ip=172.24.4.55 +remote_user=zhaowei01 +remote_password=1qaz0p;/ +#remote_private_key_path=src/main/resources/zw-mac-id_rsa diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties new file mode 100644 index 00000000000..a266c821cfc --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -0,0 +1,24 @@ + +#zk的url +zk_url=https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz + +#配置fedb版本以及对应的url + +main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz +0.2.2=https://github.com/4paradigm/OpenMLDB/releases/download/0.2.2/openmldb-0.2.2-linux.tar.gz +0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz 
+spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz + +tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz +tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz + +single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz +single_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +single_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz + +standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz + +tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz +tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/log4j.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/log4j.properties new file mode 100755 index 00000000000..8aa7e8e77dc --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/log4j.properties @@ -0,0 +1,51 @@ +### set log levels ### +log4j.rootLogger=debug,info,stdout,warn,error + +# console log +log4j.appender.stdout = org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target = System.out +log4j.appender.stdout.Threshold = INFO +log4j.appender.stdout.layout = org.apache.log4j.PatternLayout +log4j.appender.stdout.Encoding=UTF-8 +log4j.appender.stdout.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n + +#info log +log4j.logger.info=info +log4j.appender.info=org.apache.log4j.DailyRollingFileAppender +log4j.appender.info.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.info.File=logs/info.log +log4j.appender.info.Append=true 
+log4j.appender.info.Threshold=INFO +log4j.appender.info.Encoding=UTF-8 +log4j.appender.info.layout=org.apache.log4j.PatternLayout +log4j.appender.info.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#debugs log +log4j.logger.debug=debug +log4j.appender.debug=org.apache.log4j.DailyRollingFileAppender +log4j.appender.debug.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.debug.File=logs/debug.log +log4j.appender.debug.Append=true +log4j.appender.debug.Threshold=DEBUG +log4j.appender.debug.Encoding=UTF-8 +log4j.appender.debug.layout=org.apache.log4j.PatternLayout +log4j.appender.debug.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#warn log +log4j.logger.warn=warn +log4j.appender.warn=org.apache.log4j.DailyRollingFileAppender +log4j.appender.warn.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.warn.File=logs/warn.log +log4j.appender.warn.Append=true +log4j.appender.warn.Threshold=WARN +log4j.appender.warn.Encoding=UTF-8 +log4j.appender.warn.layout=org.apache.log4j.PatternLayout +log4j.appender.warn.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#error +log4j.logger.error=error +log4j.appender.error = org.apache.log4j.DailyRollingFileAppender +log4j.appender.error.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.error.File = logs/error.log +log4j.appender.error.Append = true +log4j.appender.error.Threshold = ERROR +log4j.appender.error.Encoding=UTF-8 +log4j.appender.error.layout = org.apache.log4j.PatternLayout +log4j.appender.error.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java new file mode 100644 index 00000000000..5f5de651006 --- /dev/null +++ 
b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java @@ -0,0 +1,20 @@ +package com._4paradigm.qa.openmldb_deploy.test; + +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; +import org.testng.annotations.Test; + +public class TmpDeploy { + @Test + @Parameters({"version","openMLDBPath"}) + public void testTmp(@Optional("tmp") String version,@Optional("") String openMLDBPath){ + OpenMLDBDeploy deploy = new OpenMLDBDeploy(version); + deploy.setOpenMLDBPath(openMLDBPath); + deploy.setCluster(true); + deploy.setSparkMaster("local"); + OpenMLDBInfo openMLDBInfo = deploy.deployFEDB(2, 3); + System.out.println(openMLDBInfo); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java new file mode 100644 index 00000000000..200955e74b7 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java @@ -0,0 +1,20 @@ +package com._4paradigm.qa.openmldb_deploy.test; + +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; +import org.testng.annotations.Test; + +public class TmpDeploySingleNodeCluster { + @Test + @Parameters({"version","openMLDBPath"}) + public void testTmp(@Optional("tmp") String version,@Optional("") String openMLDBPath){ + OpenMLDBDeploy deploy = new OpenMLDBDeploy(version); + deploy.setOpenMLDBPath(openMLDBPath); + deploy.setCluster(false); 
+ deploy.setSparkMaster("local"); + OpenMLDBInfo openMLDBInfo = deploy.deployFEDB(1, 2); + System.out.println(openMLDBInfo); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java new file mode 100644 index 00000000000..c8e346a9560 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java @@ -0,0 +1,18 @@ +package com._4paradigm.qa.openmldb_deploy.test; + +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; +import org.testng.annotations.Test; + +public class TmpDeployStandalone { + @Test + @Parameters({"openMLDBPath"}) + public void testTmp(@Optional("") String openMLDBPath){ + OpenMLDBDeploy deploy = new OpenMLDBDeploy("standalone"); + deploy.setOpenMLDBPath(openMLDBPath); + OpenMLDBInfo openMLDBInfo = deploy.deployFEDBByStandalone(); + System.out.println(openMLDBInfo); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml new file mode 100644 index 00000000000..95763f349fb --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml new file mode 100644 index 00000000000..17760957816 --- /dev/null +++ 
b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml new file mode 100644 index 00000000000..83747359d71 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml new file mode 100644 index 00000000000..144229117e0 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java index c0466d62411..7b84ee86f0a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java @@ -65,7 +65,7 @@ public void testSelectRequestModeWithSpAysn(SQLCase testCase) throws Exception { @Story("batch") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/select/test_select_sample.yaml"}) + @Yaml(filePaths = {"function/ddl/test_create.yaml"}) @Step("{testCase.desc}") public void testSelect2(SQLCase testCase) throws Exception { 
ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); diff --git a/test/integration-test/openmldb-test-java/pom.xml b/test/integration-test/openmldb-test-java/pom.xml index 0f51b2f0154..c1ab3da744f 100644 --- a/test/integration-test/openmldb-test-java/pom.xml +++ b/test/integration-test/openmldb-test-java/pom.xml @@ -13,6 +13,7 @@ openmldb-sdk-test openmldb-http-test openmldb-tool-test + openmldb-deploy diff --git a/test/test-tool/command-tool/pom.xml b/test/test-tool/command-tool/pom.xml index a921925ea09..b1a492c8bb1 100644 --- a/test/test-tool/command-tool/pom.xml +++ b/test/test-tool/command-tool/pom.xml @@ -4,7 +4,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - com.4paradigm.test-tool + com.4paradigm.openmldb.test-tool command-tool 1.0-SNAPSHOT diff --git a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java index bc868f66cd5..6ba5b38f8b0 100644 --- a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java +++ b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java @@ -61,6 +61,12 @@ public static boolean cp(String src,String dst){ return cp(src,dst,null); } + public static String hostnameI(){ + String command = "hostname -i"; ///usr/sbin/ + List result = ExecutorUtil.run(command); + return result.get(0); + } + public static String getLocalIP(){ String command = "hostname -i"; try { From 3b20e18df73f8442f41c603c52cfbd01dccfbac9 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 5 Jul 2022 11:03:45 +0800 Subject: [PATCH 016/172] modify name from fedb to openMLDB --- .../openmldb-deploy/pom.xml | 5 + .../bean/OpenMLDBDeployType.java | 16 + .../qa/openmldb_deploy/bean/OpenMLDBInfo.java | 6 +- .../common/OpenMLDBDeploy.java | 34 +- 
.../conf/OpenMLDBDeployConfig.java | 4 +- .../qa/openmldb_deploy/util/DeployUtil.java | 2 +- .../util/OpenMLDBCommandUtil.java | 6 +- .../qa/openmldb_deploy/test/TmpDeploy.java | 2 +- .../test/TmpDeploySingleNodeCluster.java | 2 +- .../test/TmpDeployStandalone.java | 2 +- .../openmldb/http_test/common/BaseTest.java | 10 - .../http_test/common/ClusterTest.java | 42 +- .../http_test/common/RestfulCaseFileList.java | 4 +- .../http_test/common/RestfulGlobalVar.java | 4 +- .../http_test/common/StandaloneTest.java | 19 +- .../http_test/config/FedbRestfulConfig.java | 4 +- .../openmldb/http_test/tmp/TestDropTable.java | 4 +- .../openmldb-sdk-test/pom.xml | 5 + .../command/OpenMLDBComamndFacade.java | 14 +- .../command/OpenMLDBCommandUtil.java | 20 +- .../command/OpenmlDBCommandFactory.java | 18 +- .../command/chain/AbstractSQLHandler.java | 10 +- .../command/chain/DDLHandler.java | 6 +- .../command/chain/DMLHandler.java | 7 +- .../command/chain/DescHandler.java | 9 +- .../command/chain/QueryHandler.java | 6 +- .../command/chain/ShowDeploymentHandler.java | 6 +- .../command/chain/ShowDeploymentsHandler.java | 6 +- .../command/chain/SqlChainManager.java | 7 +- .../java_sdk_test/common/FedbGlobalVar.java | 4 +- .../java_sdk_test/common/FedbTest.java | 29 +- .../{FedbClient.java => OpenMLDBClient.java} | 10 +- .../common/StandaloneClient.java | 4 +- .../java_sdk_test/common/StandaloneTest.java | 17 +- .../executor/BaseSQLExecutor.java | 6 +- .../executor/BatchSQLExecutor.java | 4 +- .../executor/ClusterCliExecutor.java | 6 +- .../executor/CommandExecutor.java | 52 +- .../executor/ExecutorFactory.java | 5 +- .../executor/InsertPreparedExecutor.java | 8 +- .../executor/QueryPreparedExecutor.java | 6 +- .../executor/RequestQuerySQLExecutor.java | 4 +- .../executor/StandaloneCliExecutor.java | 21 +- .../executor/StoredProcedureSQLExecutor.java | 4 +- .../java_sdk_test/util/FesqlUtil.java | 6 +- .../auto_gen_case/AutoGenCaseTest.java | 22 +- 
.../java_sdk_test/deploy/TestFEDBDeploy.java | 18 +- .../java_sdk_test/temp/TestCommand.java | 1 - .../java_sdk_test/temp/TestDropTable.java | 4 +- .../java_sdk_test/temp/TestFEDBDeploy.java | 99 ---- .../openmldb-test-common/pom.xml | 8 - .../openmldb/test_common/bean/FEDBInfo.java | 63 --- .../test_common/bean/OpenMLDBDeployType.java | 6 - .../test_common/common/FedbDeployConfig.java | 52 -- .../test_common/main/DeloyFedbMain.java | 29 -- .../restful/model/RestfulCaseFile.java | 4 +- .../restful/util/OpenMLDBTool.java} | 24 +- .../openmldb/test_common/util/DeployUtil.java | 46 -- .../test_common/util/FEDBCommandUtil.java | 37 -- .../openmldb/test_common/util/FEDBDeploy.java | 447 ------------------ .../test_common/util/FedbSDKUtil.java | 20 - .../openmldb/test_common/util/FedbTool.java | 132 ------ .../tool_test/import_tool/data/CheckData.java | 4 +- 63 files changed, 255 insertions(+), 1227 deletions(-) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/{FedbClient.java => OpenMLDBClient.java} (84%) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/FEDBInfo.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBDeployType.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/FedbDeployConfig.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/main/DeloyFedbMain.java rename 
test/integration-test/openmldb-test-java/{openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java} (81%) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DeployUtil.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBCommandUtil.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbSDKUtil.java delete mode 100755 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbTool.java diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml index ba43c16d75a..e155a43d991 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml @@ -17,6 +17,11 @@ + + com.4paradigm.openmldb + openmldb-test-common + ${project.version} + com.4paradigm.openmldb.test-tool command-tool diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java index fc9b592088b..f92dc3c66f4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBDeployType.java @@ -1,3 +1,19 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package com._4paradigm.qa.openmldb_deploy.bean; public enum OpenMLDBDeployType { diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java index 7e47b724ddd..7e7c42ffc92 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java @@ -33,7 +33,7 @@ public class OpenMLDBInfo { private String host; private int port; private String basePath; - private String fedbPath; + private String openMLDBPath; private String zk_cluster; private String zk_root_path; private int nsNum; @@ -55,9 +55,9 @@ public class OpenMLDBInfo { public String getRunCommand(){ if(deployType==OpenMLDBDeployType.CLUSTER) { - return fedbPath + " --zk_cluster=" + zk_cluster + " --zk_root_path=" + zk_root_path + " --role=sql_client"; + return openMLDBPath + " --zk_cluster=" + zk_cluster + " --zk_root_path=" + zk_root_path + " 
--role=sql_client"; }else{ - return fedbPath + " --host=" + host + " --port=" + port; + return openMLDBPath + " --host=" + host + " --port=" + port; } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 2fa42c42649..b156fe043cc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -17,11 +17,11 @@ package com._4paradigm.qa.openmldb_deploy.common; +import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.conf.OpenMLDBDeployConfig; import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; -import com._4paradigm.qa.openmldb_deploy.util.FedbTool; import com._4paradigm.qa.openmldb_deploy.util.OpenMLDBCommandUtil; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import com._4paradigm.test_tool.command_tool.common.LinuxUtil; @@ -55,7 +55,7 @@ public OpenMLDBDeploy(String version){ this.version = version; this.openMLDBUrl = OpenMLDBDeployConfig.getUrl(version); } - public OpenMLDBInfo deployFEDBByStandalone(){ + public OpenMLDBInfo deployStandalone(){ String testPath = DeployUtil.getTestPath(version); if(StringUtils.isNotEmpty(installPath)){ testPath = installPath+"/"+version; @@ -65,15 +65,15 @@ public OpenMLDBInfo deployFEDBByStandalone(){ if(!file.exists()){ file.mkdirs(); } - downloadFEDB(testPath); + downloadOpenMLDB(testPath); OpenMLDBInfo fedbInfo = deployStandalone(testPath,ip); 
log.info("openmldb-info:"+fedbInfo); return fedbInfo; } - public OpenMLDBInfo deployFEDB(int ns, int tablet){ - return deployFEDB(null,ns,tablet); + public OpenMLDBInfo deployCluster(int ns, int tablet){ + return deployCluster(null,ns,tablet); } - public OpenMLDBInfo deployFEDB(String clusterName, int ns, int tablet){ + public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ OpenMLDBInfo.OpenMLDBInfoBuilder builder = OpenMLDBInfo.builder(); builder.deployType(OpenMLDBDeployType.CLUSTER); String testPath = DeployUtil.getTestPath(version); @@ -90,14 +90,14 @@ public OpenMLDBInfo deployFEDB(String clusterName, int ns, int tablet){ file.mkdirs(); } int zkPort = deployZK(testPath); - downloadFEDB(testPath); + downloadOpenMLDB(testPath); String zk_point = ip+":"+zkPort; builder.zk_cluster(zk_point).zk_root_path("/openmldb"); builder.nsEndpoints(Lists.newArrayList()).nsNames(Lists.newArrayList()); builder.tabletEndpoints(Lists.newArrayList()).tabletNames(Lists.newArrayList()); builder.apiServerEndpoints(Lists.newArrayList()).apiServerNames(Lists.newArrayList()); builder.taskManagerEndpoints(Lists.newArrayList()); - builder.fedbPath(testPath+"/openmldb-ns-1/bin/openmldb"); + builder.openMLDBPath(testPath+"/openmldb-ns-1/bin/openmldb"); OpenMLDBInfo fedbInfo = builder.build(); for(int i=1;i<=tablet;i++) { int tablet_port ; @@ -109,7 +109,7 @@ public OpenMLDBInfo deployFEDB(String clusterName, int ns, int tablet){ tablet_port = deployTablet(testPath, ip, i, zk_point,null); } fedbInfo.getTabletEndpoints().add(ip+":"+tablet_port); - FedbTool.sleep(SLEEP_TIME); + OpenMLDBTool.sleep(SLEEP_TIME); } for(int i=1;i<=ns;i++){ int ns_port; @@ -121,7 +121,7 @@ public OpenMLDBInfo deployFEDB(String clusterName, int ns, int tablet){ ns_port = deployNS(testPath, ip, i, zk_point,null); } fedbInfo.getNsEndpoints().add(ip+":"+ns_port); - FedbTool.sleep(SLEEP_TIME); + OpenMLDBTool.sleep(SLEEP_TIME); } for(int i=1;i<=1;i++) { @@ -134,7 +134,7 @@ public OpenMLDBInfo 
deployFEDB(String clusterName, int ns, int tablet){ apiserver_port = deployApiserver(testPath, ip, i, zk_point,null); } fedbInfo.getApiServerEndpoints().add(ip+":"+apiserver_port); - FedbTool.sleep(SLEEP_TIME); + OpenMLDBTool.sleep(SLEEP_TIME); } if(version.equals("tmp")||version.compareTo("0.4.0")>=0) { for (int i = 1; i <= 1; i++) { @@ -146,7 +146,7 @@ public OpenMLDBInfo deployFEDB(String clusterName, int ns, int tablet){ return fedbInfo; } - private void downloadFEDB(String testPath){ + private void downloadOpenMLDB(String testPath){ try { String command; if(openMLDBUrl.startsWith("http")) { @@ -231,7 +231,7 @@ public int deployNS(String testPath, String ip, int index, String zk_endpoint, S } commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ - OpenMLDBCommandUtil.cpRtidb(testPath+ns_name, openMLDBPath); + OpenMLDBCommandUtil.cpOpenMLDB(testPath+ns_name, openMLDBPath); } // ExecutorUtil.run("sh "+testPath+ns_name+"/bin/start_ns.sh start"); ExecutorUtil.run("sh "+testPath+ns_name+"/bin/start.sh start nameserver"); @@ -284,7 +284,7 @@ public int deployTablet(String testPath, String ip, int index, String zk_endpoin } commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ - OpenMLDBCommandUtil.cpRtidb(testPath+tablet_name, openMLDBPath); + OpenMLDBCommandUtil.cpOpenMLDB(testPath+tablet_name, openMLDBPath); } ExecutorUtil.run("sh "+testPath+tablet_name+"/bin/start.sh start tablet"); boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); @@ -329,7 +329,7 @@ public int deployApiserver(String testPath, String ip, int index, String zk_endp } commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ - OpenMLDBCommandUtil.cpRtidb(testPath+apiserver_name, openMLDBPath); + OpenMLDBCommandUtil.cpOpenMLDB(testPath+apiserver_name, openMLDBPath); } ExecutorUtil.run("sh "+testPath+apiserver_name+"/bin/start.sh start apiserver"); boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); @@ -424,7 +424,7 @@ 
public OpenMLDBInfo deployStandalone(String testPath, String ip){ ); commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ - OpenMLDBCommandUtil.cpRtidb(testPath+standaloneName, openMLDBPath); + OpenMLDBCommandUtil.cpOpenMLDB(testPath+standaloneName, openMLDBPath); } ExecutorUtil.run("sh "+testPath+standaloneName+"/bin/start-standalone.sh"); boolean nsOk = LinuxUtil.checkPortIsUsed(nsPort,3000,30); @@ -434,7 +434,7 @@ public OpenMLDBInfo deployStandalone(String testPath, String ip){ log.info(String.format("standalone 部署成功,nsPort:{},tabletPort:{},apiServerPort:{}",nsPort,tabletPort,apiServerPort)); OpenMLDBInfo fedbInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.STANDALONE) - .fedbPath(testPath+"/openmldb-standalone/bin/openmldb") + .openMLDBPath(testPath+"/openmldb-standalone/bin/openmldb") .apiServerEndpoints(Lists.newArrayList()) .basePath(testPath) .nsEndpoints(Lists.newArrayList(nsEndpoint)) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java index 9f7ade0c9db..64ba013f290 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java @@ -16,8 +16,8 @@ package com._4paradigm.qa.openmldb_deploy.conf; +import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; -import com._4paradigm.qa.openmldb_deploy.util.FedbTool; import lombok.extern.slf4j.Slf4j; import java.util.Properties; @@ -34,7 +34,7 @@ public class OpenMLDBDeployConfig { public static final Properties CONFIG; static { - CONFIG = 
FedbTool.getProperties("deploy.properties"); + CONFIG = OpenMLDBTool.getProperties("deploy.properties"); ZK_URL = CONFIG.getProperty("zk_url"); SPARK_URL = CONFIG.getProperty("spark_url"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java index 40e53d7b5f8..4a2bce05b95 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/DeployUtil.java @@ -16,7 +16,7 @@ package com._4paradigm.qa.openmldb_deploy.util; -import com._4paradigm.fe.command.common.LinuxUtil; +import com._4paradigm.test_tool.command_tool.common.LinuxUtil; /** * @author zhaowei diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java index 3344962d52e..5f4cb5a379d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/OpenMLDBCommandUtil.java @@ -16,7 +16,7 @@ package com._4paradigm.qa.openmldb_deploy.util; -import com._4paradigm.fe.command.common.LinuxUtil; +import com._4paradigm.test_tool.command_tool.common.LinuxUtil; import org.testng.Assert; /** @@ -24,8 +24,8 @@ * @date 2021/2/7 8:50 AM */ public class OpenMLDBCommandUtil { - public static void cpRtidb(String path,String fedbPath){ - boolean ok = LinuxUtil.cp(fedbPath,path+"/bin",path+"/bin/openmldb"); + 
public static void cpOpenMLDB(String path, String openMLDBPath){ + boolean ok = LinuxUtil.cp(openMLDBPath,path+"/bin",path+"/bin/openmldb"); Assert.assertTrue(ok,"copy conf fail"); } public static void cpConf(String path,String confPath){ diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java index 5f5de651006..81f6212bf2b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java @@ -14,7 +14,7 @@ public void testTmp(@Optional("tmp") String version,@Optional("") String openMLD deploy.setOpenMLDBPath(openMLDBPath); deploy.setCluster(true); deploy.setSparkMaster("local"); - OpenMLDBInfo openMLDBInfo = deploy.deployFEDB(2, 3); + OpenMLDBInfo openMLDBInfo = deploy.deployCluster(2, 3); System.out.println(openMLDBInfo); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java index 200955e74b7..005ad2e451e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java @@ -14,7 +14,7 @@ public void testTmp(@Optional("tmp") String version,@Optional("") String openMLD deploy.setOpenMLDBPath(openMLDBPath); deploy.setCluster(false); deploy.setSparkMaster("local"); - OpenMLDBInfo 
openMLDBInfo = deploy.deployFEDB(1, 2); + OpenMLDBInfo openMLDBInfo = deploy.deployCluster(1, 2); System.out.println(openMLDBInfo); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java index c8e346a9560..b3811f6ce59 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeployStandalone.java @@ -12,7 +12,7 @@ public class TmpDeployStandalone { public void testTmp(@Optional("") String openMLDBPath){ OpenMLDBDeploy deploy = new OpenMLDBDeploy("standalone"); deploy.setOpenMLDBPath(openMLDBPath); - OpenMLDBInfo openMLDBInfo = deploy.deployFEDBByStandalone(); + OpenMLDBInfo openMLDBInfo = deploy.deployStandalone(); System.out.println(openMLDBInfo); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/BaseTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/BaseTest.java index ad2cd21a162..bed21ec0888 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/BaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/BaseTest.java @@ -16,23 +16,13 @@ package com._4paradigm.openmldb.http_test.common; -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; 
import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.provider.Yaml; import com._4paradigm.openmldb.test_common.restful.model.RestfulCase; import com._4paradigm.openmldb.test_common.restful.model.RestfulCaseFile; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; -import com.google.common.collect.Lists; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; -import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; -import org.testng.annotations.Optional; -import org.testng.annotations.Parameters; import java.io.FileNotFoundException; import java.lang.reflect.Method; diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java index 1a6d7b334ce..4306eb12d0d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java @@ -16,36 +16,26 @@ package com._4paradigm.openmldb.http_test.common; -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBDeployType; -import com._4paradigm.openmldb.test_common.common.LogProxy; -import com._4paradigm.openmldb.test_common.provider.Yaml; -import com._4paradigm.openmldb.test_common.restful.model.RestfulCase; -import 
com._4paradigm.openmldb.test_common.restful.model.RestfulCaseFile; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import com.google.common.collect.Lists; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; import org.testng.annotations.BeforeTest; -import org.testng.annotations.DataProvider; import org.testng.annotations.Optional; import org.testng.annotations.Parameters; -import java.io.FileNotFoundException; -import java.lang.reflect.Method; -import java.util.List; - @Slf4j public class ClusterTest extends BaseTest{ protected SqlExecutor executor; @BeforeTest() @Parameters({"env", "version", "fedbPath"}) - public void beforeTest(@Optional("qa") String env, @Optional("main") String version, @Optional("") String fedbPath) throws Exception { + public void beforeTest(@Optional("qa") String env, @Optional("main") String version, @Optional("") String openMLDBPath) throws Exception { RestfulGlobalVar.env = env; String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { @@ -53,20 +43,20 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers } log.info("fedb global var env: {}", RestfulGlobalVar.env); if (env.equalsIgnoreCase("cluster")) { - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setFedbPath(fedbPath); - fedbDeploy.setCluster(true); - RestfulGlobalVar.mainInfo = fedbDeploy.deployFEDB(2, 3); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(true); + RestfulGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); } else if (env.equalsIgnoreCase("standalone")) { - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setFedbPath(fedbPath); - 
fedbDeploy.setCluster(false); - RestfulGlobalVar.mainInfo = fedbDeploy.deployFEDB(2, 3); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(false); + RestfulGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); } else { - RestfulGlobalVar.mainInfo = FEDBInfo.builder() + RestfulGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) .basePath("/home/zhaowei01/fedb-auto-test/tmp") - .fedbPath("/home/zhaowei01/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .openMLDBPath("/home/zhaowei01/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") .zk_cluster("172.24.4.55:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) @@ -76,7 +66,7 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers .build(); FedbGlobalVar.env = "cluster"; } - FedbClient fesqlClient = new FedbClient(RestfulGlobalVar.mainInfo); + OpenMLDBClient fesqlClient = new OpenMLDBClient(RestfulGlobalVar.mainInfo); executor = fesqlClient.getExecutor(); System.out.println("fesqlClient = " + fesqlClient); } diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java index 90721fe38df..1448162d4a9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java @@ -21,7 +21,7 @@ import com._4paradigm.openmldb.test_common.provider.YamlUtil; import com._4paradigm.openmldb.test_common.restful.model.RestfulCase; import com._4paradigm.openmldb.test_common.restful.model.RestfulCaseFile; -import 
com._4paradigm.openmldb.test_common.util.FedbTool; +import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; import org.apache.commons.lang3.StringUtils; import java.io.File; @@ -61,7 +61,7 @@ public static List generatorCaseFileList(String[] caseFiles) th && !FedbRestfulConfig.FESQL_CASE_PATH.equals(caseFile)) { continue; } - String casePath = FedbTool.getCasePath(FedbRestfulConfig.YAML_CASE_BASE_DIR, caseFile); + String casePath = OpenMLDBTool.getCasePath(FedbRestfulConfig.YAML_CASE_BASE_DIR, caseFile); File file = new File(casePath); if (!file.exists()) { continue; diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulGlobalVar.java index a23f94a5777..6d7049c7c28 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulGlobalVar.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulGlobalVar.java @@ -16,12 +16,12 @@ package com._4paradigm.openmldb.http_test.common; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; public class RestfulGlobalVar { public static String env; public static String level; public static String version; public static String fedbPath; - public static FEDBInfo mainInfo; + public static OpenMLDBInfo mainInfo; } diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java index eb981127bf8..744c788fb2f 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java @@ -16,12 +16,9 @@ package com._4paradigm.openmldb.http_test.common; -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBDeployType; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import com.google.common.collect.Lists; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -42,14 +39,14 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers } log.info("openmldb global var env: {}", RestfulGlobalVar.env); if(env.equalsIgnoreCase("standalone")){ - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setFedbPath(fedbPath); - RestfulGlobalVar.mainInfo = fedbDeploy.deployFEDBByStandalone(); + OpenMLDBDeploy fedbDeploy = new OpenMLDBDeploy(version); + fedbDeploy.setOpenMLDBPath(fedbPath); + RestfulGlobalVar.mainInfo = fedbDeploy.deployStandalone(); }else{ - RestfulGlobalVar.mainInfo = FEDBInfo.builder() + RestfulGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.STANDALONE) .basePath("/home/zhaowei01/fedb-auto-test/standalone") - .fedbPath("/home/zhaowei01/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") + .openMLDBPath("/home/zhaowei01/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") .nsNum(1).tabletNum(1) 
.nsEndpoints(Lists.newArrayList("172.24.4.55:10018")) .tabletEndpoints(Lists.newArrayList("172.24.4.55:10019")) diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java index e1b869b9a56..67d8769caec 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.http_test.config; import com._4paradigm.openmldb.http_test.common.RestfulGlobalVar; -import com._4paradigm.openmldb.test_common.util.FedbTool; +import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.testng.collections.Lists; @@ -47,7 +47,7 @@ public class FedbRestfulConfig { // public static final String BASE_URL; public static final String DB_NAME; - public static final Properties CONFIG = FedbTool.getProperties("fedb.properties"); + public static final Properties CONFIG = OpenMLDBTool.getProperties("fedb.properties"); static { String levelStr = System.getProperty("caseLevel"); diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java index 06f77b392f4..1ed46f58c19 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java @@ -15,7 +15,7 @@ */ package com._4paradigm.openmldb.http_test.tmp; -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import com._4paradigm.openmldb.test_common.restful.model.HttpResult; @@ -32,7 +32,7 @@ public class TestDropTable { @Test public void testAll() throws Exception { - FedbClient fedbClient = new FedbClient("172.24.4.55:10000","/fedb"); + OpenMLDBClient fedbClient = new OpenMLDBClient("172.24.4.55:10000","/fedb"); String apiserver = "172.24.4.55:20000"; String dbName = "test_zw"; String url = String.format("http://%s/dbs/%s/tables",apiserver,dbName); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml index 0cccc1eed76..61356771f69 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml @@ -51,6 +51,11 @@ openmldb-test-common ${project.version} + + com.4paradigm.openmldb + openmldb-deploy + 0.1.0-SNAPSHOT + io.qameta.allure allure-testng diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBComamndFacade.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBComamndFacade.java index 6d3f3f8fff3..dd26032e526 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBComamndFacade.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBComamndFacade.java @@ -17,33 +17,29 @@ import com._4paradigm.openmldb.java_sdk_test.command.chain.SqlChainManager; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.LogProxy; -import com._4paradigm.openmldb.test_common.model.InputDesc; -import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; -import java.util.HashSet; import java.util.List; -import java.util.Objects; @Slf4j public class OpenMLDBComamndFacade { private static final Logger logger = new LogProxy(log); - public static FesqlResult sql(FEDBInfo fedbInfo, String dbName, String sql) { + public static FesqlResult sql(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { logger.info("sql:"+sql); sql = StringUtils.replace(sql,"\n"," "); sql = sql.trim(); - FesqlResult fesqlResult = SqlChainManager.of().sql(fedbInfo, dbName, sql); + FesqlResult fesqlResult = SqlChainManager.of().sql(openMLDBInfo, dbName, sql); logger.info("fesqlResult:"+fesqlResult); return fesqlResult; } - public static FesqlResult sqls(FEDBInfo fedbInfo, String dbName, List sqls) { + public static FesqlResult sqls(OpenMLDBInfo openMLDBInfo, String dbName, List sqls) { FesqlResult fesqlResult = null; for(String sql:sqls){ - fesqlResult = sql(fedbInfo,dbName,sql); + fesqlResult = sql(openMLDBInfo,dbName,sql); } return fesqlResult; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBCommandUtil.java 
index b3ac4f91dbb..3bc66b02fa4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBCommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBCommandUtil.java @@ -18,10 +18,10 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; @@ -34,19 +34,19 @@ public class OpenMLDBCommandUtil { private static final Logger logger = new LogProxy(log); - public static FesqlResult createDB(FEDBInfo fedbInfo, String dbName) { + public static FesqlResult createDB(OpenMLDBInfo openMLDBInfo, String dbName) { String sql = String.format("create database %s ;",dbName); - FesqlResult fesqlResult = OpenMLDBComamndFacade.sql(fedbInfo,dbName,sql); + FesqlResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); return fesqlResult; } - public static FesqlResult desc(FEDBInfo fedbInfo, String dbName, String tableName) { + public static FesqlResult desc(OpenMLDBInfo openMLDBInfo, String dbName, String tableName) { String sql = String.format("desc %s ;",tableName); - FesqlResult fesqlResult = OpenMLDBComamndFacade.sql(fedbInfo,dbName,sql); + FesqlResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); return fesqlResult; } - public static FesqlResult createAndInsert(FEDBInfo fedbInfo, String defaultDBName, List inputs) { + public static FesqlResult createAndInsert(OpenMLDBInfo openMLDBInfo, String defaultDBName, List 
inputs) { HashSet dbNames = new HashSet<>(); if (StringUtils.isNotEmpty(defaultDBName)) { dbNames.add(defaultDBName); @@ -55,7 +55,7 @@ public static FesqlResult createAndInsert(FEDBInfo fedbInfo, String defaultDBNam for (InputDesc input : inputs) { // CreateDB if input's db has been configured and hasn't been created before if (!StringUtils.isEmpty(input.getDb()) && !dbNames.contains(input.getDb())) { - FesqlResult createDBResult = createDB(fedbInfo,input.getDb()); + FesqlResult createDBResult = createDB(openMLDBInfo,input.getDb()); dbNames.add(input.getDb()); log.info("create db:{},{}", input.getDb(), createDBResult.isOk()); } @@ -70,9 +70,9 @@ public static FesqlResult createAndInsert(FEDBInfo fedbInfo, String defaultDBNam //create table String createSql = inputDesc.extractCreate(); createSql = SQLCase.formatSql(createSql, i, tableName); - createSql = FesqlUtil.formatSql(createSql, fedbInfo); + createSql = FesqlUtil.formatSql(createSql, openMLDBInfo); if (!createSql.isEmpty()) { - FesqlResult res = OpenMLDBComamndFacade.sql(fedbInfo,dbName,createSql); + FesqlResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,createSql); if (!res.isOk()) { logger.error("fail to create table"); // reportLog.error("fail to create table"); @@ -84,7 +84,7 @@ public static FesqlResult createAndInsert(FEDBInfo fedbInfo, String defaultDBNam for (String insertSql : inserts) { insertSql = SQLCase.formatSql(insertSql, i, input.getName()); if (!insertSql.isEmpty()) { - FesqlResult res = OpenMLDBComamndFacade.sql(fedbInfo,dbName,insertSql); + FesqlResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,insertSql); if (!res.isOk()) { logger.error("fail to insert table"); // reportLog.error("fail to insert table"); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java index 99506e822de..1b068111e00 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java @@ -15,11 +15,9 @@ */ package com._4paradigm.openmldb.java_sdk_test.command; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBDeployType; import com._4paradigm.openmldb.test_common.common.LogProxy; -import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.slf4j.Logger; @@ -46,16 +44,16 @@ private static String getNoInteractiveCommandByCLuster(String rtidbPath,String z // logger.info("generate rtidb no interactive command:{}",line); return line; } - private static String getNoInteractiveCommand(FEDBInfo fedbInfo, String dbName, String command){ - if(fedbInfo.getDeployType()== OpenMLDBDeployType.CLUSTER){ - return getNoInteractiveCommandByCLuster(fedbInfo.getFedbPath(),fedbInfo.getZk_cluster(),fedbInfo.getZk_root_path(),dbName,command); + private static String getNoInteractiveCommand(OpenMLDBInfo openMLDBInfo, String dbName, String command){ + if(openMLDBInfo.getDeployType()== OpenMLDBDeployType.CLUSTER){ + return getNoInteractiveCommandByCLuster(openMLDBInfo.getOpenMLDBPath(),openMLDBInfo.getZk_cluster(),openMLDBInfo.getZk_root_path(),dbName,command); }else{ - return getNoInteractiveCommandByStandalone(fedbInfo.getFedbPath(),fedbInfo.getHost(),fedbInfo.getPort(),dbName,command); + 
return getNoInteractiveCommandByStandalone(openMLDBInfo.getOpenMLDBPath(),openMLDBInfo.getHost(),openMLDBInfo.getPort(),dbName,command); } } - public static List runNoInteractive(FEDBInfo fedbInfo, String dbName, String command){ - return CommandUtil.run(getNoInteractiveCommand(fedbInfo,dbName,command)); + public static List runNoInteractive(OpenMLDBInfo openMLDBInfo, String dbName, String command){ + return CommandUtil.run(getNoInteractiveCommand(openMLDBInfo,dbName,command)); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/AbstractSQLHandler.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/AbstractSQLHandler.java index bc35bdcb61c..2bc32f03154 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/AbstractSQLHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/AbstractSQLHandler.java @@ -17,7 +17,7 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.Setter; @Setter @@ -26,14 +26,14 @@ public abstract class AbstractSQLHandler { public abstract boolean preHandle(String sql); - public abstract FesqlResult onHandle(FEDBInfo fedbInfo, String dbName, String sql); + public abstract FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql); - public FesqlResult doHandle(FEDBInfo fedbInfo, String dbName,String sql){ + public FesqlResult doHandle(OpenMLDBInfo openMLDBInfo, String dbName,String sql){ if(preHandle(sql)){ - return onHandle(fedbInfo,dbName,sql); + return onHandle(openMLDBInfo,dbName,sql); } if(nextHandler!=null){ - return 
nextHandler.doHandle(fedbInfo,dbName,sql); + return nextHandler.doHandle(openMLDBInfo,dbName,sql); } throw new RuntimeException("no next chain"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DDLHandler.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DDLHandler.java index e0497321c09..bfec6fd6dc9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DDLHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DDLHandler.java @@ -19,7 +19,7 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; import com._4paradigm.openmldb.java_sdk_test.util.Tool; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import java.util.List; @@ -32,9 +32,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(FEDBInfo fedbInfo, String dbName, String sql) { + public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(fedbInfo,dbName,sql); + List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(CommandResultUtil.success(result)); fesqlResult.setDbName(dbName); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DMLHandler.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DMLHandler.java index fb9e6ec132c..d183a1fcea8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DMLHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DMLHandler.java @@ -18,9 +18,8 @@ import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; -import org.apache.commons.collections4.CollectionUtils; import java.util.List; @@ -32,9 +31,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(FEDBInfo fedbInfo, String dbName, String sql) { + public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(fedbInfo,dbName,sql); + List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(CommandResultUtil.success(result)); fesqlResult.setDbName(dbName); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DescHandler.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DescHandler.java index 10f84aa72d4..c690bc8cbc5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DescHandler.java 
+++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DescHandler.java @@ -20,12 +20,9 @@ import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; -import java.util.ArrayList; -import java.util.Arrays; import java.util.List; public class DescHandler extends AbstractSQLHandler{ @@ -35,9 +32,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(FEDBInfo fedbInfo, String dbName, String sql) { + public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(fedbInfo,dbName,sql); + List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/QueryHandler.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/QueryHandler.java index 3e769e2873a..9a8eb053afd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/QueryHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/QueryHandler.java @@ -20,7 +20,7 @@ import 
com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import org.apache.commons.collections4.CollectionUtils; @@ -35,9 +35,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(FEDBInfo fedbInfo, String dbName, String sql) { + public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(fedbInfo,dbName,sql); + List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentHandler.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentHandler.java index 9944ab56d46..e4e4b9e2394 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentHandler.java @@ -20,7 +20,7 @@ import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; 
import com.google.common.base.Joiner; import java.util.List; @@ -33,9 +33,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(FEDBInfo fedbInfo, String dbName, String sql) { + public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(fedbInfo,dbName,sql); + List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentsHandler.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentsHandler.java index 54f91b79e04..8ef12deee1f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentsHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentsHandler.java @@ -20,7 +20,7 @@ import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import org.testng.collections.Lists; @@ -34,9 +34,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(FEDBInfo fedbInfo, String dbName, String sql) { + public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { FesqlResult fesqlResult = new FesqlResult(); 
- List result = OpenmlDBCommandFactory.runNoInteractive(fedbInfo,dbName,sql); + List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/SqlChainManager.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/SqlChainManager.java index ad7fe3c860d..884f4716085 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/SqlChainManager.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/SqlChainManager.java @@ -16,9 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.command.chain; -import ch.ethz.ssh2.crypto.digest.SHA1; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; public class SqlChainManager { private AbstractSQLHandler sqlHandler; @@ -48,8 +47,8 @@ private static class ClassHolder { public static SqlChainManager of() { return ClassHolder.holder; } - public FesqlResult sql(FEDBInfo fedbInfo, String dbName, String sql){ - FesqlResult fesqlResult = sqlHandler.doHandle(fedbInfo, dbName, sql); + public FesqlResult sql(OpenMLDBInfo openMLDBInfo, String dbName, String sql){ + FesqlResult fesqlResult = sqlHandler.doHandle(openMLDBInfo, dbName, sql); return fesqlResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java index c1414e27d6b..480e72aeb13 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.common; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; /** * @author zhaowei @@ -28,6 +28,6 @@ public class FedbGlobalVar { public static String level; public static String version; public static String fedbPath; - public static FEDBInfo mainInfo; + public static OpenMLDBInfo mainInfo; public static String dbName = "test_zw"; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index 28e2a97b7e0..4618368af64 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -18,10 +18,9 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBDeployType; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import 
com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import com.google.common.collect.Lists; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -44,20 +43,20 @@ public class FedbTest extends BaseTest { public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String fedbPath) throws Exception { FedbGlobalVar.env = env; if(env.equalsIgnoreCase("cluster")){ - FEDBDeploy fedbDeploy = new FEDBDeploy(version);; - fedbDeploy.setFedbPath(fedbPath); - fedbDeploy.setCluster(true); - FedbGlobalVar.mainInfo = fedbDeploy.deployFEDB(2, 3); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; + openMLDBDeploy.setOpenMLDBPath(fedbPath); + openMLDBDeploy.setCluster(true); + FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else if(env.equalsIgnoreCase("standalone")){ - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setFedbPath(fedbPath); - fedbDeploy.setCluster(false); - FedbGlobalVar.mainInfo = fedbDeploy.deployFEDB(2, 3); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setOpenMLDBPath(fedbPath); + openMLDBDeploy.setCluster(false); + FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ - FedbGlobalVar.mainInfo = FEDBInfo.builder() + FedbGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .fedbPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") .zk_cluster("172.24.4.55:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) @@ -73,7 +72,7 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi FedbGlobalVar.env = caseEnv; } log.info("fedb global var env: {}", env); - FedbClient fesqlClient = new FedbClient(FedbGlobalVar.mainInfo); + OpenMLDBClient fesqlClient = new OpenMLDBClient(FedbGlobalVar.mainInfo); 
executor = fesqlClient.getExecutor(); log.info("executor:{}",executor); //todo diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbClient.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBClient.java similarity index 84% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbClient.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBClient.java index c927652b207..65b2502eced 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBClient.java @@ -21,7 +21,7 @@ import com._4paradigm.openmldb.sdk.SqlException; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.sdk.impl.SqlClusterExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.Data; import lombok.extern.slf4j.Slf4j; @@ -31,11 +31,11 @@ */ @Data @Slf4j -public class FedbClient { +public class OpenMLDBClient { private SqlExecutor executor; - public FedbClient(String zkCluster, String zkPath){ + public OpenMLDBClient(String zkCluster, String zkPath){ SdkOption option = new SdkOption(); option.setZkCluster(zkCluster); option.setZkPath(zkPath); @@ -49,7 +49,7 @@ public FedbClient(String zkCluster, String zkPath){ e.printStackTrace(); } } - public FedbClient(FEDBInfo fedbInfo){ - this(fedbInfo.getZk_cluster(),fedbInfo.getZk_root_path()); + public OpenMLDBClient(OpenMLDBInfo openMLDBInfo){ + this(openMLDBInfo.getZk_cluster(),openMLDBInfo.getZk_root_path()); 
} } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java index 8f9a96973c2..66c9e60258b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java @@ -4,7 +4,7 @@ import com._4paradigm.openmldb.sdk.SqlException; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.sdk.impl.SqlClusterExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.Data; import lombok.extern.slf4j.Slf4j; @@ -28,7 +28,7 @@ public StandaloneClient(String host, Integer port){ e.printStackTrace(); } } - public StandaloneClient(FEDBInfo fedbInfo){ + public StandaloneClient(OpenMLDBInfo fedbInfo){ this(fedbInfo.getHost(),fedbInfo.getPort()); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index ed92cefeffe..9fc66633c48 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -18,10 +18,9 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import 
com._4paradigm.openmldb.test_common.bean.OpenMLDBDeployType; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import com.google.common.collect.Lists; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -42,14 +41,14 @@ public class StandaloneTest extends BaseTest { public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String fedbPath) throws Exception { FedbGlobalVar.env = env; if(env.equalsIgnoreCase("standalone")){ - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setFedbPath(fedbPath); - FedbGlobalVar.mainInfo = fedbDeploy.deployFEDBByStandalone(); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setOpenMLDBPath(fedbPath); + FedbGlobalVar.mainInfo = openMLDBDeploy.deployStandalone(); }else{ - FedbGlobalVar.mainInfo = FEDBInfo.builder() + FedbGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.STANDALONE) .basePath("/home/wangkaidong/fedb-auto-test/standalone") - .fedbPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") + .openMLDBPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") .nsNum(1).tabletNum(1) .nsEndpoints(Lists.newArrayList("172.24.4.55:30016")) .tabletEndpoints(Lists.newArrayList("172.24.4.55:30017")) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index 86de4576996..d7bbbbbe3d5 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -23,10 +23,10 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -44,7 +44,7 @@ public abstract class BaseSQLExecutor extends BaseExecutor{ protected SqlExecutor executor; private Map executorMap; - protected Map fedbInfoMap; + protected Map fedbInfoMap; private Map resultMap; public BaseSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { @@ -59,7 +59,7 @@ public BaseSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType exec } } - public BaseSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { + public BaseSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { this(executor,fesqlCase,executorType); this.executor = executor; this.executorMap = executorMap; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index 
f1a0e2d416c..df36ed4000e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -19,9 +19,9 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.MapUtils; @@ -38,7 +38,7 @@ public class BatchSQLExecutor extends BaseSQLExecutor { public BatchSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { super(executor, fesqlCase, executorType); } - public BatchSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { + public BatchSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { super(fesqlCase, executor, executorMap, fedbInfoMap, executorType); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java index 297e35c08e8..7bf71a59627 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java @@ -16,10 +16,10 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.slf4j.Logger; @@ -32,8 +32,8 @@ public ClusterCliExecutor(SQLCase fesqlCase, SQLCaseType executorType) { super(fesqlCase, executorType); } - public ClusterCliExecutor(SQLCase fesqlCase, Map fedbInfoMap, SQLCaseType executorType) { - super(fesqlCase, fedbInfoMap, executorType); + public ClusterCliExecutor(SQLCase fesqlCase, Map openMLDBInfoMap, SQLCaseType executorType) { + super(fesqlCase, openMLDBInfoMap, executorType); } @Override diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java index 7dff702acde..56f83600002 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java @@ -22,16 +22,14 @@ import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBComamndFacade; import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBCommandUtil; import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; import 
com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; -import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -46,7 +44,7 @@ public class CommandExecutor extends BaseExecutor{ private static final Logger logger = new LogProxy(log); - protected Map fedbInfoMap; + protected Map openMLDBInfoMap; private Map resultMap; public CommandExecutor(SQLCase fesqlCase, SQLCaseType executorType) { @@ -60,9 +58,9 @@ public CommandExecutor(SQLCase fesqlCase, SQLCaseType executorType) { } } - public CommandExecutor(SQLCase fesqlCase, Map fedbInfoMap, SQLCaseType executorType) { + public CommandExecutor(SQLCase fesqlCase, Map openMLDBInfoMap, SQLCaseType executorType) { this(fesqlCase,executorType); - this.fedbInfoMap = fedbInfoMap; + this.openMLDBInfoMap = openMLDBInfoMap; } @Override @@ -97,16 +95,16 @@ public boolean verify() { @Override public void prepare(){ prepare("mainVersion", FedbGlobalVar.mainInfo); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - fedbInfoMap.entrySet().stream().forEach(e -> prepare(e.getKey(), e.getValue())); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + openMLDBInfoMap.entrySet().stream().forEach(e -> prepare(e.getKey(), e.getValue())); } } - protected void prepare(String version, FEDBInfo fedbInfo){ + protected void prepare(String version, OpenMLDBInfo openMLDBInfo){ logger.info("version:{} prepare begin",version); - FesqlResult fesqlResult = 
OpenMLDBCommandUtil.createDB(fedbInfo,dbName); + FesqlResult fesqlResult = OpenMLDBCommandUtil.createDB(openMLDBInfo,dbName); logger.info("version:{},create db:{},{}", version, dbName, fesqlResult.isOk()); - FesqlResult res = OpenMLDBCommandUtil.createAndInsert(fedbInfo, dbName, fesqlCase.getInputs()); + FesqlResult res = OpenMLDBCommandUtil.createAndInsert(openMLDBInfo, dbName, fesqlCase.getInputs()); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . version:"+version); } @@ -120,36 +118,36 @@ public void execute() { if(CollectionUtils.isNotEmpty(tableNames)) { mainResult.setTableNames(tableNames); } - if(MapUtils.isNotEmpty(fedbInfoMap)) { - resultMap = fedbInfoMap.entrySet().stream(). + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + resultMap = openMLDBInfoMap.entrySet().stream(). collect(Collectors.toMap(e -> e.getKey(), e -> execute(e.getKey(), e.getValue()))); } } - protected FesqlResult execute(String version, FEDBInfo fedbInfo){ + protected FesqlResult execute(String version, OpenMLDBInfo openMLDBInfo){ logger.info("version:{} execute begin",version); FesqlResult fesqlResult = null; List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = FesqlUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { sql = FesqlUtil.formatSql(sql, tableNames); } - fesqlResult = OpenMLDBComamndFacade.sql(fedbInfo, dbName, sql); + fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } } String sql = fesqlCase.getSql(); if (StringUtils.isNotEmpty(sql)) { // log.info("sql:{}", sql); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = FesqlUtil.formatSql(sql, 
tableNames, openMLDBInfoMap.get(version)); }else { sql = FesqlUtil.formatSql(sql, tableNames); } - fesqlResult = OpenMLDBComamndFacade.sql(fedbInfo, dbName, sql); + fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } logger.info("version:{} execute end",version); return fesqlResult; @@ -168,23 +166,23 @@ public void check() throws Exception { @Override public void tearDown() { tearDown("mainVersion",FedbGlobalVar.mainInfo); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - fedbInfoMap.entrySet().stream().forEach(e -> tearDown(e.getKey(), e.getValue())); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + openMLDBInfoMap.entrySet().stream().forEach(e -> tearDown(e.getKey(), e.getValue())); } } - public void tearDown(String version,FEDBInfo fedbInfo) { + public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { logger.info("version:{},begin tear down",version); List tearDown = fesqlCase.getTearDown(); if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = FesqlUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { sql = FesqlUtil.formatSql(sql, tableNames); } - OpenmlDBCommandFactory.runNoInteractive(fedbInfo,dbName, sql); + OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName, sql); }); } logger.info("version:{},begin drop table",version); @@ -196,7 +194,7 @@ public void tearDown(String version,FEDBInfo fedbInfo) { if(table.isDrop()) { String drop = "drop table " + table.getName() + ";"; String db = table.getDb().isEmpty() ? 
dbName : table.getDb(); - OpenmlDBCommandFactory.runNoInteractive(fedbInfo,db,drop); + OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,db,drop); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java index 451ad72426f..670a8727ebb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java @@ -16,12 +16,11 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import java.util.Map; @@ -47,7 +46,7 @@ public static IExecutor build(SQLCase fesqlCase, SQLCaseType type) { return null; } - public static IExecutor build(SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCase fesqlCase, SQLCaseType type) { + public static IExecutor build(SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCase fesqlCase, SQLCaseType type) { switch (type) { case kDiffBatch: { return new BatchSQLExecutor(fesqlCase, executor, executorMap, fedbInfoMap, type); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java index 4cd96a4328b..b2a31867527 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java @@ -20,12 +20,10 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; -import org.slf4j.Logger; import java.util.Map; @@ -38,8 +36,8 @@ public class InsertPreparedExecutor extends BatchSQLExecutor { public InsertPreparedExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { super(executor, fesqlCase, executorType); } - public InsertPreparedExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { - super(fesqlCase, executor, executorMap, fedbInfoMap, executorType); + public InsertPreparedExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map openMLDBInfoMap, SQLCaseType executorType) { + super(fesqlCase, executor, executorMap, openMLDBInfoMap, executorType); } @Override diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java index 674a35c297c..7d12bd2293e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java @@ -19,10 +19,10 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.MapUtils; @@ -40,8 +40,8 @@ public class QueryPreparedExecutor extends BatchSQLExecutor { public QueryPreparedExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { super(executor, fesqlCase, executorType); } - public QueryPreparedExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { - super(fesqlCase, executor, executorMap, fedbInfoMap, executorType); + public QueryPreparedExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map openMLDBInfoMap, SQLCaseType executorType) { + super(fesqlCase, executor, executorMap, openMLDBInfoMap, executorType); } @Override diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index 3311c66313d..ce87463229e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -20,10 +20,10 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -44,7 +44,7 @@ public RequestQuerySQLExecutor(SqlExecutor executor, SQLCase fesqlCase, this.isBatchRequest = isBatchRequest; this.isAsyn = isAsyn; } - public RequestQuerySQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, + public RequestQuerySQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { super(fesqlCase, executor, executorMap, fedbInfoMap, executorType); this.isBatchRequest = isBatchRequest; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java index 
d7fee0ce548..b803148ab5c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java @@ -16,29 +16,14 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.checker.Checker; -import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; -import com._4paradigm.openmldb.java_sdk_test.checker.DiffVersionChecker; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBComamndFacade; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBCommandUtil; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.LogProxy; -import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.collections4.CollectionUtils; -import org.apache.commons.collections4.MapUtils; -import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; -import java.util.List; import java.util.Map; -import java.util.stream.Collectors; @Slf4j public class StandaloneCliExecutor extends CommandExecutor{ @@ -47,8 +32,8 @@ public StandaloneCliExecutor(SQLCase fesqlCase, SQLCaseType executorType) { super(fesqlCase, executorType); } - public StandaloneCliExecutor(SQLCase fesqlCase, Map fedbInfoMap, SQLCaseType executorType) { - 
super(fesqlCase, fedbInfoMap, executorType); + public StandaloneCliExecutor(SQLCase fesqlCase, Map openMLDBInfoMap, SQLCaseType executorType) { + super(fesqlCase, openMLDBInfoMap, executorType); } @Override diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 87263e42ff8..d4e4a33cae5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -19,9 +19,9 @@ import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -40,7 +40,7 @@ public StoredProcedureSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, boole spNames = new ArrayList<>(); } - public StoredProcedureSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { + public StoredProcedureSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { super(fesqlCase, executor, executorMap, fedbInfoMap, isBatchRequest, isAsyn, executorType); spNames = new 
ArrayList<>(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java index f1698c6842c..df9e6adc059 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java @@ -28,11 +28,11 @@ import com._4paradigm.openmldb.jdbc.SQLResultSet; import com._4paradigm.openmldb.sdk.QueryFuture; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -1382,7 +1382,7 @@ public static Object getColumnData(SQLResultSet rs, int index) throws SQLExcepti return obj; } - public static String formatSql(String sql, List tableNames, FEDBInfo fedbInfo) { + public static String formatSql(String sql, List tableNames, OpenMLDBInfo fedbInfo) { Matcher matcher = pattern.matcher(sql); while (matcher.find()) { int index = Integer.parseInt(matcher.group(1)); @@ -1392,7 +1392,7 @@ public static String formatSql(String sql, List tableNames, FEDBInfo fed return sql; } - public static String formatSql(String sql, FEDBInfo fedbInfo) { + public static String formatSql(String sql, OpenMLDBInfo fedbInfo) { if(sql.contains("{tb_endpoint_0}")){ sql = sql.replace("{tb_endpoint_0}", 
fedbInfo.getTabletEndpoints().get(0)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java index 4a16c88b804..9190b9159e9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java @@ -16,17 +16,17 @@ package com._4paradigm.openmldb.java_sdk_test.auto_gen_case; -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.openmldb.test_common.provider.Yaml; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import io.qameta.allure.Feature; import io.qameta.allure.Story; import lombok.extern.slf4j.Slf4j; @@ -45,16 +45,16 @@ public class AutoGenCaseTest extends FedbTest { private Map executorMap = new HashMap<>(); - private Map fedbInfoMap = new HashMap<>(); + private Map fedbInfoMap = new HashMap<>(); @BeforeClass public void beforeClass(){ 
if(FedbConfig.INIT_VERSION_ENV) { FedbConfig.VERSIONS.forEach(version -> { - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setCluster("cluster".equals(FedbGlobalVar.env)); - FEDBInfo fedbInfo = fedbDeploy.deployFEDB(2, 3); - FedbClient fesqlClient = new FedbClient(fedbInfo); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setCluster("cluster".equals(FedbGlobalVar.env)); + OpenMLDBInfo fedbInfo = openMLDBDeploy.deployCluster(2, 3); + OpenMLDBClient fesqlClient = new OpenMLDBClient(fedbInfo); executorMap.put(version, fesqlClient.getExecutor()); fedbInfoMap.put(version, fedbInfo); }); @@ -62,16 +62,16 @@ public void beforeClass(){ }else{ //测试调试用 String verion = "2.2.2"; - FEDBInfo fedbInfo = FEDBInfo.builder() + OpenMLDBInfo fedbInfo = OpenMLDBInfo.builder() .basePath("/home/zhaowei01/fedb-auto-test/2.2.2") - .fedbPath("/home/zhaowei01/fedb-auto-test/2.2.2/fedb-ns-1/bin/fedb") + .openMLDBPath("/home/zhaowei01/fedb-auto-test/2.2.2/fedb-ns-1/bin/fedb") .zk_cluster("172.24.4.55:10006") .zk_root_path("/fedb") .nsNum(2).tabletNum(3) .nsEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:10007", "172.24.4.55:10008")) .tabletEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:10009", "172.24.4.55:10010", "172.24.4.55:10011")) .build(); - executorMap.put(verion, new FedbClient(fedbInfo).getExecutor()); + executorMap.put(verion, new OpenMLDBClient(fedbInfo).getExecutor()); fedbInfoMap.put(verion, fedbInfo); fedbInfoMap.put("mainVersion", FedbGlobalVar.mainInfo); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java index 61dd43c022b..bb2d8c3d126 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.deploy; import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import org.testng.annotations.Optional; import org.testng.annotations.Test; @@ -26,15 +26,15 @@ public class TestFEDBDeploy{ public void pythonDeploy(@Optional("qa") String env, @Optional("main") String version, @Optional("")String fedbPath){ FedbGlobalVar.env = env; if(env.equalsIgnoreCase("cluster")){ - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setFedbPath(fedbPath); - fedbDeploy.setCluster(true); - FedbGlobalVar.mainInfo = fedbDeploy.deployFEDB(2, 3); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setOpenMLDBPath(fedbPath); + openMLDBDeploy.setCluster(true); + FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else if(env.equalsIgnoreCase("standalone")){ - FEDBDeploy fedbDeploy = new FEDBDeploy(version); - fedbDeploy.setFedbPath(fedbPath); - fedbDeploy.setCluster(false); - FedbGlobalVar.mainInfo = fedbDeploy.deployFEDB(2, 3); + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setOpenMLDBPath(fedbPath); + openMLDBDeploy.setCluster(false); + FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java index 5154a0eff61..d7dbe3339d4 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java @@ -17,7 +17,6 @@ import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; import com.google.common.collect.Lists; import org.testng.annotations.Test; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java index b68517c8fac..eb9d9edbb29 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.temp; -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; import org.testng.annotations.Test; @@ -27,7 +27,7 @@ public class TestDropTable { @Test public void testAll(){ - FedbClient fedbClient = new FedbClient("172.24.4.55:10000","/openmldb"); + OpenMLDBClient fedbClient = new OpenMLDBClient("172.24.4.55:10000","/openmldb"); String dbName = "test_zw"; String sql = "show tables;"; FesqlResult fesqlResult = FesqlUtil.select(fedbClient.getExecutor(), dbName, sql); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java deleted file mode 100644 index 8e9673c5c02..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestFEDBDeploy.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com._4paradigm.openmldb.java_sdk_test.temp; - -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; -import org.testng.annotations.Test; - -public class TestFEDBDeploy { - @Test - public void test1(){ - FEDBDeploy deploy = new FEDBDeploy("0.2.3"); - FEDBInfo fedbInfo = deploy.deployFEDB(2,3); - System.out.println(fedbInfo); - } - @Test - public void test5(){ - FEDBDeploy deploy = new FEDBDeploy("0.2.3"); - deploy.setCluster(false); - FEDBInfo fedbInfo = deploy.deployFEDB(2,3); - System.out.println(fedbInfo); - } - @Test - public void test3(){ - FEDBDeploy deploy = new FEDBDeploy("2.2.2"); - FEDBInfo fedbInfo = deploy.deployFEDB(2,3); - System.out.println(fedbInfo); - } - @Test - public void test2(){ - FEDBDeploy deploy = new FEDBDeploy("main"); - deploy.setCluster(false); - FEDBInfo fedbInfo = deploy.deployFEDB(2, 3); - System.out.println(fedbInfo); - } - - @Test - public void testTmp(){ - FEDBDeploy deploy = new FEDBDeploy("tmp2"); - 
deploy.setCluster(true); - deploy.setSparkMaster("local"); - // deploy.setBatchJobJarPath("hdfs://172.27.128.215:8020/Users/tobe/openmldb-batchjob-0.4.0-SNAPSHOT.jar"); - // deploy.setSparkYarnJars("hdfs://172.27.128.215:8020/Users/tobe/openmldb_040_jars/*"); - FEDBInfo fedbInfo = deploy.deployFEDB(2, 3); - System.out.println(fedbInfo); - } - @Test - public void testTmpByPath(){ - FEDBDeploy deploy = new FEDBDeploy("tmp"); - deploy.setCluster(true); - deploy.setSparkMaster("local"); - deploy.setInstallPath("/Users/zhaowei/Desktop/openmldb-auto-test"); - // deploy.setBatchJobJarPath("hdfs://172.27.128.215:8020/Users/tobe/openmldb-batchjob-0.4.0-SNAPSHOT.jar"); - // deploy.setSparkYarnJars("hdfs://172.27.128.215:8020/Users/tobe/openmldb_040_jars/*"); - FEDBInfo fedbInfo = deploy.deployFEDB(2, 3); - System.out.println(fedbInfo); - } - @Test - public void testStandalone(){ - FEDBDeploy deploy = new FEDBDeploy("standalone"); - FEDBInfo fedbInfo = deploy.deployFEDBByStandalone(); - System.out.println(fedbInfo); - } - - @Test - public void testTask(){ - FEDBDeploy deploy = new FEDBDeploy("tmp"); - deploy.setFedbName("openmldb_linux"); - deploy.setCluster(true); - deploy.setSparkMaster("local"); - deploy.deployTaskManager("/home/zhaowei01/fedb-auto-test/tmp","172.24.4.55",1,"172.24.4.55:10000"); - // System.out.println(fedbInfo); - } - - @Test - public void testLocalDeploy(){ - FEDBDeploy deploy = new FEDBDeploy("tmp"); - deploy.setCluster(true); - deploy.setSparkMaster("local"); - // deploy.setBatchJobJarPath("hdfs://172.27.128.215:8020/Users/tobe/openmldb-batchjob-0.4.0-SNAPSHOT.jar"); - // deploy.setSparkYarnJars("hdfs://172.27.128.215:8020/Users/tobe/openmldb_040_jars/*"); - FEDBInfo fedbInfo = deploy.deployFEDB(2, 3); - System.out.println(fedbInfo); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index b43dcf97593..d9b4ee12db9 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -14,17 +14,9 @@ 8 8 - - 1.0-SNAPSHOT - - com.4paradigm.test-tool - command-tool - ${command.tool.version} - - org.apache.httpcomponents httpclient diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/FEDBInfo.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/FEDBInfo.java deleted file mode 100644 index 450931982ca..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/FEDBInfo.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com._4paradigm.openmldb.test_common.bean; - -import lombok.Builder; -import lombok.Data; - -import java.util.ArrayList; -import java.util.List; - -/** - * @author zhaowei - * @date 2021/2/7 12:10 PM - */ -@Data -@Builder -public class FEDBInfo { - private OpenMLDBDeployType deployType; - private String host; - private int port; - private String basePath; - private String fedbPath; - private String zk_cluster; - private String zk_root_path; - private int nsNum; - private List nsEndpoints = new ArrayList<>(); - private List nsNames = new ArrayList<>(); - private int tabletNum; - private List tabletEndpoints = new ArrayList<>(); - private List tabletNames = new ArrayList<>(); - private int blobServerNum; - private List blobServerEndpoints = new ArrayList<>(); - private List blobServerNames = new ArrayList<>(); - private int blobProxyNum; - private List blobProxyEndpoints = new ArrayList<>(); - private List blobProxyNames = new ArrayList<>(); - private List apiServerEndpoints = new ArrayList<>(); - private List apiServerNames = new ArrayList<>(); - private List taskManagerEndpoints = new ArrayList<>(); - private String runCommand; - - public String getRunCommand(){ - if(deployType==OpenMLDBDeployType.CLUSTER) { - return fedbPath + " --zk_cluster=" + zk_cluster + " --zk_root_path=" + zk_root_path + " --role=sql_client"; - }else{ - return fedbPath + " --host=" + host + " --port=" + port; - } - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBDeployType.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBDeployType.java deleted file mode 100644 index bc4b9aa3211..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBDeployType.java +++ /dev/null @@ -1,6 +0,0 @@ -package 
com._4paradigm.openmldb.test_common.bean; - -public enum OpenMLDBDeployType { - CLUSTER, - STANDALONE -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/FedbDeployConfig.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/FedbDeployConfig.java deleted file mode 100644 index c1e5f05d6fd..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/FedbDeployConfig.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com._4paradigm.openmldb.test_common.common; - - -import com._4paradigm.openmldb.test_common.util.DeployUtil; -import com._4paradigm.openmldb.test_common.util.FedbTool; -import lombok.extern.slf4j.Slf4j; - -import java.util.Properties; - -/** - * @author zhaowei - * @date 2020/6/11 11:34 AM - */ -@Slf4j -public class FedbDeployConfig { - - public static final String ZK_URL; - public static final String SPARK_URL; - public static final Properties CONFIG; - - static { - CONFIG = FedbTool.getProperties("fedb_deploy.properties"); - ZK_URL = CONFIG.getProperty("zk_url"); - SPARK_URL = CONFIG.getProperty("spark_url"); - } - - public static String getUrl(String version){ - return CONFIG.getProperty(version, DeployUtil.getOpenMLDBUrl(version)); - } - public static String getZKUrl(String version){ - return CONFIG.getProperty(version+"_zk_url", ZK_URL); - } - public static String getSparkUrl(String version){ - return CONFIG.getProperty(version+"_spark_url", SPARK_URL); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/main/DeloyFedbMain.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/main/DeloyFedbMain.java deleted file mode 100644 index ebd48ac0b4b..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/main/DeloyFedbMain.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com._4paradigm.openmldb.test_common.main; - - -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.util.FEDBDeploy; - -public class DeloyFedbMain { - public static void main(String[] args) { - String version = args[0]; - FEDBDeploy deploy = new FEDBDeploy(version); - FEDBInfo fedbInfo = deploy.deployFEDB(2,3); - System.out.println(fedbInfo); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java index 3fad519fa57..b17e6e160ac 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.test_common.restful.model; +import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; import com._4paradigm.openmldb.test_common.restful.util.Tool; -import com._4paradigm.openmldb.test_common.util.FedbTool; import lombok.Data; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -56,7 +56,7 @@ public List getCases() { List debugs = getDebugs(); for (RestfulCase tmpCase : cases) { 
if(baseCase!=null){ - FedbTool.mergeObject(baseCase,tmpCase); + OpenMLDBTool.mergeObject(baseCase,tmpCase); } if (!CollectionUtils.isEmpty(debugs)) { if (debugs.contains(tmpCase.getDesc().trim())) { diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java similarity index 81% rename from test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java index 8d522119e0c..bd905f9a5ae 100755 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/FedbTool.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java @@ -14,13 +14,12 @@ * limitations under the License. 
*/ -package com._4paradigm.qa.openmldb_deploy.util; +package com._4paradigm.openmldb.test_common.restful.util; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.testng.Assert; import java.io.File; @@ -28,30 +27,29 @@ import java.lang.reflect.Field; import java.util.*; - -public class FedbTool { - private static final Logger logger = LoggerFactory.getLogger(FedbTool.class); +@Slf4j +public class OpenMLDBTool { public static String getFilePath(String filename) { - return FedbTool.class.getClassLoader().getResource(filename).getFile(); + return OpenMLDBTool.class.getClassLoader().getResource(filename).getFile(); } public static String getCasePath(String yamlCaseDir, String casePath) { - String caseDir = StringUtils.isEmpty(yamlCaseDir) ? FedbTool.rtidbDir().getAbsolutePath() : yamlCaseDir; + String caseDir = StringUtils.isEmpty(yamlCaseDir) ? OpenMLDBTool.openMLDBDir().getAbsolutePath() : yamlCaseDir; Assert.assertNotNull(caseDir); String caseAbsPath = caseDir + "/cases/" + casePath; - logger.debug("case absolute path: {}", caseAbsPath); + log.debug("case absolute path: {}", caseAbsPath); return caseAbsPath; } - public static File rtidbDir() { + public static File openMLDBDir() { File directory = new File("."); directory = directory.getAbsoluteFile(); while (null != directory) { if (directory.isDirectory() && "OpenMLDB".equals(directory.getName())) { break; } - logger.debug("current directory name {}", directory.getName()); + log.debug("current directory name {}", directory.getName()); directory = directory.getParentFile(); } @@ -84,10 +82,10 @@ public static List getPaths(File directory) { public static Properties getProperties(String fileName) { Properties ps = new Properties(); try { - ps.load(FedbTool.class.getClassLoader().getResourceAsStream(fileName)); + 
ps.load(OpenMLDBTool.class.getClassLoader().getResourceAsStream(fileName)); } catch (IOException e) { e.printStackTrace(); - logger.error(e.getMessage()); + log.error(e.getMessage()); } return ps; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DeployUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DeployUtil.java deleted file mode 100644 index 840b40e2f06..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DeployUtil.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com._4paradigm.openmldb.test_common.util; - - -import com._4paradigm.test_tool.command_tool.common.LinuxUtil; - -/** - * @author zhaowei - * @date 2021/2/6 9:43 PM - */ -public class DeployUtil { - public static String BASE_PATH ; - public static String getTestPath(String testPath,String version){ - String userHome = LinuxUtil.getHome(); - if(BASE_PATH!=null){ - return userHome+ BASE_PATH; - } - return userHome+"/"+testPath+"/"+ version; - } - public static String getTestPath(String version){ - return getTestPath("fedb-auto-test",version); - } -// public static String getTestPath(){ -// return getTestPath("fedb-auto-test", FedbGlobalVar.version); -// } - - public static String getOpenMLDBUrl(String version){ - String release_url = "https://github.com/4paradigm/OpenMLDB/releases/download/v%s/openmldb-%s-linux.tar.gz"; - return String.format(release_url,version,version); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBCommandUtil.java deleted file mode 100644 index e6b3f47e808..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBCommandUtil.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com._4paradigm.openmldb.test_common.util; - - - -import com._4paradigm.test_tool.command_tool.common.LinuxUtil; -import org.testng.Assert; - -/** - * @author zhaowei - * @date 2021/2/7 8:50 AM - */ -public class FEDBCommandUtil { - public static void cpRtidb(String path,String fedbPath){ - boolean ok = LinuxUtil.cp(fedbPath,path+"/bin",path+"/bin/openmldb"); - Assert.assertTrue(ok,"copy conf fail"); - } - public static void cpConf(String path,String confPath){ - boolean ok = LinuxUtil.cp(confPath,path,path+"/conf"); - Assert.assertTrue(ok,"copy conf fail"); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java deleted file mode 100644 index df9a549025f..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FEDBDeploy.java +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com._4paradigm.openmldb.test_common.util; - - -import com._4paradigm.openmldb.test_common.bean.FEDBInfo; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBDeployType; -import com._4paradigm.openmldb.test_common.common.FedbDeployConfig; -import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; -import com._4paradigm.test_tool.command_tool.common.LinuxUtil; -import com.google.common.collect.Lists; -import lombok.Setter; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; -import sun.tools.jar.resources.jar; - -import java.io.File; -import java.util.List; - -@Slf4j -@Setter -public class FEDBDeploy { - private String installPath; - private String version; - private String fedbUrl; - private String fedbName; - private String fedbPath; - private boolean useName; - private boolean isCluster = true; - private String sparkMaster = "local"; - private String batchJobJarPath; - private String sparkYarnJars = ""; - private String offlineDataPrefix = "file:///tmp/openmldb_offline_storage/"; - private String nameNodeUri = "172.27.12.215:8020"; - - public static final int SLEEP_TIME = 10*1000; - - public FEDBDeploy(String version){ - this.version = version; - this.fedbUrl = FedbDeployConfig.getUrl(version); - } - public FEDBInfo deployFEDBByStandalone(){ - String testPath = DeployUtil.getTestPath(version); - if(StringUtils.isNotEmpty(installPath)){ - testPath = installPath+"/"+version; - } - String ip = LinuxUtil.getLocalIP(); - File file = new File(testPath); - if(!file.exists()){ - file.mkdirs(); - } - downloadFEDB(testPath); - FEDBInfo fedbInfo = deployStandalone(testPath,ip); - log.info("openmldb-info:"+fedbInfo); - return fedbInfo; - } - public FEDBInfo deployFEDB(int ns, int tablet){ - return deployFEDB(null,ns,tablet); - } - public FEDBInfo deployFEDB(String clusterName, int ns, int tablet){ - FEDBInfo.FEDBInfoBuilder builder = FEDBInfo.builder(); - 
builder.deployType(OpenMLDBDeployType.CLUSTER); - String testPath = DeployUtil.getTestPath(version); - if(StringUtils.isNotEmpty(installPath)){ - testPath = installPath+"/"+version; - } - if(StringUtils.isNotEmpty(clusterName)) { - testPath = testPath + "/" + clusterName; - } - builder.nsNum(ns).tabletNum(tablet).basePath(testPath); - String ip = LinuxUtil.getLocalIP(); - File file = new File(testPath); - if(!file.exists()){ - file.mkdirs(); - } - int zkPort = deployZK(testPath); - downloadFEDB(testPath); - String zk_point = ip+":"+zkPort; - builder.zk_cluster(zk_point).zk_root_path("/openmldb"); - builder.nsEndpoints(Lists.newArrayList()).nsNames(Lists.newArrayList()); - builder.tabletEndpoints(Lists.newArrayList()).tabletNames(Lists.newArrayList()); - builder.apiServerEndpoints(Lists.newArrayList()).apiServerNames(Lists.newArrayList()); - builder.taskManagerEndpoints(Lists.newArrayList()); - builder.fedbPath(testPath+"/openmldb-ns-1/bin/openmldb"); - FEDBInfo fedbInfo = builder.build(); - for(int i=1;i<=tablet;i++) { - int tablet_port ; - if(useName){ - String tabletName = clusterName+"-tablet-"+i; - tablet_port = deployTablet(testPath,null, i, zk_point,tabletName); - fedbInfo.getTabletNames().add(tabletName); - }else { - tablet_port = deployTablet(testPath, ip, i, zk_point,null); - } - fedbInfo.getTabletEndpoints().add(ip+":"+tablet_port); - FedbTool.sleep(SLEEP_TIME); - } - for(int i=1;i<=ns;i++){ - int ns_port; - if(useName){ - String nsName = clusterName+"-ns-"+i; - ns_port = deployNS(testPath,null, i, zk_point,nsName); - fedbInfo.getNsNames().add(nsName); - }else { - ns_port = deployNS(testPath, ip, i, zk_point,null); - } - fedbInfo.getNsEndpoints().add(ip+":"+ns_port); - FedbTool.sleep(SLEEP_TIME); - } - - for(int i=1;i<=1;i++) { - int apiserver_port ; - if(useName){ - String apiserverName = clusterName+"-apiserver-"+i; - apiserver_port = deployApiserver(testPath,null, i, zk_point,apiserverName); - fedbInfo.getApiServerNames().add(apiserverName); - }else { 
- apiserver_port = deployApiserver(testPath, ip, i, zk_point,null); - } - fedbInfo.getApiServerEndpoints().add(ip+":"+apiserver_port); - FedbTool.sleep(SLEEP_TIME); - } - if(version.equals("tmp")||version.compareTo("0.4.0")>=0) { - for (int i = 1; i <= 1; i++) { - int task_manager_port = deployTaskManager(testPath, ip, i, zk_point); - fedbInfo.getTaskManagerEndpoints().add(ip + ":" + task_manager_port); - } - } - log.info("openmldb-info:"+fedbInfo); - return fedbInfo; - } - - private void downloadFEDB(String testPath){ - try { - String command; - if(fedbUrl.startsWith("http")) { - command = "wget -P " + testPath + " -q " + fedbUrl; - }else{ - command = "cp -r " + fedbUrl +" "+ testPath; - } - ExecutorUtil.run(command); - String packageName = fedbUrl.substring(fedbUrl.lastIndexOf("/") + 1); - command = "ls " + testPath + " | grep "+packageName; - List result = ExecutorUtil.run(command); - String tarName = result.get(0); - command = "tar -zxvf " + testPath + "/"+tarName+" -C "+testPath; - ExecutorUtil.run(command); - command = "ls " + testPath + " | grep openmldb | grep -v .tar.gz"; - result = ExecutorUtil.run(command); - if (result != null && result.size() > 0) { - fedbName = result.get(0); - log.info("FEDB下载成功:{}",fedbName); - }else{ - throw new RuntimeException("FEDB下载失败"); - } - }catch (Exception e){ - e.printStackTrace(); - } - } - public int deployZK(String testPath){ - try { - int port = LinuxUtil.getNoUsedPort(); - String[] commands = { - "wget -P "+testPath+" "+ FedbDeployConfig.getZKUrl(version), - "tar -zxvf "+testPath+"/zookeeper-3.4.14.tar.gz -C "+testPath, - "cp "+testPath+"/zookeeper-3.4.14/conf/zoo_sample.cfg "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", - "sed -i 's#dataDir=/tmp/zookeeper#dataDir="+testPath+"/data#' "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", - "sed -i 's#clientPort=2181#clientPort="+port+"#' "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", - "sh "+testPath+"/zookeeper-3.4.14/bin/zkServer.sh start" - }; - for(String command:commands){ - 
ExecutorUtil.run(command); - } - boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); - if(used){ - log.info("zk部署成功,port:"+port); - return port; - } - }catch (Exception e){ - e.printStackTrace(); - } - throw new RuntimeException("zk部署失败"); - } - - public int deployNS(String testPath, String ip, int index, String zk_endpoint, String name){ - try { - int port = LinuxUtil.getNoUsedPort(); - String ns_name = "/openmldb-ns-"+index; - List commands = Lists.newArrayList( - "cp -r " + testPath + "/" + fedbName + " " + testPath + ns_name, - "sed -i 's#--zk_cluster=.*#--zk_cluster=" + zk_endpoint + "#' " + testPath + ns_name + "/conf/nameserver.flags", - "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", - "sed -i 's@#--zk_cluster=.*@--zk_cluster=" + zk_endpoint + "@' " + testPath + ns_name + "/conf/nameserver.flags", - "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", - "sed -i 's@--tablet=.*@#--tablet=127.0.0.1:9921@' "+testPath+ns_name+"/conf/nameserver.flags", - "echo '--request_timeout_ms=60000' >> " + testPath + ns_name + "/conf/nameserver.flags" - ); - if(useName){ - commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + ns_name + "/conf/nameserver.flags"); - commands.add("echo '--use_name=true' >> " + testPath + ns_name + "/conf/nameserver.flags"); - commands.add("echo '--port=" + port + "' >> " + testPath + ns_name + "/conf/nameserver.flags"); - if(name!=null){ - commands.add("mkdir -p " + testPath + ns_name + "/data"); - commands.add("echo " + name + " >> " + testPath + ns_name + "/data/name.txt"); - } - }else{ - String ip_port = ip+":"+port; - commands.add("sed -i 's#--endpoint=.*#--endpoint=" + ip_port + "#' " + testPath + ns_name + "/conf/nameserver.flags"); - } - if(isCluster){ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + ns_name + "/conf/nameserver.flags"); - commands.add("echo '--enable_distsql=true' >> " + 
testPath + ns_name + "/conf/nameserver.flags"); - }else{ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + ns_name + "/conf/nameserver.flags"); - } - commands.forEach(ExecutorUtil::run); - if(StringUtils.isNotEmpty(fedbPath)){ - FEDBCommandUtil.cpRtidb(testPath+ns_name,fedbPath); - } -// ExecutorUtil.run("sh "+testPath+ns_name+"/bin/start_ns.sh start"); - ExecutorUtil.run("sh "+testPath+ns_name+"/bin/start.sh start nameserver"); - boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); - if(used){ - log.info("ns部署成功,port:"+port); - return port; - } - }catch (Exception e){ - e.printStackTrace(); - } - throw new RuntimeException("ns部署失败"); - } - public int deployTablet(String testPath, String ip, int index, String zk_endpoint, String name){ - try { - int port = LinuxUtil.getNoUsedPort(); - String tablet_name = "/openmldb-tablet-"+index; - List commands = Lists.newArrayList( - "cp -r "+testPath+"/"+fedbName+" "+testPath+tablet_name, - "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@#--make_snapshot_threshold_offset=100000@--make_snapshot_threshold_offset=10@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--scan_concurrency_limit=16@--scan_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--put_concurrency_limit=8@--put_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--get_concurrency_limit=16@--get_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags" - ); - if(useName){ - commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + tablet_name + "/conf/tablet.flags"); - 
commands.add("echo '--use_name=true' >> " + testPath + tablet_name + "/conf/tablet.flags"); - commands.add("echo '--port=" + port + "' >> " + testPath + tablet_name + "/conf/tablet.flags"); - if(name!=null){ - commands.add("mkdir -p " + testPath + tablet_name + "/data"); - commands.add("echo " + name + " >> " + testPath + tablet_name + "/data/name.txt"); - } - }else{ - String ip_port = ip+":"+port; - commands.add("sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+tablet_name+"/conf/tablet.flags"); - - } - if(isCluster){ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + tablet_name + "/conf/tablet.flags"); - }else{ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + tablet_name + "/conf/tablet.flags"); - } - commands.forEach(ExecutorUtil::run); - if(StringUtils.isNotEmpty(fedbPath)){ - FEDBCommandUtil.cpRtidb(testPath+tablet_name,fedbPath); - } - ExecutorUtil.run("sh "+testPath+tablet_name+"/bin/start.sh start tablet"); - boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); - if(used){ - log.info("tablet部署成功,port:"+port); - return port; - } - }catch (Exception e){ - e.printStackTrace(); - } - throw new RuntimeException("tablet部署失败"); - } - public int deployApiserver(String testPath, String ip, int index, String zk_endpoint, String name){ - try { - int port = LinuxUtil.getNoUsedPort(); - String apiserver_name = "/openmldb-apiserver-"+index; - List commands = Lists.newArrayList( - "cp -r "+testPath+"/"+fedbName+" "+testPath+apiserver_name, - "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 
's@--nameserver=.*@#--nameserver=127.0.0.1:6527@' "+testPath+apiserver_name+"/conf/apiserver.flags" - ); - if(useName){ - commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + apiserver_name + "/conf/apiserver.flags"); - commands.add("echo '--use_name=true' >> " + testPath + apiserver_name + "/conf/apiserver.flags"); - commands.add("echo '--port=" + port + "' >> " + testPath + apiserver_name + "/conf/apiserver.flags"); - if(name!=null){ - commands.add("mkdir -p " + testPath + apiserver_name + "/data"); - commands.add("echo " + name + " >> " + testPath + apiserver_name + "/data/name.txt"); - } - }else{ - String ip_port = ip+":"+port; - commands.add("sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+apiserver_name+"/conf/apiserver.flags"); - - } - if(isCluster){ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + apiserver_name + "/conf/apiserver.flags"); - }else{ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + apiserver_name + "/conf/apiserver.flags"); - } - commands.forEach(ExecutorUtil::run); - if(StringUtils.isNotEmpty(fedbPath)){ - FEDBCommandUtil.cpRtidb(testPath+apiserver_name,fedbPath); - } - ExecutorUtil.run("sh "+testPath+apiserver_name+"/bin/start.sh start apiserver"); - boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); - if(used){ - log.info("apiserver部署成功,port:"+port); - return port; - } - }catch (Exception e){ - e.printStackTrace(); - } - throw new RuntimeException("apiserver部署失败"); - } - - - public String deploySpark(String testPath){ - try { - ExecutorUtil.run("wget -P "+testPath+" -q "+ FedbDeployConfig.getSparkUrl(version)); - String tarName = ExecutorUtil.run("ls "+ testPath +" | grep spark").get(0); - ExecutorUtil.run("tar -zxvf " + testPath + "/"+tarName+" -C "+testPath); - String sparkHome = ExecutorUtil.run("ls "+ testPath +" | grep spark | grep -v .tgz ").get(0); - String sparkPath = testPath+"/"+sparkHome; - return sparkPath; - }catch (Exception e){ - 
e.printStackTrace(); - } - throw new RuntimeException("spark 部署失败"); - } - - public int deployTaskManager(String testPath, String ip, int index, String zk_endpoint){ - try { - String sparkHome = deploySpark(testPath); - int port = LinuxUtil.getNoUsedPort(); - String task_manager_name = "/openmldb-task_manager-"+index; - ExecutorUtil.run("cp -r " + testPath + "/" + fedbName + " " + testPath + task_manager_name); - if(batchJobJarPath==null) { - String batchJobName = ExecutorUtil.run("ls " + testPath + task_manager_name + "/taskmanager/lib | grep openmldb-batchjob").get(0); - batchJobJarPath = testPath + task_manager_name + "/taskmanager/lib/" + batchJobName; - } - - List commands = Lists.newArrayList( - "sed -i 's#server.host=.*#server.host=" + ip + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", - "sed -i 's#server.port=.*#server.port=" + port + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", - "sed -i 's#zookeeper.cluster=.*#zookeeper.cluster=" + zk_endpoint + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", - "sed -i 's@zookeeper.root_path=.*@zookeeper.root_path=/openmldb@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@spark.master=.*@spark.master=" + sparkMaster + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@spark.home=.*@spark.home=" + sparkHome + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@batchjob.jar.path=.*@batchjob.jar.path=" + batchJobJarPath + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@spark.yarn.jars=.*@spark.yarn.jars=" + sparkYarnJars + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@offline.data.prefix=.*@offline.data.prefix=" + offlineDataPrefix + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@namenode.uri=.*@namenode.uri=" + nameNodeUri + "@' "+testPath + task_manager_name+ 
"/conf/taskmanager.properties" - ); - commands.forEach(ExecutorUtil::run); - ExecutorUtil.run("sh "+testPath+task_manager_name+"/bin/start.sh start taskmanager"); - boolean used = LinuxUtil.checkPortIsUsed(port,3000,30); - if(used){ - log.info("task manager部署成功,port:"+port); - return port; - } - }catch (Exception e){ - e.printStackTrace(); - } - throw new RuntimeException("task manager部署失败"); - } - - public FEDBInfo deployStandalone(String testPath, String ip){ - try { - int nsPort = LinuxUtil.getNoUsedPort(); - int tabletPort = LinuxUtil.getNoUsedPort(); - int apiServerPort = LinuxUtil.getNoUsedPort(); - String nsEndpoint = ip+":"+nsPort; - String tabletEndpoint = ip+":"+tabletPort; - String apiServerEndpoint = ip+":"+apiServerPort; - String standaloneName = "/openmldb-standalone"; - List commands = Lists.newArrayList( - "cp -r " + testPath + "/" + fedbName + " " + testPath + standaloneName, - "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_nameserver.flags", - "sed -i 's#--endpoint=.*#--endpoint=" + nsEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@#--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_tablet.flags", - "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_tablet.flags", - "sed -i 's#--endpoint=.*#--endpoint=" + tabletEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_tablet.flags", - "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' 
"+testPath+standaloneName+"/conf/standalone_apiserver.flags", - "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", - "sed -i 's#--endpoint=.*#--endpoint="+apiServerEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", - "sed -i 's#--nameserver=.*#--nameserver="+nsEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags" - ); - commands.forEach(ExecutorUtil::run); - if(StringUtils.isNotEmpty(fedbPath)){ - FEDBCommandUtil.cpRtidb(testPath+standaloneName,fedbPath); - } - ExecutorUtil.run("sh "+testPath+standaloneName+"/bin/start-standalone.sh"); - boolean nsOk = LinuxUtil.checkPortIsUsed(nsPort,3000,30); - boolean tabletOk = LinuxUtil.checkPortIsUsed(tabletPort,3000,30); - boolean apiServerOk = LinuxUtil.checkPortIsUsed(apiServerPort,3000,30); - if(nsOk&&tabletOk&&apiServerOk){ - log.info(String.format("standalone 部署成功,nsPort:{},tabletPort:{},apiServerPort:{}",nsPort,tabletPort,apiServerPort)); - FEDBInfo fedbInfo = FEDBInfo.builder() - .deployType(OpenMLDBDeployType.STANDALONE) - .fedbPath(testPath+"/openmldb-standalone/bin/openmldb") - .apiServerEndpoints(Lists.newArrayList()) - .basePath(testPath) - .nsEndpoints(Lists.newArrayList(nsEndpoint)) - .nsNum(1) - .host(ip) - .port(nsPort) - .tabletNum(1) - .tabletEndpoints(Lists.newArrayList(tabletEndpoint)) - .apiServerEndpoints(Lists.newArrayList(apiServerEndpoint)) - .build(); - return fedbInfo; - } - }catch (Exception e){ - e.printStackTrace(); - } - throw new RuntimeException("standalone 部署失败"); - } -} - diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbSDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbSDKUtil.java deleted file mode 100644 index 43719a8ea46..00000000000 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbSDKUtil.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com._4paradigm.openmldb.test_common.util; - -public class FedbSDKUtil { - -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbTool.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbTool.java deleted file mode 100755 index 1b077392015..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/FedbTool.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com._4paradigm.openmldb.test_common.util; - - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.Assert; - -import java.io.File; -import java.io.IOException; -import java.lang.reflect.Field; -import java.util.*; - - -public class FedbTool { - private static final Logger logger = LoggerFactory.getLogger(FedbTool.class); - - public static String getFilePath(String filename) { - return FedbTool.class.getClassLoader().getResource(filename).getFile(); - } - - public static String getCasePath(String yamlCaseDir, String casePath) { - String caseDir = StringUtils.isEmpty(yamlCaseDir) ? FedbTool.rtidbDir().getAbsolutePath() : yamlCaseDir; - Assert.assertNotNull(caseDir); - String caseAbsPath = caseDir + "/cases/" + casePath; - logger.debug("case absolute path: {}", caseAbsPath); - return caseAbsPath; - } - - public static File rtidbDir() { - File directory = new File("."); - directory = directory.getAbsoluteFile(); - while (null != directory) { - if (directory.isDirectory() && "OpenMLDB".equals(directory.getName())) { - break; - } - logger.debug("current directory name {}", directory.getName()); - directory = directory.getParentFile(); - } - - if ("OpenMLDB".equals(directory.getName())) { - return directory; - } else { - return null; - } - } - - public static void sleep(long time) { - try { - Thread.sleep(time); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - - public static List getPaths(File directory) { - List list = new ArrayList<>(); - Collection files = FileUtils.listFiles(directory, null, true); - for (File f : files) { - list.add(f.getAbsolutePath()); - } - Collections.sort(list); - return list; - } - - - public static Properties getProperties(String fileName) { - Properties ps = new Properties(); - try { - ps.load(FedbTool.class.getClassLoader().getResourceAsStream(fileName)); - } catch (IOException e) { - 
e.printStackTrace(); - logger.error(e.getMessage()); - } - return ps; - } - - public static String uuid() { - String uuid = UUID.randomUUID().toString().replaceAll("-", ""); - return uuid; - } - - public static void mergeObject(T origin, T destination) { - if (origin == null || destination == null) - return; - if (!origin.getClass().equals(destination.getClass())) - return; - Field[] fields = origin.getClass().getDeclaredFields(); - for (int i = 0; i < fields.length; i++) { - try { - fields[i].setAccessible(true); - Object originValue = fields[i].get(origin); - Object destValue = fields[i].get(destination); - if (null == destValue) { - fields[i].set(destination, originValue); - } - fields[i].setAccessible(false); - } catch (Exception e) { - } - } - } - -} - - - - - - - - - - - - diff --git a/test/integration-test/openmldb-test-java/openmldb-tool-test/src/test/java/com/_4paradigm/openmldb/tool_test/import_tool/data/CheckData.java b/test/integration-test/openmldb-test-java/openmldb-tool-test/src/test/java/com/_4paradigm/openmldb/tool_test/import_tool/data/CheckData.java index 4bfca0b822f..50fb0fa1411 100644 --- a/test/integration-test/openmldb-test-java/openmldb-tool-test/src/test/java/com/_4paradigm/openmldb/tool_test/import_tool/data/CheckData.java +++ b/test/integration-test/openmldb-test-java/openmldb-tool-test/src/test/java/com/_4paradigm/openmldb/tool_test/import_tool/data/CheckData.java @@ -17,7 +17,7 @@ -import com._4paradigm.openmldb.java_sdk_test.common.FedbClient; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; import java.io.BufferedReader; import java.io.FileInputStream; @@ -35,7 +35,7 @@ public void testImportDataRight() throws Exception { String[] data = line.split(","); } - FedbClient fedbClient = new FedbClient("172.24.4.55:10015","/openmldb"); + OpenMLDBClient fedbClient = new OpenMLDBClient("172.24.4.55:10015","/openmldb"); } } From 1170cda0097226731760c83aefab2ee43fc66004 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 5 Jul 
2022 15:06:17 +0800 Subject: [PATCH 017/172] Deploy compatible mac --- .../common/OpenMLDBDeploy.java | 112 +++++++++--------- .../src/main/resources/deploy.properties | 4 + .../qa/openmldb_deploy/test/TmpDeploy.java | 2 +- .../command_tool/common/ExecutorUtil.java | 3 +- .../command_tool/common/LinuxUtil.java | 27 +++-- 5 files changed, 82 insertions(+), 66 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index b156fe043cc..84df42f8074 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -25,6 +25,7 @@ import com._4paradigm.qa.openmldb_deploy.util.OpenMLDBCommandUtil; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import com._4paradigm.test_tool.command_tool.common.LinuxUtil; +import com._4paradigm.test_tool.command_tool.util.OSInfoUtil; import com.google.common.collect.Lists; import lombok.Setter; import lombok.extern.slf4j.Slf4j; @@ -51,9 +52,12 @@ public class OpenMLDBDeploy { public static final int SLEEP_TIME = 10*1000; + private String sedSeparator; + public OpenMLDBDeploy(String version){ this.version = version; this.openMLDBUrl = OpenMLDBDeployConfig.getUrl(version); + this.sedSeparator = OSInfoUtil.isMac()?"''":""; } public OpenMLDBInfo deployStandalone(){ String testPath = DeployUtil.getTestPath(version); @@ -180,8 +184,8 @@ public int deployZK(String testPath){ "wget -P "+testPath+" "+ OpenMLDBDeployConfig.getZKUrl(version), "tar -zxvf "+testPath+"/zookeeper-3.4.14.tar.gz -C "+testPath, "cp "+testPath+"/zookeeper-3.4.14/conf/zoo_sample.cfg 
"+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", - "sed -i 's#dataDir=/tmp/zookeeper#dataDir="+testPath+"/data#' "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", - "sed -i 's#clientPort=2181#clientPort="+port+"#' "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", + "sed -i "+sedSeparator+" 's#dataDir=/tmp/zookeeper#dataDir="+testPath+"/data#' "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", + "sed -i "+sedSeparator+" 's#clientPort=2181#clientPort="+port+"#' "+testPath+"/zookeeper-3.4.14/conf/zoo.cfg", "sh "+testPath+"/zookeeper-3.4.14/bin/zkServer.sh start" }; for(String command:commands){ @@ -204,15 +208,15 @@ public int deployNS(String testPath, String ip, int index, String zk_endpoint, S String ns_name = "/openmldb-ns-"+index; List commands = Lists.newArrayList( "cp -r " + testPath + "/" + openMLDBName + " " + testPath + ns_name, - "sed -i 's#--zk_cluster=.*#--zk_cluster=" + zk_endpoint + "#' " + testPath + ns_name + "/conf/nameserver.flags", - "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", - "sed -i 's@#--zk_cluster=.*@--zk_cluster=" + zk_endpoint + "@' " + testPath + ns_name + "/conf/nameserver.flags", - "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", - "sed -i 's@--tablet=.*@#--tablet=127.0.0.1:9921@' "+testPath+ns_name+"/conf/nameserver.flags", + "sed -i "+sedSeparator+" 's#--zk_cluster=.*#--zk_cluster=" + zk_endpoint + "#' " + testPath + ns_name + "/conf/nameserver.flags", + "sed -i "+sedSeparator+" 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", + "sed -i "+sedSeparator+" 's@#--zk_cluster=.*@--zk_cluster=" + zk_endpoint + "@' " + testPath + ns_name + "/conf/nameserver.flags", + "sed -i "+sedSeparator+" 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", + "sed -i "+sedSeparator+" 's@--tablet=.*@#--tablet=127.0.0.1:9921@' "+testPath+ns_name+"/conf/nameserver.flags", "echo 
'--request_timeout_ms=60000' >> " + testPath + ns_name + "/conf/nameserver.flags" ); if(useName){ - commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + ns_name + "/conf/nameserver.flags"); + commands.add("sed -i "+sedSeparator+" 's/--endpoint=.*/#&/' " + testPath + ns_name + "/conf/nameserver.flags"); commands.add("echo '--use_name=true' >> " + testPath + ns_name + "/conf/nameserver.flags"); commands.add("echo '--port=" + port + "' >> " + testPath + ns_name + "/conf/nameserver.flags"); if(name!=null){ @@ -221,13 +225,13 @@ public int deployNS(String testPath, String ip, int index, String zk_endpoint, S } }else{ String ip_port = ip+":"+port; - commands.add("sed -i 's#--endpoint=.*#--endpoint=" + ip_port + "#' " + testPath + ns_name + "/conf/nameserver.flags"); + commands.add("sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint=" + ip_port + "#' " + testPath + ns_name + "/conf/nameserver.flags"); } if(isCluster){ - commands.add("sed -i 's@#--enable_distsql=.*@--enable_distsql=true@' " + testPath + ns_name + "/conf/nameserver.flags"); + commands.add("sed -i "+sedSeparator+" 's@#--enable_distsql=.*@--enable_distsql=true@' " + testPath + ns_name + "/conf/nameserver.flags"); // commands.add("echo '--enable_distsql=true' >> " + testPath + ns_name + "/conf/nameserver.flags"); }else{ - commands.add("sed -i 's@#--enable_distsql=.*@--enable_distsql=false@' " + testPath + ns_name + "/conf/nameserver.flags"); + commands.add("sed -i "+sedSeparator+" 's@#--enable_distsql=.*@--enable_distsql=false@' " + testPath + ns_name + "/conf/nameserver.flags"); } commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ @@ -251,21 +255,21 @@ public int deployTablet(String testPath, String ip, int index, String zk_endpoin String tablet_name = "/openmldb-tablet-"+index; List commands = Lists.newArrayList( "cp -r "+testPath+"/"+ openMLDBName +" "+testPath+tablet_name, - "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' 
"+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@#--make_snapshot_threshold_offset=100000@--make_snapshot_threshold_offset=10@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--scan_concurrency_limit=16@--scan_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--put_concurrency_limit=8@--put_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", - "sed -i 's@--get_concurrency_limit=16@--get_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's@#--make_snapshot_threshold_offset=100000@--make_snapshot_threshold_offset=10@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's@--scan_concurrency_limit=16@--scan_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's@--put_concurrency_limit=8@--put_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", + "sed -i "+sedSeparator+" 's@--get_concurrency_limit=16@--get_concurrency_limit=0@' "+testPath+tablet_name+"/conf/tablet.flags", "echo '--hdd_root_path=./db_hdd' >> "+testPath+tablet_name+"/conf/tablet.flags", "echo '--recycle_bin_hdd_root_path=./recycle_hdd' >> 
"+testPath+tablet_name+"/conf/tablet.flags", "echo '--ssd_root_path=./db_ssd' >> "+testPath+tablet_name+"/conf/tablet.flags", "echo '--recycle_bin_ssd_root_path=./recycle_ssd' >> "+testPath+tablet_name+"/conf/tablet.flags" ); if(useName){ - commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + tablet_name + "/conf/tablet.flags"); + commands.add("sed -i "+sedSeparator+" 's/--endpoint=.*/#&/' " + testPath + tablet_name + "/conf/tablet.flags"); commands.add("echo '--use_name=true' >> " + testPath + tablet_name + "/conf/tablet.flags"); commands.add("echo '--port=" + port + "' >> " + testPath + tablet_name + "/conf/tablet.flags"); if(name!=null){ @@ -274,13 +278,13 @@ public int deployTablet(String testPath, String ip, int index, String zk_endpoin } }else{ String ip_port = ip+":"+port; - commands.add("sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+tablet_name+"/conf/tablet.flags"); + commands.add("sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+tablet_name+"/conf/tablet.flags"); } if(isCluster){ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + tablet_name + "/conf/tablet.flags"); + commands.add("sed -i "+sedSeparator+" 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + tablet_name + "/conf/tablet.flags"); }else{ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + tablet_name + "/conf/tablet.flags"); + commands.add("sed -i "+sedSeparator+" 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + tablet_name + "/conf/tablet.flags"); } commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ @@ -303,14 +307,14 @@ public int deployApiserver(String testPath, String ip, int index, String zk_endp String apiserver_name = "/openmldb-apiserver-"+index; List commands = Lists.newArrayList( "cp -r "+testPath+"/"+ openMLDBName +" "+testPath+apiserver_name, - "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' 
"+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", - "sed -i 's@--nameserver=.*@#--nameserver=127.0.0.1:6527@' "+testPath+apiserver_name+"/conf/apiserver.flags" + "sed -i "+sedSeparator+" 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i "+sedSeparator+" 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i "+sedSeparator+" 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i "+sedSeparator+" 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", + "sed -i "+sedSeparator+" 's@--nameserver=.*@#--nameserver=127.0.0.1:6527@' "+testPath+apiserver_name+"/conf/apiserver.flags" ); if(useName){ - commands.add("sed -i 's/--endpoint=.*/#&/' " + testPath + apiserver_name + "/conf/apiserver.flags"); + commands.add("sed -i "+sedSeparator+" 's/--endpoint=.*/#&/' " + testPath + apiserver_name + "/conf/apiserver.flags"); commands.add("echo '--use_name=true' >> " + testPath + apiserver_name + "/conf/apiserver.flags"); commands.add("echo '--port=" + port + "' >> " + testPath + apiserver_name + "/conf/apiserver.flags"); if(name!=null){ @@ -319,13 +323,13 @@ public int deployApiserver(String testPath, String ip, int index, String zk_endp } }else{ String ip_port = ip+":"+port; - commands.add("sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+apiserver_name+"/conf/apiserver.flags"); + commands.add("sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint="+ip_port+"#' "+testPath+apiserver_name+"/conf/apiserver.flags"); } if(isCluster){ - 
commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + apiserver_name + "/conf/apiserver.flags"); + commands.add("sed -i "+sedSeparator+" 's#--enable_distsql=.*#--enable_distsql=true#' " + testPath + apiserver_name + "/conf/apiserver.flags"); }else{ - commands.add("sed -i 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + apiserver_name + "/conf/apiserver.flags"); + commands.add("sed -i "+sedSeparator+" 's#--enable_distsql=.*#--enable_distsql=false#' " + testPath + apiserver_name + "/conf/apiserver.flags"); } commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ @@ -370,16 +374,16 @@ public int deployTaskManager(String testPath, String ip, int index, String zk_en } List commands = Lists.newArrayList( - "sed -i 's#server.host=.*#server.host=" + ip + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", - "sed -i 's#server.port=.*#server.port=" + port + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", - "sed -i 's#zookeeper.cluster=.*#zookeeper.cluster=" + zk_endpoint + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", - "sed -i 's@zookeeper.root_path=.*@zookeeper.root_path=/openmldb@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@spark.master=.*@spark.master=" + sparkMaster + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@spark.home=.*@spark.home=" + sparkHome + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@batchjob.jar.path=.*@batchjob.jar.path=" + batchJobJarPath + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@spark.yarn.jars=.*@spark.yarn.jars=" + sparkYarnJars + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 's@offline.data.prefix=.*@offline.data.prefix=" + offlineDataPrefix + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", - "sed -i 
's@namenode.uri=.*@namenode.uri=" + nameNodeUri + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties" + "sed -i "+sedSeparator+" 's#server.host=.*#server.host=" + ip + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's#server.port=.*#server.port=" + port + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's#zookeeper.cluster=.*#zookeeper.cluster=" + zk_endpoint + "#' " + testPath + task_manager_name + "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's@zookeeper.root_path=.*@zookeeper.root_path=/openmldb@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's@spark.master=.*@spark.master=" + sparkMaster + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's@spark.home=.*@spark.home=" + sparkHome + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's@batchjob.jar.path=.*@batchjob.jar.path=" + batchJobJarPath + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's@spark.yarn.jars=.*@spark.yarn.jars=" + sparkYarnJars + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's@offline.data.prefix=.*@offline.data.prefix=" + offlineDataPrefix + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties", + "sed -i "+sedSeparator+" 's@namenode.uri=.*@namenode.uri=" + nameNodeUri + "@' "+testPath + task_manager_name+ "/conf/taskmanager.properties" ); commands.forEach(ExecutorUtil::run); ExecutorUtil.run("sh "+testPath+task_manager_name+"/bin/start.sh start taskmanager"); @@ -405,22 +409,22 @@ public OpenMLDBInfo deployStandalone(String testPath, String ip){ String standaloneName = "/openmldb-standalone"; List commands = Lists.newArrayList( "cp -r " + testPath + "/" + openMLDBName + " " + testPath + standaloneName, - "sed -i 
's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_nameserver.flags", - "sed -i 's#--endpoint=.*#--endpoint=" + nsEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@#--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", - "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_tablet.flags", - "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_tablet.flags", - "sed -i 's#--endpoint=.*#--endpoint=" + tabletEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_tablet.flags", + "sed -i "+sedSeparator+" 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", + "sed -i "+sedSeparator+" 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_nameserver.flags", + "sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint=" + nsEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", + "sed -i "+sedSeparator+" 's@#--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", + "sed -i "+sedSeparator+" 's@--tablet=.*@--tablet=" + tabletEndpoint + "@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", + "sed -i "+sedSeparator+" 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_tablet.flags", + "sed -i "+sedSeparator+" 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_tablet.flags", + 
"sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint=" + tabletEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_tablet.flags", "echo '--hdd_root_path=./db_hdd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", "echo '--recycle_bin_hdd_root_path=./recycle_hdd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", "echo '--ssd_root_path=./db_ssd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", "echo '--recycle_bin_ssd_root_path=./recycle_ssd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", - "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", - "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", - "sed -i 's#--endpoint=.*#--endpoint="+apiServerEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", - "sed -i 's#--nameserver=.*#--nameserver="+nsEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags" + "sed -i "+sedSeparator+" 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", + "sed -i "+sedSeparator+" 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", + "sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint="+apiServerEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags", + "sed -i "+sedSeparator+" 's#--nameserver=.*#--nameserver="+nsEndpoint+"#' "+testPath+standaloneName+"/conf/standalone_apiserver.flags" ); commands.forEach(ExecutorUtil::run); if(StringUtils.isNotEmpty(openMLDBPath)){ diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index a266c821cfc..3df7998d397 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -22,3 +22,7 @@ standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz + +tmp_mac=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-darwin.tar.gz +tmp_mac_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java index 81f6212bf2b..966b95ea440 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java @@ -9,7 +9,7 @@ public class TmpDeploy { @Test @Parameters({"version","openMLDBPath"}) - public void testTmp(@Optional("tmp") String version,@Optional("") String openMLDBPath){ + public void testTmp(@Optional("tmp_mac") String version,@Optional("") String openMLDBPath){ OpenMLDBDeploy deploy = new OpenMLDBDeploy(version); deploy.setOpenMLDBPath(openMLDBPath); deploy.setCluster(true); diff --git a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java index 
194eb13f41a..9704569d7ca 100644 --- a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java +++ b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java @@ -43,7 +43,8 @@ private static void printResult(List lines){ private static CommandExecutor getExecutor(){ CommandExecutor executor; if(OSInfoUtil.isMac()){ - executor = new RemoteExecutor(); +// executor = new RemoteExecutor(); + executor = new LocalExecutor(); }else{ executor = new LocalExecutor(); } diff --git a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java index 6ba5b38f8b0..2b90e6ebb0a 100644 --- a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java +++ b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LinuxUtil.java @@ -1,5 +1,6 @@ package com._4paradigm.test_tool.command_tool.common; +import com._4paradigm.test_tool.command_tool.util.OSInfoUtil; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -8,18 +9,20 @@ @Slf4j public class LinuxUtil { - public static int port = 10000; + public static int port = 30000; public static boolean checkPortIsUsed(int port){ - String command ="netstat -ntulp | grep "+port; - try { + if (OSInfoUtil.isMac()) { + String command = "lsof -i:" + port; + List result = ExecutorUtil.run(command); + return result.size()>0; + }else { + String command = "netstat -ntulp | grep " + port; List result = ExecutorUtil.run(command); - for(String line:result){ - if(line.contains(port+"")){ + for (String line : result) { + if (line.contains(port + "")) { return true; } } - }catch (Exception e){ - e.printStackTrace(); } return false; } @@ -62,9 +65,13 @@ public static boolean cp(String src,String dst){ } public static 
String hostnameI(){ - String command = "hostname -i"; ///usr/sbin/ - List result = ExecutorUtil.run(command); - return result.get(0); + if(OSInfoUtil.isMac()){ + return "127.0.0.1"; + }else{ + String command = "hostname -i"; ///usr/sbin/ + List result = ExecutorUtil.run(command); + return result.get(0); + } } public static String getLocalIP(){ From bc6fd0e15d2558fb54c3bb5c3e45d446e6381475 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 5 Jul 2022 18:13:18 +0800 Subject: [PATCH 018/172] Optimize the structure --- .../openmldb-deploy/pom.xml | 5 - .../common/OpenMLDBDeploy.java | 9 +- .../conf/OpenMLDBDeployConfig.java | 4 +- .../qa/openmldb_deploy/util/Tool.java | 60 +++++ .../openmldb-devops-test/pom.xml | 27 +++ .../high_availability/TestCluster.java | 7 + .../http_test/common/ClusterTest.java | 12 +- .../http_test/common/RestfulCaseFileList.java | 4 +- .../http_test/config/FedbRestfulConfig.java | 4 +- .../http_test/executor/BaseExecutor.java | 5 +- .../executor/RestfulCliExecutor.java | 28 ++- .../http_test/executor/RestfulExecutor.java | 26 +-- .../openmldb/http_test/tmp/TestDropTable.java | 10 +- .../openmldb-sdk-test/pom.xml | 15 +- .../java_sdk_test/checker/BaseChecker.java | 10 +- .../checker/CatCheckerByCli.java | 11 +- .../checker/CheckerStrategy.java | 4 +- .../java_sdk_test/checker/ColumnsChecker.java | 10 +- .../checker/ColumnsCheckerByCli.java | 4 +- .../checker/ColumnsCheckerByJBDC.java | 4 +- .../java_sdk_test/checker/CountChecker.java | 4 +- .../checker/DeploymentCheckerByCli.java | 13 +- .../DeploymentContainsCheckerByCli.java | 4 +- .../checker/DeploymentCountCheckerByCli.java | 4 +- .../checker/DiffResultChecker.java | 8 +- .../checker/DiffVersionChecker.java | 6 +- .../java_sdk_test/checker/IndexChecker.java | 7 +- .../checker/IndexCountChecker.java | 4 +- .../java_sdk_test/checker/OptionsChecker.java | 12 +- .../java_sdk_test/checker/ResultChecker.java | 10 +- .../checker/ResultCheckerByCli.java | 12 +- .../checker/ResultCheckerByJDBC.java | 
10 +- .../java_sdk_test/checker/SuccessChecker.java | 4 +- .../java_sdk_test/common/BaseTest.java | 3 +- .../java_sdk_test/common/FedbConfig.java | 11 +- .../java_sdk_test/common/FedbTest.java | 16 +- .../common/FedbVersionConfig.java | 2 +- .../common/StandaloneClient.java | 34 --- .../java_sdk_test/common/StandaloneTest.java | 12 +- .../entity/FesqlDataProviderList.java | 2 +- .../java_sdk_test/executor/BaseExecutor.java | 5 +- .../executor/BaseSQLExecutor.java | 14 +- .../executor/BatchSQLExecutor.java | 24 +- .../executor/CommandExecutor.java | 44 ++-- .../executor/DiffResultExecutor.java | 4 +- .../executor/InsertPreparedExecutor.java | 6 +- .../java_sdk_test/executor/MysqlExecutor.java | 10 +- .../java_sdk_test/executor/NullExecutor.java | 4 +- .../executor/QueryPreparedExecutor.java | 14 +- .../executor/RequestQuerySQLExecutor.java | 24 +- .../executor/Sqlite3Executor.java | 10 +- .../executor/StoredProcedureSQLExecutor.java | 20 +- .../openmldb/java_sdk_test/util/JDBCUtil.java | 11 +- .../openmldb/java_sdk_test/util/Tool.java | 111 --------- .../auto_gen_case/AutoGenCaseTest.java | 14 +- .../cluster/v030/SchemaTest.java | 17 +- .../java_sdk_test/deploy/TestFEDBDeploy.java | 8 +- .../entity/FesqlDataProviderTest.java | 4 +- .../standalone/v030/DMLTest.java | 13 +- .../java_sdk_test/temp/DebugTest.java | 4 +- .../java_sdk_test/temp/TestCommand.java | 4 +- .../java_sdk_test/temp/TestDropTable.java | 8 +- .../openmldb-test-common/pom.xml | 21 ++ .../test_common/bean}/OpenMLDBColumn.java | 2 +- .../test_common/bean}/OpenMLDBIndex.java | 2 +- .../test_common/bean/OpenMLDBResult.java} | 6 +- .../test_common/bean}/OpenMLDBSchema.java | 2 +- .../test_common}/command/CommandUtil.java | 5 +- .../command/OpenMLDBComamndFacade.java | 14 +- .../command/OpenMLDBCommandFactory.java} | 7 +- .../command/OpenMLDBCommandUtil.java | 27 ++- .../command/chain/AbstractSQLHandler.java | 8 +- .../command/chain/DDLHandler.java | 17 +- .../command/chain/DMLHandler.java | 14 +- 
.../command/chain/DescHandler.java | 14 +- .../command/chain/QueryHandler.java | 14 +- .../command/chain/ShowDeploymentHandler.java | 14 +- .../command/chain/ShowDeploymentsHandler.java | 14 +- .../command/chain/SqlChainManager.java | 8 +- .../test_common/openmldb}/OpenMLDBClient.java | 21 +- .../openmldb/OpenMLDBGlobalVar.java} | 4 +- .../{FedbHttp.java => OpenMLDBHttp.java} | 4 +- .../restful/model/RestfulCaseFile.java | 5 +- .../restful/util/OpenMLDBTool.java | 130 ----------- .../test_common}/util/CommandResultUtil.java | 6 +- .../{restful => }/util/HttpRequest.java | 2 +- .../test_common/util/OpenMLDBUtil.java} | 177 +++++++------- .../openmldb/test_common/util/ResultUtil.java | 32 +++ .../test_common/{restful => }/util/Tool.java | 219 +++++++----------- .../tool_test/import_tool/data/CheckData.java | 2 +- .../openmldb-test-java/pom.xml | 1 + 91 files changed, 709 insertions(+), 893 deletions(-) create mode 100755 test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/Tool.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java delete mode 100755 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/Tool.java rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean}/OpenMLDBColumn.java (93%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity => 
openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean}/OpenMLDBIndex.java (93%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java} (95%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean}/OpenMLDBSchema.java (93%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/CommandUtil.java (89%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/OpenMLDBComamndFacade.java (70%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java} (91%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/OpenMLDBCommandUtil.java (75%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/AbstractSQLHandler.java (77%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => 
openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/DDLHandler.java (72%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/DMLHandler.java (72%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/DescHandler.java (72%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/QueryHandler.java (80%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/ShowDeploymentHandler.java (73%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/ShowDeploymentsHandler.java (75%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/command/chain/SqlChainManager.java (85%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb}/OpenMLDBClient.java (68%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java => 
openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java} (91%) rename test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/common/{FedbHttp.java => OpenMLDBHttp.java} (97%) delete mode 100755 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/util/CommandResultUtil.java (95%) rename test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/{restful => }/util/HttpRequest.java (99%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/OpenMLDBUtil.java} (89%) create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java rename test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/{restful => }/util/Tool.java (52%) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml index e155a43d991..ba43c16d75a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml @@ -17,11 +17,6 @@ - - com.4paradigm.openmldb - openmldb-test-common - ${project.version} - com.4paradigm.openmldb.test-tool command-tool diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java 
b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 84df42f8074..3cdfd26cec7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -16,13 +16,12 @@ package com._4paradigm.qa.openmldb_deploy.common; - -import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.conf.OpenMLDBDeployConfig; import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; import com._4paradigm.qa.openmldb_deploy.util.OpenMLDBCommandUtil; +import com._4paradigm.qa.openmldb_deploy.util.Tool; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import com._4paradigm.test_tool.command_tool.common.LinuxUtil; import com._4paradigm.test_tool.command_tool.util.OSInfoUtil; @@ -113,7 +112,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ tablet_port = deployTablet(testPath, ip, i, zk_point,null); } fedbInfo.getTabletEndpoints().add(ip+":"+tablet_port); - OpenMLDBTool.sleep(SLEEP_TIME); + Tool.sleep(SLEEP_TIME); } for(int i=1;i<=ns;i++){ int ns_port; @@ -125,7 +124,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ ns_port = deployNS(testPath, ip, i, zk_point,null); } fedbInfo.getNsEndpoints().add(ip+":"+ns_port); - OpenMLDBTool.sleep(SLEEP_TIME); + Tool.sleep(SLEEP_TIME); } for(int i=1;i<=1;i++) { @@ -138,7 +137,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ apiserver_port = deployApiserver(testPath, ip, i, zk_point,null); } fedbInfo.getApiServerEndpoints().add(ip+":"+apiserver_port); - 
OpenMLDBTool.sleep(SLEEP_TIME); + Tool.sleep(SLEEP_TIME); } if(version.equals("tmp")||version.compareTo("0.4.0")>=0) { for (int i = 1; i <= 1; i++) { diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java index 64ba013f290..0b4f228db29 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java @@ -16,8 +16,8 @@ package com._4paradigm.qa.openmldb_deploy.conf; -import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; +import com._4paradigm.qa.openmldb_deploy.util.Tool; import lombok.extern.slf4j.Slf4j; import java.util.Properties; @@ -34,7 +34,7 @@ public class OpenMLDBDeployConfig { public static final Properties CONFIG; static { - CONFIG = OpenMLDBTool.getProperties("deploy.properties"); + CONFIG = Tool.getProperties("deploy.properties"); ZK_URL = CONFIG.getProperty("zk_url"); SPARK_URL = CONFIG.getProperty("spark_url"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/Tool.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/Tool.java new file mode 100755 index 00000000000..9acaacfad32 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/util/Tool.java @@ -0,0 +1,60 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.qa.openmldb_deploy.util; + + +import lombok.extern.slf4j.Slf4j; + +import java.io.IOException; +import java.util.*; + +@Slf4j +public class Tool { + + public static void sleep(long time) { + try { + Thread.sleep(time); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + + public static Properties getProperties(String fileName) { + Properties ps = new Properties(); + try { + ps.load(Tool.class.getClassLoader().getResourceAsStream(fileName)); + } catch (IOException e) { + e.printStackTrace(); + log.error(e.getMessage()); + } + return ps; + } + +} + + + + + + + + + + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml new file mode 100644 index 00000000000..70e8d174a9c --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml @@ -0,0 +1,27 @@ + + + + openmldb-test-java + com.4paradigm.openmldb + 0.1.0-SNAPSHOT + + 4.0.0 + + openmldb-devops-test + + + 8 + 8 + + + + + com.4paradigm.openmldb + openmldb-deploy + ${project.version} + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java new file mode 100644 index 00000000000..36bac096fe2 --- /dev/null +++ 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -0,0 +1,7 @@ +package com._4paradigm.openmldb.devops_test.high_availability; + +public class TestCluster { + public void testMoreReplica(){ + + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java index 4306eb12d0d..207a5c52617 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.http_test.common; -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; @@ -64,10 +64,10 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) .build(); - FedbGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.env = "cluster"; } - OpenMLDBClient fesqlClient = new OpenMLDBClient(RestfulGlobalVar.mainInfo); - executor = fesqlClient.getExecutor(); - System.out.println("fesqlClient = " + fesqlClient); + OpenMLDBClient openMLDBClient = new 
OpenMLDBClient(RestfulGlobalVar.mainInfo.getZk_cluster(),RestfulGlobalVar.mainInfo.getZk_root_path()); + executor = openMLDBClient.getExecutor(); + System.out.println("fesqlClient = " + openMLDBClient); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java index 1448162d4a9..bf54e6a5969 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/RestfulCaseFileList.java @@ -21,7 +21,7 @@ import com._4paradigm.openmldb.test_common.provider.YamlUtil; import com._4paradigm.openmldb.test_common.restful.model.RestfulCase; import com._4paradigm.openmldb.test_common.restful.model.RestfulCaseFile; -import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; +import com._4paradigm.openmldb.test_common.util.Tool; import org.apache.commons.lang3.StringUtils; import java.io.File; @@ -61,7 +61,7 @@ public static List generatorCaseFileList(String[] caseFiles) th && !FedbRestfulConfig.FESQL_CASE_PATH.equals(caseFile)) { continue; } - String casePath = OpenMLDBTool.getCasePath(FedbRestfulConfig.YAML_CASE_BASE_DIR, caseFile); + String casePath = Tool.getCasePath(FedbRestfulConfig.YAML_CASE_BASE_DIR, caseFile); File file = new File(casePath); if (!file.exists()) { continue; diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java index 67d8769caec..ff216bf3a93 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/config/FedbRestfulConfig.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.http_test.config; import com._4paradigm.openmldb.http_test.common.RestfulGlobalVar; -import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; +import com._4paradigm.openmldb.test_common.util.Tool; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.testng.collections.Lists; @@ -47,7 +47,7 @@ public class FedbRestfulConfig { // public static final String BASE_URL; public static final String DB_NAME; - public static final Properties CONFIG = OpenMLDBTool.getProperties("fedb.properties"); + public static final Properties CONFIG = Tool.getProperties("fedb.properties"); static { String levelStr = System.getProperty("caseLevel"); diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/BaseExecutor.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/BaseExecutor.java index d4f203b7bc8..4fce6d7d3d9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/BaseExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/BaseExecutor.java @@ -16,8 +16,7 @@ package com._4paradigm.openmldb.http_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.common.IExecutor; import com._4paradigm.openmldb.test_common.common.LogProxy; import 
com._4paradigm.openmldb.test_common.restful.model.HttpResult; @@ -37,7 +36,7 @@ public abstract class BaseExecutor implements IExecutor { protected Logger logger = new LogProxy(log); protected HttpResult httpResult; protected RestfulCase restfulCase; - protected FesqlResult fesqlResult; + protected OpenMLDBResult fesqlResult; protected List tableNames; public BaseExecutor(RestfulCase restfulCase){ diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java index 5a420cff64e..3478bcf6e5d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java @@ -20,21 +20,19 @@ import com._4paradigm.openmldb.http_test.common.RestfulGlobalVar; import com._4paradigm.openmldb.http_test.config.FedbRestfulConfig; import com._4paradigm.openmldb.java_sdk_test.checker.ResultChecker; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBComamndFacade; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBCommandUtil; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; -import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import 
com._4paradigm.openmldb.test_common.common.Checker; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.InputDesc; -import com._4paradigm.openmldb.test_common.restful.common.FedbHttp; +import com._4paradigm.openmldb.test_common.restful.common.OpenMLDBHttp; import com._4paradigm.openmldb.test_common.restful.model.AfterAction; import com._4paradigm.openmldb.test_common.restful.model.BeforeAction; import com._4paradigm.openmldb.test_common.restful.model.HttpMethod; import com._4paradigm.openmldb.test_common.restful.model.RestfulCase; +import com._4paradigm.openmldb.test_common.util.Tool; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -43,10 +41,10 @@ import java.util.stream.Collectors; public class RestfulCliExecutor extends BaseExecutor{ - private FedbHttp fedbHttp; + private OpenMLDBHttp fedbHttp; public RestfulCliExecutor(RestfulCase restfulCase) { super(restfulCase); - fedbHttp = new FedbHttp(); + fedbHttp = new OpenMLDBHttp(); fedbHttp.setUrl("http://"+ RestfulGlobalVar.mainInfo.getApiServerEndpoints().get(0)); fedbHttp.setMethod(HttpMethod.valueOf(restfulCase.getMethod())); String uri = restfulCase.getUri(); @@ -63,7 +61,7 @@ public RestfulCliExecutor(RestfulCase restfulCase) { @Override public void prepare() { - FesqlResult createDBResult = OpenMLDBCommandUtil.createDB(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME); + OpenMLDBResult createDBResult = OpenMLDBCommandUtil.createDB(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME); logger.info("create db:{},{}", FedbRestfulConfig.DB_NAME, createDBResult.isOk()); BeforeAction beforeAction = restfulCase.getBeforeAction(); if(beforeAction==null){ @@ -71,7 +69,7 @@ public void prepare() { return; } if(CollectionUtils.isNotEmpty(beforeAction.getTables())) { - FesqlResult res = OpenMLDBCommandUtil.createAndInsert(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, beforeAction.getTables()); 
+ OpenMLDBResult res = OpenMLDBCommandUtil.createAndInsert(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, beforeAction.getTables()); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail "); } @@ -84,7 +82,7 @@ public void prepare() { } if(CollectionUtils.isNotEmpty(beforeAction.getSqls())){ List sqls = beforeAction.getSqls().stream() - .map(sql -> FesqlUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ -118,7 +116,7 @@ public void tearDown() { if(tearDown!=null){ if(CollectionUtils.isNotEmpty(tearDown.getSqls())){ List sqls = tearDown.getSqls().stream() - .map(sql -> FesqlUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ -150,7 +148,7 @@ protected void afterAction(){ if(afterAction!=null){ if(CollectionUtils.isNotEmpty(afterAction.getSqls())){ List sqls = afterAction.getSqls().stream() - .map(sql -> FesqlUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .collect(Collectors.toList()); fesqlResult = OpenMLDBComamndFacade.sqls(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, sqls); } diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java index e2f5cbd924a..8c7c3192b0f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java @@ -20,13 +20,13 @@ import com._4paradigm.openmldb.http_test.common.RestfulGlobalVar; import com._4paradigm.openmldb.http_test.config.FedbRestfulConfig; import com._4paradigm.openmldb.java_sdk_test.checker.ResultChecker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.common.Checker; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.InputDesc; -import com._4paradigm.openmldb.test_common.restful.common.FedbHttp; +import com._4paradigm.openmldb.test_common.restful.common.OpenMLDBHttp; import com._4paradigm.openmldb.test_common.restful.model.AfterAction; import com._4paradigm.openmldb.test_common.restful.model.BeforeAction; import com._4paradigm.openmldb.test_common.restful.model.HttpMethod; @@ -40,11 +40,11 @@ public class RestfulExecutor extends BaseExecutor{ protected SqlExecutor executor; - private FedbHttp fedbHttp; + private OpenMLDBHttp fedbHttp; public RestfulExecutor(SqlExecutor executor, RestfulCase restfulCase) { super(restfulCase); this.executor = executor; - fedbHttp = new FedbHttp(); + fedbHttp = new OpenMLDBHttp(); fedbHttp.setUrl("http://"+ RestfulGlobalVar.mainInfo.getApiServerEndpoints().get(0)); fedbHttp.setMethod(HttpMethod.valueOf(restfulCase.getMethod())); String uri = restfulCase.getUri(); @@ -69,7 +69,7 @@ public void prepare() { return; } if(CollectionUtils.isNotEmpty(beforeAction.getTables())) { - FesqlResult res = FesqlUtil.createAndInsert(executor, FedbRestfulConfig.DB_NAME, beforeAction.getTables(), false); + OpenMLDBResult res = 
OpenMLDBUtil.createAndInsert(executor, FedbRestfulConfig.DB_NAME, beforeAction.getTables(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail "); } @@ -82,7 +82,7 @@ public void prepare() { } if(CollectionUtils.isNotEmpty(beforeAction.getSqls())){ List sqls = beforeAction.getSqls().stream() - .map(sql -> FesqlUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ -90,7 +90,7 @@ public void prepare() { return sql; }) .collect(Collectors.toList()); - FesqlUtil.sqls(executor,FedbRestfulConfig.DB_NAME,sqls); + OpenMLDBUtil.sqls(executor,FedbRestfulConfig.DB_NAME,sqls); } logger.info("prepare end"); } @@ -115,7 +115,7 @@ public void tearDown() { if(tearDown!=null){ if(CollectionUtils.isNotEmpty(tearDown.getSqls())){ List sqls = tearDown.getSqls().stream() - .map(sql -> FesqlUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ -123,7 +123,7 @@ public void tearDown() { return sql; }) .collect(Collectors.toList()); - fesqlResult = FesqlUtil.sqls(executor, FedbRestfulConfig.DB_NAME, sqls); + fesqlResult = OpenMLDBUtil.sqls(executor, FedbRestfulConfig.DB_NAME, sqls); } } @@ -135,7 +135,7 @@ public void tearDown() { for (InputDesc table : tables) { if(table.isDrop()) { String drop = "drop table " + table.getName() + ";"; - FesqlUtil.ddl(executor, FedbRestfulConfig.DB_NAME, drop); + OpenMLDBUtil.ddl(executor, FedbRestfulConfig.DB_NAME, drop); } } } @@ -147,9 +147,9 @@ protected void afterAction(){ if(afterAction!=null){ if(CollectionUtils.isNotEmpty(afterAction.getSqls())){ List sqls = afterAction.getSqls().stream() - .map(sql -> 
FesqlUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .collect(Collectors.toList()); - fesqlResult = FesqlUtil.sqls(executor, FedbRestfulConfig.DB_NAME, sqls); + fesqlResult = OpenMLDBUtil.sqls(executor, FedbRestfulConfig.DB_NAME, sqls); } ExpectDesc expect = afterAction.getExpect(); if(expect!=null){ diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java index 1ed46f58c19..6e77f854c0e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java @@ -15,11 +15,11 @@ */ package com._4paradigm.openmldb.http_test.tmp; -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.restful.model.HttpResult; -import com._4paradigm.openmldb.test_common.restful.util.HttpRequest; +import com._4paradigm.openmldb.test_common.util.HttpRequest; import com.google.gson.Gson; import com.google.gson.JsonArray; import com.google.gson.JsonObject; @@ -45,7 +45,7 @@ public void testAll() throws Exception { for(int i=0;i8 UTF-8 - 0.5.0 - 0.5.0-macos - test_suite/test_tmp.xml 1.8.9 @@ -36,16 +33,6 @@ - - com.4paradigm.openmldb - openmldb-jdbc - ${openmldb.jdbc.version} - - - com.4paradigm.openmldb - 
openmldb-native - ${openmldb.navtive.version} - com.4paradigm.openmldb openmldb-test-common @@ -54,7 +41,7 @@ com.4paradigm.openmldb openmldb-deploy - 0.1.0-SNAPSHOT + ${project.version} io.qameta.allure diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java index b2ea244b550..64e64a7e6c0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.ExpectDesc; @@ -28,17 +28,17 @@ * @date 2020/6/16 3:37 PM */ public abstract class BaseChecker implements Checker { - protected FesqlResult fesqlResult; - protected Map resultMap; + protected OpenMLDBResult fesqlResult; + protected Map resultMap; protected ExpectDesc expect; protected ReportLog reportLog = ReportLog.of(); - public BaseChecker(ExpectDesc expect, FesqlResult fesqlResult){ + public BaseChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ this.expect = expect; this.fesqlResult = fesqlResult; } - public BaseChecker(FesqlResult fesqlResult,Map resultMap){ + public BaseChecker(OpenMLDBResult fesqlResult, Map resultMap){ this.fesqlResult = fesqlResult; this.resultMap = resultMap; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java index 145062faa84..e808f77cddc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java @@ -1,11 +1,10 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.command.CommandUtil; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.command.CommandUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.model.CatFile; import com._4paradigm.openmldb.test_common.model.ExpectDesc; -import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; @@ -13,7 +12,7 @@ @Slf4j public class CatCheckerByCli extends BaseChecker{ - public CatCheckerByCli(ExpectDesc expect, FesqlResult fesqlResult) { + public CatCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } @@ -23,7 +22,7 @@ public void check() throws Exception { reportLog.info("cat check"); CatFile expectCat = expect.getCat(); String path = expectCat.getPath(); - path = FesqlUtil.formatSql(path, fesqlResult.getTableNames()); + path = OpenMLDBUtil.formatSql(path, fesqlResult.getTableNames()); String command = "cat "+path; List actualList = CommandUtil.run(command); List expectList = expectCat.getLines(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java index b9ce3c9bb63..502c0bfc377 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -29,7 +29,7 @@ public class CheckerStrategy { - public static List build(SQLCase fesqlCase, FesqlResult fesqlResult, SQLCaseType executorType) { + public static List build(SQLCase fesqlCase, OpenMLDBResult fesqlResult, SQLCaseType executorType) { List checkList = new ArrayList<>(); if (null == fesqlCase) { return checkList; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java index b2651d52258..7de5287523d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import 
com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; @@ -32,7 +32,7 @@ @Slf4j public class ColumnsChecker extends BaseChecker { - public ColumnsChecker(ExpectDesc expect, FesqlResult fesqlResult) { + public ColumnsChecker(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } @@ -50,8 +50,8 @@ public void check() throws Exception { for (int i = 0; i < expectColumns.size(); i++) { // Assert.assertEquals(columnNames.get(i)+" "+columnTypes.get(i),expectColumns.get(i)); Assert.assertEquals(columnNames.get(i), Table.getColumnName(expectColumns.get(i))); - Assert.assertEquals(FesqlUtil.getColumnType(columnTypes.get(i)), - FesqlUtil.getColumnType(Table.getColumnType(expectColumns.get(i)))); + Assert.assertEquals(OpenMLDBUtil.getColumnType(columnTypes.get(i)), + OpenMLDBUtil.getColumnType(Table.getColumnType(expectColumns.get(i)))); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java index edcf61052ba..662dca28913 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import 
com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; @@ -31,7 +31,7 @@ */ @Slf4j public class ColumnsCheckerByCli extends BaseChecker { - public ColumnsCheckerByCli(ExpectDesc expect, FesqlResult fesqlResult) { + public ColumnsCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java index fe352cb260d..8eda4685cdd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; @@ -31,7 +31,7 @@ */ @Slf4j public class ColumnsCheckerByJBDC extends BaseChecker { - public ColumnsCheckerByJBDC(ExpectDesc expect, FesqlResult fesqlResult) { + public ColumnsCheckerByJBDC(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java index 5f240436456..10c02e08253 
100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; @@ -30,7 +30,7 @@ @Slf4j public class CountChecker extends BaseChecker { - public CountChecker(ExpectDesc expect, FesqlResult fesqlResult){ + public CountChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ super(expect,fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java index 8114e4c5660..1f00c9e623a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java @@ -17,23 +17,20 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; -import 
com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; -import java.util.List; - /** * @author zhaowei * @date 2020/6/16 3:14 PM */ @Slf4j public class DeploymentCheckerByCli extends BaseChecker { - public DeploymentCheckerByCli(ExpectDesc expect, FesqlResult fesqlResult) { + public DeploymentCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } @@ -43,10 +40,10 @@ public void check() throws Exception { reportLog.info("deployment check"); OpenmldbDeployment expectDeployment = expect.getDeployment(); String name = expectDeployment.getName(); - name = FesqlUtil.formatSql(name, fesqlResult.getTableNames()); + name = OpenMLDBUtil.formatSql(name, fesqlResult.getTableNames()); expectDeployment.setName(name); String sql = expectDeployment.getSql(); - sql = FesqlUtil.formatSql(sql, fesqlResult.getTableNames()); + sql = OpenMLDBUtil.formatSql(sql, fesqlResult.getTableNames()); expectDeployment.setSql(sql); if (expectDeployment == null) { return; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java index a6b32ab71c3..49031b2f106 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import 
com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import lombok.extern.slf4j.Slf4j; @@ -31,7 +31,7 @@ */ @Slf4j public class DeploymentContainsCheckerByCli extends BaseChecker { - public DeploymentContainsCheckerByCli(ExpectDesc expect, FesqlResult fesqlResult) { + public DeploymentContainsCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java index f3053738a77..a7b57aed4d4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import lombok.extern.slf4j.Slf4j; @@ -31,7 +31,7 @@ */ @Slf4j public class DeploymentCountCheckerByCli extends BaseChecker { - public DeploymentCountCheckerByCli(ExpectDesc expect, FesqlResult fesqlResult) { + public DeploymentCountCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java index cc5c39ff771..fc5f2938822 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; @@ -33,7 +33,7 @@ public class DiffResultChecker extends BaseChecker{ // private FesqlResult sqlite3Result; - public DiffResultChecker(FesqlResult fesqlResult, Map resultMap){ + public DiffResultChecker(OpenMLDBResult fesqlResult, Map resultMap){ super(fesqlResult,resultMap); // sqlite3Result = resultMap.get("sqlite3"); } @@ -48,7 +48,7 @@ public void check() throws Exception { } } } - public void checkMysql(FesqlResult mysqlResult) throws Exception { + public void checkMysql(OpenMLDBResult mysqlResult) throws Exception { log.info("diff mysql check"); reportLog.info("diff mysql check"); //验证success @@ -67,7 +67,7 @@ public void checkMysql(FesqlResult mysqlResult) throws Exception { // String.format("ResultChecker fail: mysql size %d, fesql size %d", mysqlRows.size(), fesqlRows.size())); Assert.assertEquals(fesqlRows,mysqlRows,String.format("ResultChecker fail: mysql: %s, fesql: %s", mysqlRows, fesqlRows)); } - public void checkSqlite3(FesqlResult sqlite3Result) throws Exception { + public void checkSqlite3(OpenMLDBResult sqlite3Result) throws Exception { log.info("diff sqlite3 check"); reportLog.info("diff sqlite3 check"); //验证success diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java index 9595c9fd168..dfc3de0fdb9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; @@ -30,7 +30,7 @@ @Slf4j public class DiffVersionChecker extends BaseChecker{ - public DiffVersionChecker(FesqlResult fesqlResult, Map resultMap){ + public DiffVersionChecker(OpenMLDBResult fesqlResult, Map resultMap){ super(fesqlResult,resultMap); } @@ -40,7 +40,7 @@ public void check() throws Exception { reportLog.info("diff version check"); resultMap.entrySet().stream().forEach(e->{ String version = e.getKey(); - FesqlResult result = e.getValue(); + OpenMLDBResult result = e.getValue(); Assert.assertTrue(fesqlResult.equals(result),"版本结果对比不一致\nmainVersion:\n"+fesqlResult+"\nversion:"+version+"\n"+result); }); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java index b9d37e48c92..f1b7ef08b8e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java @@ -17,9 +17,8 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBComamndFacade; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBIndex; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.TableIndex; @@ -39,7 +38,7 @@ @Slf4j public class IndexChecker extends BaseChecker { private static final Logger logger = new LogProxy(log); - public IndexChecker(ExpectDesc expect, FesqlResult fesqlResult){ + public IndexChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ super(expect,fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexCountChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexCountChecker.java index 821bac68faa..f3bd1d4e5e4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexCountChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexCountChecker.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import 
lombok.extern.slf4j.Slf4j; @@ -32,7 +32,7 @@ @Slf4j public class IndexCountChecker extends BaseChecker { private static final Logger logger = new LogProxy(log); - public IndexCountChecker(ExpectDesc expect, FesqlResult fesqlResult){ + public IndexCountChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ super(expect,fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java index 49227f0c3b1..624feacae6d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java @@ -17,12 +17,12 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.restful.model.HttpResult; -import com._4paradigm.openmldb.test_common.restful.util.HttpRequest; +import com._4paradigm.openmldb.test_common.util.HttpRequest; +import com._4paradigm.openmldb.test_common.util.Tool; import com.jayway.jsonpath.JsonPath; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; @@ -37,7 +37,7 @@ public class OptionsChecker extends BaseChecker { private static String reg = "\\{(\\d+)\\}"; - public OptionsChecker(ExpectDesc expect, FesqlResult fesqlResult) { + public OptionsChecker(ExpectDesc expect, 
OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } @@ -45,7 +45,7 @@ public OptionsChecker(ExpectDesc expect, FesqlResult fesqlResult) { public void check() throws Exception { log.info("options check"); reportLog.info("options check"); - String apiserverEndpoint = FedbGlobalVar.mainInfo.getApiServerEndpoints().get(0); + String apiserverEndpoint = OpenMLDBGlobalVar.mainInfo.getApiServerEndpoints().get(0); String dbName = fesqlResult.getDbName(); String tableName = expect.getName(); if(tableName.matches(reg)){ diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java index a29fad66a6b..97e8afd4cdb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java @@ -17,8 +17,8 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; @@ -36,7 +36,7 @@ @Slf4j public class ResultChecker extends BaseChecker { - public ResultChecker(ExpectDesc expect, FesqlResult fesqlResult) { + public ResultChecker(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } @@ -47,13 +47,13 @@ public void check() throws ParseException { if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail 
check result: columns are empty"); } - List> expectRows = FesqlUtil.convertRows(expect.getRows(), + List> expectRows = OpenMLDBUtil.convertRows(expect.getRows(), expect.getColumns()); List> actual = fesqlResult.getResult(); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = FesqlUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = OpenMLDBUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java index 7c0672b63cf..e9ff07f31b5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java @@ -17,8 +17,8 @@ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; @@ -36,7 +36,7 @@ @Slf4j public class ResultCheckerByCli extends BaseChecker { - public ResultCheckerByCli(ExpectDesc expect, FesqlResult fesqlResult) { + public ResultCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } @@ -47,12 +47,12 @@ public void check() 
throws ParseException { if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } - List> expectRows = FesqlUtil.convertRows(expect.getRows(), expect.getColumns()); - List> actual = FesqlUtil.convertRows(fesqlResult.getResult(), expect.getColumns()); + List> expectRows = OpenMLDBUtil.convertRows(expect.getRows(), expect.getColumns()); + List> actual = OpenMLDBUtil.convertRows(fesqlResult.getResult(), expect.getColumns()); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = FesqlUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = OpenMLDBUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java index 2e56b336ef6..f02575e2d1a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java @@ -15,8 +15,8 @@ */ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; @@ -34,7 +34,7 @@ @Slf4j public class 
ResultCheckerByJDBC extends BaseChecker { - public ResultCheckerByJDBC(ExpectDesc expect, FesqlResult fesqlResult) { + public ResultCheckerByJDBC(ExpectDesc expect, OpenMLDBResult fesqlResult) { super(expect, fesqlResult); } @@ -45,13 +45,13 @@ public void check() throws Exception { if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } - List> expectRows = FesqlUtil.convertRows(expect.getRows(), + List> expectRows = OpenMLDBUtil.convertRows(expect.getRows(), expect.getColumns()); List> actual = fesqlResult.getResult(); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = FesqlUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = OpenMLDBUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java index bfad498620e..b16d2504c7a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java @@ -15,7 +15,7 @@ */ package com._4paradigm.openmldb.java_sdk_test.checker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; @@ -27,7 +27,7 @@ @Slf4j public class SuccessChecker extends BaseChecker { - public SuccessChecker(ExpectDesc expect, 
FesqlResult fesqlResult){ + public SuccessChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ super(expect,fesqlResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java index a2d0d241b2e..f4ddc844d1b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java @@ -21,6 +21,7 @@ import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.provider.Yaml; import lombok.extern.slf4j.Slf4j; import org.slf4j.Logger; @@ -44,7 +45,7 @@ public class BaseTest implements ITest { public static String CaseNameFormat(SQLCase sqlCase) { return String.format("%s_%s_%s", - FedbGlobalVar.env, sqlCase.getId(), sqlCase.getDesc()); + OpenMLDBGlobalVar.env, sqlCase.getId(), sqlCase.getDesc()); } @DataProvider(name = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbConfig.java index 43a1cbf059c..3d39ffd65ba 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbConfig.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbConfig.java @@ -16,7 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.common; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.util.Tool; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.testng.collections.Lists; @@ -77,13 +78,13 @@ public class FedbConfig { log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); } - BASE_PATH = CONFIG.getProperty(FedbGlobalVar.env + "_base_path"); + BASE_PATH = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_base_path"); // String tb_endpoint_0 = CONFIG.getProperty(FedbGlobalVar.env + "_tb_endpoint_0"); // String tb_endpoint_1 = CONFIG.getProperty(FedbGlobalVar.env + "_tb_endpoint_1"); // String tb_endpoint_2 = CONFIG.getProperty(FedbGlobalVar.env + "_tb_endpoint_2"); String versionStr = System.getProperty("fedbVersion"); if (StringUtils.isEmpty(versionStr)) { - versionStr = CONFIG.getProperty(FedbGlobalVar.env + "_versions"); + versionStr = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_versions"); } if (StringUtils.isNotEmpty(versionStr)) { VERSIONS = Arrays.stream(versionStr.split(",")).collect(Collectors.toList()); @@ -98,14 +99,14 @@ public class FedbConfig { }else{ ADD_REPORT_LOG = true; } - String init_env = CONFIG.getProperty(FedbGlobalVar.env + "_init_version_env"); + String init_env = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_init_version_env"); if (StringUtils.isNotEmpty(init_env)) { INIT_VERSION_ENV = Boolean.parseBoolean(init_env); } } public static boolean isCluster() { - return FedbGlobalVar.env.equals("cluster"); + return OpenMLDBGlobalVar.env.equals("cluster"); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java index 4618368af64..27fa4ae42a4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java @@ -18,6 +18,8 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; @@ -41,19 +43,19 @@ public class FedbTest extends BaseTest { @BeforeTest() @Parameters({"env","version","fedbPath"}) public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String fedbPath) throws Exception { - FedbGlobalVar.env = env; + OpenMLDBGlobalVar.env = env; if(env.equalsIgnoreCase("cluster")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; openMLDBDeploy.setOpenMLDBPath(fedbPath); openMLDBDeploy.setCluster(true); - FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else if(env.equalsIgnoreCase("standalone")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); openMLDBDeploy.setOpenMLDBPath(fedbPath); openMLDBDeploy.setCluster(false); - FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ - FedbGlobalVar.mainInfo = OpenMLDBInfo.builder() + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) .basePath("/home/zhaowei01/openmldb-auto-test/tmp") 
.openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") @@ -64,15 +66,15 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) .build(); - FedbGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.env = "cluster"; } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { - FedbGlobalVar.env = caseEnv; + OpenMLDBGlobalVar.env = caseEnv; } log.info("fedb global var env: {}", env); - OpenMLDBClient fesqlClient = new OpenMLDBClient(FedbGlobalVar.mainInfo); + OpenMLDBClient fesqlClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); executor = fesqlClient.getExecutor(); log.info("executor:{}",executor); //todo diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbVersionConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbVersionConfig.java index 3785aeb0cf0..a8d8e992003 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbVersionConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbVersionConfig.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.common; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; +import com._4paradigm.openmldb.test_common.util.Tool; import lombok.extern.slf4j.Slf4j; import java.util.Properties; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java deleted file mode 100644 index 66c9e60258b..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneClient.java +++ /dev/null @@ -1,34 +0,0 @@ -package com._4paradigm.openmldb.java_sdk_test.common; - -import com._4paradigm.openmldb.sdk.SdkOption; -import com._4paradigm.openmldb.sdk.SqlException; -import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.sdk.impl.SqlClusterExecutor; -import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; -import lombok.Data; -import lombok.extern.slf4j.Slf4j; - -@Data -@Slf4j -public class StandaloneClient { - - private SqlExecutor executor; - - public StandaloneClient(String host, Integer port){ - SdkOption option = new SdkOption(); - option.setHost(host); - option.setPort(port); - option.setClusterMode(false); - option.setSessionTimeout(10000); - option.setRequestTimeout(60000); - log.info("host {}, port {}", option.getHost(), option.getPort()); - try { - executor = new SqlClusterExecutor(option); - } catch (SqlException e) { - e.printStackTrace(); - } - } - public StandaloneClient(OpenMLDBInfo fedbInfo){ - this(fedbInfo.getHost(),fedbInfo.getPort()); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index 9fc66633c48..174ca23e677 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -18,6 
+18,8 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; @@ -39,13 +41,13 @@ public class StandaloneTest extends BaseTest { @BeforeTest() @Parameters({"env","version","fedbPath"}) public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String fedbPath) throws Exception { - FedbGlobalVar.env = env; + OpenMLDBGlobalVar.env = env; if(env.equalsIgnoreCase("standalone")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); openMLDBDeploy.setOpenMLDBPath(fedbPath); - FedbGlobalVar.mainInfo = openMLDBDeploy.deployStandalone(); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployStandalone(); }else{ - FedbGlobalVar.mainInfo = OpenMLDBInfo.builder() + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.STANDALONE) .basePath("/home/wangkaidong/fedb-auto-test/standalone") .openMLDBPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") @@ -59,10 +61,10 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { - FedbGlobalVar.env = caseEnv; + OpenMLDBGlobalVar.env = caseEnv; } //单机版SDK - StandaloneClient standaloneClient = new StandaloneClient(FedbGlobalVar.mainInfo); + OpenMLDBClient standaloneClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getHost(), OpenMLDBGlobalVar.mainInfo.getPort()); executor = standaloneClient.getExecutor(); log.info("executor : {}",executor); log.info("fedb global var env: {}", env); diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderList.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderList.java index 7a117b9afab..0fe1a9f94c3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderList.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderList.java @@ -19,8 +19,8 @@ import com._4paradigm.openmldb.java_sdk_test.common.BaseTest; import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.util.Tool; import org.apache.commons.lang3.StringUtils; import java.io.File; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java index 61a96f6e3f4..0a7844f9403 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java @@ -16,9 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.common.LogProxy; -import com._4paradigm.openmldb.test_common.common.ReportLog; import 
com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import lombok.extern.slf4j.Slf4j; @@ -39,7 +38,7 @@ public abstract class BaseExecutor implements IExecutor{ protected SQLCaseType executorType; protected String dbName; protected List tableNames = Lists.newArrayList(); - protected FesqlResult mainResult; + protected OpenMLDBResult mainResult; @Override public void run() { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index d7bbbbbe3d5..b40d731d4ad 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -20,8 +20,8 @@ import com._4paradigm.openmldb.java_sdk_test.checker.Checker; import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; import com._4paradigm.openmldb.java_sdk_test.checker.DiffVersionChecker; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -45,7 +45,7 @@ public abstract class BaseSQLExecutor extends BaseExecutor{ protected SqlExecutor executor; private Map executorMap; protected Map fedbInfoMap; - private Map resultMap; + private Map resultMap; public BaseSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType 
executorType) { this.executor = executor; @@ -89,7 +89,7 @@ public void execute() { } } - protected abstract FesqlResult execute(String version, SqlExecutor executor); + protected abstract OpenMLDBResult execute(String version, SqlExecutor executor); @Override public void check() throws Exception { @@ -116,11 +116,11 @@ public void tearDown(String version,SqlExecutor executor) { if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } - FesqlUtil.sql(executor, dbName, sql); + OpenMLDBUtil.sql(executor, dbName, sql); }); } logger.info("version:{},begin drop table",version); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index df36ed4000e..c6a13823ac7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; import 
com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -72,8 +72,8 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); logger.info("version:{},create db:{},{}", version, dbName, dbOk); - FesqlUtil.useDB(executor,dbName); - FesqlResult res = FesqlUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), false); + OpenMLDBUtil.useDB(executor,dbName); + OpenMLDBResult res = OpenMLDBUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . version:"+version); } @@ -81,30 +81,30 @@ public void prepare(String version,SqlExecutor executor){ } @Override - public FesqlResult execute(String version,SqlExecutor executor){ + public OpenMLDBResult execute(String version, SqlExecutor executor){ logger.info("version:{} execute begin",version); - FesqlResult fesqlResult = null; + OpenMLDBResult fesqlResult = null; List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } - fesqlResult = FesqlUtil.sql(executor, dbName, sql); + fesqlResult = OpenMLDBUtil.sql(executor, dbName, sql); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } - fesqlResult = FesqlUtil.sql(executor, dbName, sql); + 
fesqlResult = OpenMLDBUtil.sql(executor, dbName, sql); } logger.info("version:{} execute end",version); return fesqlResult; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java index 56f83600002..471a4c968c7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java @@ -19,12 +19,12 @@ import com._4paradigm.openmldb.java_sdk_test.checker.Checker; import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; import com._4paradigm.openmldb.java_sdk_test.checker.DiffVersionChecker; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBComamndFacade; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBCommandUtil; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandUtil; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import 
com._4paradigm.openmldb.test_common.model.SQLCase; @@ -45,7 +45,7 @@ public class CommandExecutor extends BaseExecutor{ private static final Logger logger = new LogProxy(log); protected Map openMLDBInfoMap; - private Map resultMap; + private Map resultMap; public CommandExecutor(SQLCase fesqlCase, SQLCaseType executorType) { this.fesqlCase = fesqlCase; @@ -94,7 +94,7 @@ public boolean verify() { @Override public void prepare(){ - prepare("mainVersion", FedbGlobalVar.mainInfo); + prepare("mainVersion", OpenMLDBGlobalVar.mainInfo); if(MapUtils.isNotEmpty(openMLDBInfoMap)) { openMLDBInfoMap.entrySet().stream().forEach(e -> prepare(e.getKey(), e.getValue())); } @@ -102,9 +102,9 @@ public void prepare(){ protected void prepare(String version, OpenMLDBInfo openMLDBInfo){ logger.info("version:{} prepare begin",version); - FesqlResult fesqlResult = OpenMLDBCommandUtil.createDB(openMLDBInfo,dbName); + OpenMLDBResult fesqlResult = OpenMLDBCommandUtil.createDB(openMLDBInfo,dbName); logger.info("version:{},create db:{},{}", version, dbName, fesqlResult.isOk()); - FesqlResult res = OpenMLDBCommandUtil.createAndInsert(openMLDBInfo, dbName, fesqlCase.getInputs()); + OpenMLDBResult res = OpenMLDBCommandUtil.createAndInsert(openMLDBInfo, dbName, fesqlCase.getInputs()); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . 
version:"+version); } @@ -113,7 +113,7 @@ protected void prepare(String version, OpenMLDBInfo openMLDBInfo){ @Override public void execute() { - mainResult = execute("mainVersion",FedbGlobalVar.mainInfo); + mainResult = execute("mainVersion", OpenMLDBGlobalVar.mainInfo); mainResult.setDbName(dbName); if(CollectionUtils.isNotEmpty(tableNames)) { mainResult.setTableNames(tableNames); @@ -124,17 +124,17 @@ public void execute() { } } - protected FesqlResult execute(String version, OpenMLDBInfo openMLDBInfo){ + protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ logger.info("version:{} execute begin",version); - FesqlResult fesqlResult = null; + OpenMLDBResult fesqlResult = null; List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(openMLDBInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } @@ -143,9 +143,9 @@ protected FesqlResult execute(String version, OpenMLDBInfo openMLDBInfo){ if (StringUtils.isNotEmpty(sql)) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(openMLDBInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } @@ -165,7 +165,7 @@ public void check() throws Exception { } @Override public void tearDown() { - tearDown("mainVersion",FedbGlobalVar.mainInfo); + tearDown("mainVersion", OpenMLDBGlobalVar.mainInfo); if(MapUtils.isNotEmpty(openMLDBInfoMap)) { 
openMLDBInfoMap.entrySet().stream().forEach(e -> tearDown(e.getKey(), e.getValue())); } @@ -178,11 +178,11 @@ public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ if(MapUtils.isNotEmpty(openMLDBInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } - OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName, sql); + OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName, sql); }); } logger.info("version:{},begin drop table",version); @@ -194,7 +194,7 @@ public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { if(table.isDrop()) { String drop = "drop table " + table.getName() + ";"; String db = table.getDb().isEmpty() ? dbName : table.getDb(); - OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,db,drop); + OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,db,drop); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java index 98058e9b1cf..06702626378 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java @@ -19,7 +19,7 @@ import com._4paradigm.openmldb.java_sdk_test.checker.Checker; import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; import com._4paradigm.openmldb.java_sdk_test.checker.DiffResultChecker; -import 
com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.DBType; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -39,7 +39,7 @@ @Slf4j public class DiffResultExecutor extends BatchSQLExecutor{ private List executors; - private Map resultMap; + private Map resultMap; public DiffResultExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { super(executor, fesqlCase, executorType); executors = new ArrayList<>(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java index b2a31867527..fcd15c52695 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java @@ -17,8 +17,8 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -45,7 +45,7 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); logger.info("version:{},create db:{},{}", version, dbName, dbOk); 
- FesqlResult res = FesqlUtil.createAndInsertWithPrepared(executor, dbName, fesqlCase.getInputs(), false); + OpenMLDBResult res = OpenMLDBUtil.createAndInsertWithPrepared(executor, dbName, fesqlCase.getInputs(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . version:"+version); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java index ced545f653c..8ad5c6e1043 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java @@ -15,8 +15,8 @@ */ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.java_sdk_test.util.JDBCUtil; import com._4paradigm.openmldb.java_sdk_test.util.MysqlUtil; import com._4paradigm.openmldb.test_common.model.DBType; @@ -65,17 +65,17 @@ public void prepare() { @Override public void execute() { logger.info("mysql execute begin"); - FesqlResult fesqlResult = null; + OpenMLDBResult fesqlResult = null; List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.MYSQL); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { - sql = 
FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.MYSQL); } mainResult = fesqlResult; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java index 3b458e953cf..5fa87e34b9b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -34,7 +34,7 @@ public NullExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executo } @Override - public FesqlResult execute(String version, SqlExecutor executor) { + public OpenMLDBResult execute(String version, SqlExecutor executor) { return null; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java index 7d12bd2293e..3b073456d8b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -45,9 +45,9 @@ public QueryPreparedExecutor(SQLCase fesqlCase, SqlExecutor executor, Map sqls = fesqlCase.getSqls(); // if (sqls != null && sqls.size() > 0) { // for (String sql : sqls) { @@ -64,14 +64,14 @@ public FesqlResult execute(String version, SqlExecutor executor){ if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } InputDesc parameters = fesqlCase.getParameters(); List types = parameters.getColumns().stream().map(s -> s.split("\\s+")[1]).collect(Collectors.toList()); List objects = parameters.getRows().get(0); - fesqlResult = FesqlUtil.selectWithPrepareStatement(executor, dbName,sql, types,objects); + fesqlResult = OpenMLDBUtil.selectWithPrepareStatement(executor, dbName,sql, types,objects); } logger.info("version:{} execute end",version); return fesqlResult; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index ce87463229e..c0e5e21add0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -17,8 +17,8 @@ package com._4paradigm.openmldb.java_sdk_test.executor; import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -51,29 +51,29 @@ public RequestQuerySQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } - fesqlResult = FesqlUtil.sql(executor, dbName, sql); + fesqlResult = OpenMLDBUtil.sql(executor, dbName, sql); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = FesqlUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = 
FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); } InputDesc request = null; if (isBatchRequest) { @@ -91,7 +91,7 @@ public FesqlResult execute(String version, SqlExecutor executor) { } } - fesqlResult = FesqlUtil.sqlBatchRequestMode( + fesqlResult = OpenMLDBUtil.sqlBatchRequestMode( executor, dbName, sql, batchRequest, commonColumnIndices); } else { if (null != fesqlCase.getBatch_request()) { @@ -103,7 +103,7 @@ public FesqlResult execute(String version, SqlExecutor executor) { logger.error("fail to execute in request query sql executor: sql case request columns is empty"); return null; } - fesqlResult = FesqlUtil.sqlRequestMode(executor, dbName, null == fesqlCase.getBatch_request(), sql, request); + fesqlResult = OpenMLDBUtil.sqlRequestMode(executor, dbName, null == fesqlCase.getBatch_request(), sql, request); } } }catch (Exception e){ @@ -119,7 +119,7 @@ protected void prepare(String version,SqlExecutor executor) { boolean dbOk = executor.createDB(dbName); logger.info("create db:{},{}", dbName, dbOk); boolean useFirstInputAsRequests = !isBatchRequest && null == fesqlCase.getBatch_request(); - FesqlResult res = FesqlUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), useFirstInputAsRequests); + OpenMLDBResult res = OpenMLDBUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), useFirstInputAsRequests); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java index cd3eafc7a6c..f4493dd0084 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.java_sdk_test.util.JDBCUtil; import com._4paradigm.openmldb.java_sdk_test.util.Sqlite3Util; import com._4paradigm.openmldb.test_common.model.DBType; @@ -66,17 +66,17 @@ public void prepare() { @Override public void execute() { logger.info("sqlite3 execute begin"); - FesqlResult fesqlResult = null; + OpenMLDBResult fesqlResult = null; List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.SQLITE3); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { - sql = FesqlUtil.formatSql(sql, tableNames); + sql = OpenMLDBUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.SQLITE3); } mainResult = fesqlResult; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index d4e4a33cae5..4dbc45c702a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -50,7 +50,7 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); logger.info("create db:{},{}", dbName, dbOk); - FesqlResult res = FesqlUtil.createAndInsert( + OpenMLDBResult res = OpenMLDBUtil.createAndInsert( executor, dbName, fesqlCase.getInputs(), !isBatchRequest && null == fesqlCase.getBatch_request()); if (!res.isOk()) { @@ -59,9 +59,9 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare end",version); } @Override - public FesqlResult execute(String version,SqlExecutor executor) { + public OpenMLDBResult execute(String version, SqlExecutor executor) { logger.info("version:{} execute begin",version); - FesqlResult fesqlResult = null; + OpenMLDBResult fesqlResult = null; try { if (fesqlCase.getInputs().isEmpty() || CollectionUtils.isEmpty(fesqlCase.getInputs().get(0).getRows())) { @@ -86,19 +86,19 @@ public FesqlResult execute(String version,SqlExecutor executor) { return fesqlResult; } - private FesqlResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { + private OpenMLDBResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { String spSql = fesqlCase.getProcedure(sql); logger.info("spSql: {}", spSql); - 
return FesqlUtil.sqlRequestModeWithSp( + return OpenMLDBUtil.sqlRequestModeWithSp( executor, dbName, fesqlCase.getSpName(), null == fesqlCase.getBatch_request(), spSql, fesqlCase.getInputs().get(0), isAsyn); } - private FesqlResult executeBatch(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { + private OpenMLDBResult executeBatch(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { String spName = "sp_" + tableNames.get(0) + "_" + System.currentTimeMillis(); - String spSql = FesqlUtil.buildSpSQLWithConstColumns(spName, sql, fesqlCase.getBatch_request()); + String spSql = OpenMLDBUtil.buildSpSQLWithConstColumns(spName, sql, fesqlCase.getBatch_request()); logger.info("spSql: {}", spSql); - return FesqlUtil.selectBatchRequestModeWithSp( + return OpenMLDBUtil.selectBatchRequestModeWithSp( executor, dbName, spName, spSql, fesqlCase.getBatch_request(), isAsyn); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java index 108d85d266a..91cd909289e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java @@ -16,9 +16,10 @@ package com._4paradigm.openmldb.java_sdk_test.util; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.DBType; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import lombok.extern.slf4j.Slf4j; import java.sql.*; @@ -47,10 +48,10 @@ public static int executeUpdate(String sql, 
DBType dbType){ reportLog.info("jdbc update result:{}",n); return n; } - public static FesqlResult executeQuery(String sql, DBType dbType){ + public static OpenMLDBResult executeQuery(String sql, DBType dbType){ log.info("jdbc sql:{}",sql); reportLog.info("jdbc sql:{}",sql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); try(Connection connection= ConnectionFactory.of().getConn(dbType)){ Statement statement=connection.createStatement(); ResultSet rs = statement.executeQuery(sql); @@ -96,7 +97,7 @@ private static List> convertRestultSetToList(ResultSet rs) throws S return result; } - public static void setSchema(ResultSetMetaData metaData,FesqlResult fesqlResult) { + public static void setSchema(ResultSetMetaData metaData, OpenMLDBResult fesqlResult) { try { int columnCount = metaData.getColumnCount(); List columnNames = new ArrayList<>(); @@ -109,7 +110,7 @@ public static void setSchema(ResultSetMetaData metaData,FesqlResult fesqlResult) columnLabel = metaData.getColumnName(i); } columnNames.add(columnLabel); - columnTypes.add(FesqlUtil.getSQLTypeString(metaData.getColumnType(i))); + columnTypes.add(OpenMLDBUtil.getSQLTypeString(metaData.getColumnType(i))); } fesqlResult.setColumnNames(columnNames); fesqlResult.setColumnTypes(columnTypes); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/Tool.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/Tool.java deleted file mode 100755 index 21be1de8354..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/Tool.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com._4paradigm.openmldb.java_sdk_test.util; - - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.Assert; - -import java.io.File; -import java.io.IOException; -import java.util.*; - - -public class Tool { - private static final Logger logger = LoggerFactory.getLogger(Tool.class); - - public static String getFilePath(String filename) { - return Tool.class.getClassLoader().getResource(filename).getFile(); - } - - public static String getCasePath(String yamlCaseDir, String casePath) { - String caseDir = StringUtils.isEmpty(yamlCaseDir) ? 
Tool.rtidbDir().getAbsolutePath() : yamlCaseDir; - Assert.assertNotNull(caseDir); - String caseAbsPath = caseDir + "/cases/" + casePath; - logger.debug("case absolute path: {}", caseAbsPath); - return caseAbsPath; - } - - public static File rtidbDir() { - File directory = new File("."); - directory = directory.getAbsoluteFile(); - while (null != directory) { - if (directory.isDirectory() && "OpenMLDB".equals(directory.getName())) { - break; - } - logger.debug("current directory name {}", directory.getName()); - directory = directory.getParentFile(); - } - - if ("OpenMLDB".equals(directory.getName())) { - return directory; - } else { - return null; - } - } - - public static void sleep(long time) { - try { - Thread.sleep(time); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - - public static List getPaths(File directory) { - List list = new ArrayList<>(); - Collection files = FileUtils.listFiles(directory, null, true); - for (File f : files) { - list.add(f.getAbsolutePath()); - } - Collections.sort(list); - return list; - } - - - public static Properties getProperties(String fileName) { - Properties ps = new Properties(); - try { - ps.load(Tool.class.getClassLoader().getResourceAsStream(fileName)); - } catch (IOException e) { - e.printStackTrace(); - logger.error(e.getMessage()); - } - return ps; - } - - public static String uuid() { - String uuid = UUID.randomUUID().toString().replaceAll("-", ""); - return uuid; - } - -} - - - - - - - - - - - - diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java index 9190b9159e9..e1a5e20fda8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java @@ -16,9 +16,9 @@ package com._4paradigm.openmldb.java_sdk_test.auto_gen_case; -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.sdk.SqlExecutor; @@ -52,13 +52,13 @@ public void beforeClass(){ if(FedbConfig.INIT_VERSION_ENV) { FedbConfig.VERSIONS.forEach(version -> { OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); - openMLDBDeploy.setCluster("cluster".equals(FedbGlobalVar.env)); + openMLDBDeploy.setCluster("cluster".equals(OpenMLDBGlobalVar.env)); OpenMLDBInfo fedbInfo = openMLDBDeploy.deployCluster(2, 3); - OpenMLDBClient fesqlClient = new OpenMLDBClient(fedbInfo); + OpenMLDBClient fesqlClient = new OpenMLDBClient(fedbInfo.getZk_cluster(),fedbInfo.getZk_root_path()); executorMap.put(version, fesqlClient.getExecutor()); fedbInfoMap.put(version, fedbInfo); }); - fedbInfoMap.put("mainVersion", FedbGlobalVar.mainInfo); + fedbInfoMap.put("mainVersion", OpenMLDBGlobalVar.mainInfo); }else{ //测试调试用 String verion = "2.2.2"; @@ -71,9 +71,9 @@ public void beforeClass(){ .nsEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:10007", "172.24.4.55:10008")) .tabletEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:10009", "172.24.4.55:10010", "172.24.4.55:10011")) .build(); - executorMap.put(verion, new OpenMLDBClient(fedbInfo).getExecutor()); + executorMap.put(verion, new 
OpenMLDBClient(fedbInfo.getZk_cluster(),fedbInfo.getZk_root_path()).getExecutor()); fedbInfoMap.put(verion, fedbInfo); - fedbInfoMap.put("mainVersion", FedbGlobalVar.mainInfo); + fedbInfoMap.put("mainVersion", OpenMLDBGlobalVar.mainInfo); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java index 3eccd5b6945..3040df97434 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java @@ -1,15 +1,14 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.sdk.Column; import com._4paradigm.openmldb.sdk.Schema; import io.qameta.allure.Feature; import io.qameta.allure.Story; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; -import org.testng.annotations.Test; import org.testng.collections.Lists; import java.sql.SQLException; @@ -22,8 +21,8 @@ public class SchemaTest extends FedbTest { @Story("schema-sdk") // @Test public void testHaveIndexAndOption() throws SQLException { - boolean dbOk = executor.createDB(FedbGlobalVar.dbName); - log.info("create db:{},{}", FedbGlobalVar.dbName, dbOk); + boolean dbOk = executor.createDB(OpenMLDBGlobalVar.dbName); + log.info("create db:{},{}", OpenMLDBGlobalVar.dbName, dbOk); String tableName = 
"test_schema1"; String createSql = "create table "+tableName+"(\n" + "c1 string,\n" + @@ -36,19 +35,19 @@ public void testHaveIndexAndOption() throws SQLException { "c8 date,\n" + "c9 bool not null,\n" + "index(key=(c1),ts=c7,ttl=10,ttl_type=latest))options(partitionnum=8,replicanum=3);"; - FesqlUtil.sql(executor,FedbGlobalVar.dbName,createSql); - Schema tableSchema = executor.getTableSchema(FedbGlobalVar.dbName, tableName); + OpenMLDBUtil.sql(executor, OpenMLDBGlobalVar.dbName,createSql); + Schema tableSchema = executor.getTableSchema(OpenMLDBGlobalVar.dbName, tableName); List columnList = tableSchema.getColumnList(); List actualList = columnList.stream() .map(column -> String.format("%s %s %s", column.getColumnName(), - FesqlUtil.getColumnTypeByType(column.getSqlType()), + OpenMLDBUtil.getColumnTypeByType(column.getSqlType()), column.isNotNull() ? "not null" : "").trim()) .collect(Collectors.toList()); List expectList = Lists.newArrayList("c1 string","c2 int not null","c3 bigint","c4 smallint", "c5 float","c6 double not null","c7 timestamp not null","c8 date","c9 bool not null"); Assert.assertEquals(actualList,expectList); String deleteSql = "drop table "+tableName+";"; - FesqlUtil.sql(executor,FedbGlobalVar.dbName,deleteSql); + OpenMLDBUtil.sql(executor, OpenMLDBGlobalVar.dbName,deleteSql); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java index bb2d8c3d126..5ef667865ea 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/deploy/TestFEDBDeploy.java @@ -16,7 +16,7 @@ package 
com._4paradigm.openmldb.java_sdk_test.deploy; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import org.testng.annotations.Optional; import org.testng.annotations.Test; @@ -24,17 +24,17 @@ public class TestFEDBDeploy{ @Test public void pythonDeploy(@Optional("qa") String env, @Optional("main") String version, @Optional("")String fedbPath){ - FedbGlobalVar.env = env; + OpenMLDBGlobalVar.env = env; if(env.equalsIgnoreCase("cluster")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); openMLDBDeploy.setOpenMLDBPath(fedbPath); openMLDBDeploy.setCluster(true); - FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else if(env.equalsIgnoreCase("standalone")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); openMLDBDeploy.setOpenMLDBPath(fedbPath); openMLDBDeploy.setCluster(false); - FedbGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java index 90d12748f29..98e575b399b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.entity; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import 
com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import org.testng.Assert; @@ -96,7 +96,7 @@ public void converRowsTest() throws ParseException, FileNotFoundException { Assert.assertEquals(3, provider.getCases().size()); SQLCase sqlCase = provider.getCases().get(0); Assert.assertEquals(2, sqlCase.getInputs().size()); - List> expect = FesqlUtil.convertRows(sqlCase.getExpect().getRows(), + List> expect = OpenMLDBUtil.convertRows(sqlCase.getExpect().getRows(), sqlCase.getExpect().getColumns()); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java index 8abb41f2758..cef60cde89e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java @@ -17,11 +17,10 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.command.OpenMLDBComamndFacade; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import 
com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -68,7 +67,7 @@ public void testInsertMulti1000(){ " c8 date not null,\n" + " c9 bool not null,\n" + " index(key=(c1), ts=c5));"; - OpenMLDBComamndFacade.sql(FedbGlobalVar.mainInfo,FedbGlobalVar.dbName,createSql); + OpenMLDBComamndFacade.sql(OpenMLDBGlobalVar.mainInfo, OpenMLDBGlobalVar.dbName,createSql); StringBuilder sb = new StringBuilder("insert into auto_multi_insert_1000 values "); int total = 1000; for(int i=0;i test_zw = OpenMLDBCommandFactory.runNoInteractive(fedbInfo, "test_zw", "desc t4;"); System.out.println("======="); test_zw.forEach(System.out::println); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java index eb9d9edbb29..b4a085ecaad 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java @@ -16,9 +16,9 @@ package com._4paradigm.openmldb.java_sdk_test.temp; -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBClient; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import org.testng.annotations.Test; import java.util.List; @@ -30,7 +30,7 @@ public void testAll(){ OpenMLDBClient fedbClient = new OpenMLDBClient("172.24.4.55:10000","/openmldb"); String dbName = "test_zw"; String sql = "show tables;"; 
- FesqlResult fesqlResult = FesqlUtil.select(fedbClient.getExecutor(), dbName, sql); + OpenMLDBResult fesqlResult = OpenMLDBUtil.select(fedbClient.getExecutor(), dbName, sql); List> result = fesqlResult.getResult(); for(List list:result){ System.out.println(list); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index d9b4ee12db9..2d11ecfdc28 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -14,9 +14,30 @@ 8 8 + + 0.5.0 + 0.5.0-macos + + com.4paradigm.openmldb + openmldb-deploy + ${project.version} + + + + com.4paradigm.openmldb + openmldb-jdbc + ${openmldb.jdbc.version} + + + com.4paradigm.openmldb + openmldb-native + ${openmldb.navtive.version} + + + org.apache.httpcomponents httpclient diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBColumn.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBColumn.java similarity index 93% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBColumn.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBColumn.java index 706118f2c4e..fb15d9ae7d0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBColumn.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBColumn.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.entity; +package com._4paradigm.openmldb.test_common.bean; import lombok.Data; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBIndex.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBIndex.java similarity index 93% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBIndex.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBIndex.java index 36ca863983c..8bb79992dc5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBIndex.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBIndex.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.entity; +package com._4paradigm.openmldb.test_common.bean; import lombok.Data; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java similarity index 95% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java index 108e6e8cc6a..1299aea5895 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlResult.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.entity; +package com._4paradigm.openmldb.test_common.bean; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com.google.common.base.Joiner; @@ -28,7 +28,7 @@ * @date 2020/6/15 11:36 AM */ @Data -public class FesqlResult { +public class OpenMLDBResult { private String dbName; private List tableNames; private boolean ok; @@ -81,7 +81,7 @@ public String toString() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - FesqlResult that = (FesqlResult) o; + OpenMLDBResult that = (OpenMLDBResult) o; boolean flag = toString().equals(that.toString()); return flag; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBSchema.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBSchema.java similarity index 93% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBSchema.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBSchema.java index f5ea4e489a1..9ae135f5dcb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBSchema.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBSchema.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.entity; +package com._4paradigm.openmldb.test_common.bean; import lombok.Data; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/CommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/CommandUtil.java similarity index 89% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/CommandUtil.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/CommandUtil.java index 9b7ecbfa030..708c64c8f8a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/CommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/CommandUtil.java @@ -1,7 +1,8 @@ -package com._4paradigm.openmldb.java_sdk_test.command; +package com._4paradigm.openmldb.test_common.command; + -import com._4paradigm.openmldb.java_sdk_test.util.Tool; import com._4paradigm.openmldb.test_common.common.LogProxy; +import com._4paradigm.openmldb.test_common.util.Tool; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import lombok.extern.slf4j.Slf4j; import org.slf4j.Logger; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBComamndFacade.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBComamndFacade.java similarity index 70% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBComamndFacade.java rename to 
test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBComamndFacade.java index dd26032e526..8bf190d2220 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBComamndFacade.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBComamndFacade.java @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.command; +package com._4paradigm.openmldb.test_common.command; -import com._4paradigm.openmldb.java_sdk_test.command.chain.SqlChainManager; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.chain.SqlChainManager; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; @@ -28,16 +28,16 @@ @Slf4j public class OpenMLDBComamndFacade { private static final Logger logger = new LogProxy(log); - public static FesqlResult sql(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + public static OpenMLDBResult sql(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { logger.info("sql:"+sql); sql = StringUtils.replace(sql,"\n"," "); sql = sql.trim(); - FesqlResult fesqlResult = SqlChainManager.of().sql(openMLDBInfo, dbName, sql); + OpenMLDBResult fesqlResult = SqlChainManager.of().sql(openMLDBInfo, dbName, sql); logger.info("fesqlResult:"+fesqlResult); return fesqlResult; } - public static FesqlResult sqls(OpenMLDBInfo openMLDBInfo, String dbName, List sqls) { - FesqlResult fesqlResult = null; + public static OpenMLDBResult sqls(OpenMLDBInfo openMLDBInfo, String dbName, List sqls) { + 
OpenMLDBResult fesqlResult = null; for(String sql:sqls){ fesqlResult = sql(openMLDBInfo,dbName,sql); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java similarity index 91% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java index 1b068111e00..cc1437e6f50 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenmlDBCommandFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java @@ -13,19 +13,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.command; +package com._4paradigm.openmldb.test_common.command; -import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; -import org.slf4j.Logger; import java.util.List; @Slf4j -public class OpenmlDBCommandFactory { - private static final Logger logger = new LogProxy(log); +public class OpenMLDBCommandFactory { private static String getNoInteractiveCommandByStandalone(String rtidbPath,String host,int port,String dbName,String command){ String line = "%s --host=%s --port=%s --interactive=false --database=%s --cmd='%s'"; if(command.contains("'")){ diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java similarity index 75% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBCommandUtil.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java index 3bc66b02fa4..317235b32ff 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/OpenMLDBCommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java @@ -13,14 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.command; +package com._4paradigm.openmldb.test_common.command; - -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.FesqlUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -34,19 +33,19 @@ public class OpenMLDBCommandUtil { private static final Logger logger = new LogProxy(log); - public static FesqlResult createDB(OpenMLDBInfo openMLDBInfo, String dbName) { + public static OpenMLDBResult createDB(OpenMLDBInfo openMLDBInfo, String dbName) { String sql = String.format("create database %s ;",dbName); - FesqlResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); + OpenMLDBResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); return fesqlResult; } - public static FesqlResult desc(OpenMLDBInfo openMLDBInfo, String dbName, String tableName) { + public static OpenMLDBResult desc(OpenMLDBInfo openMLDBInfo, String dbName, String tableName) { String sql = String.format("desc %s ;",tableName); - FesqlResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); + OpenMLDBResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); return fesqlResult; } - public static FesqlResult createAndInsert(OpenMLDBInfo openMLDBInfo, String defaultDBName, List inputs) { + public static OpenMLDBResult createAndInsert(OpenMLDBInfo openMLDBInfo, String defaultDBName, List inputs) { HashSet dbNames = new HashSet<>(); if (StringUtils.isNotEmpty(defaultDBName)) { dbNames.add(defaultDBName); @@ -55,13 +54,13 @@ public static 
FesqlResult createAndInsert(OpenMLDBInfo openMLDBInfo, String defa for (InputDesc input : inputs) { // CreateDB if input's db has been configured and hasn't been created before if (!StringUtils.isEmpty(input.getDb()) && !dbNames.contains(input.getDb())) { - FesqlResult createDBResult = createDB(openMLDBInfo,input.getDb()); + OpenMLDBResult createDBResult = createDB(openMLDBInfo,input.getDb()); dbNames.add(input.getDb()); log.info("create db:{},{}", input.getDb(), createDBResult.isOk()); } } } - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (inputs != null && inputs.size() > 0) { for (int i = 0; i < inputs.size(); i++) { InputDesc inputDesc = inputs.get(i); @@ -70,9 +69,9 @@ public static FesqlResult createAndInsert(OpenMLDBInfo openMLDBInfo, String defa //create table String createSql = inputDesc.extractCreate(); createSql = SQLCase.formatSql(createSql, i, tableName); - createSql = FesqlUtil.formatSql(createSql, openMLDBInfo); + createSql = OpenMLDBUtil.formatSql(createSql, openMLDBInfo); if (!createSql.isEmpty()) { - FesqlResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,createSql); + OpenMLDBResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,createSql); if (!res.isOk()) { logger.error("fail to create table"); // reportLog.error("fail to create table"); @@ -84,7 +83,7 @@ public static FesqlResult createAndInsert(OpenMLDBInfo openMLDBInfo, String defa for (String insertSql : inserts) { insertSql = SQLCase.formatSql(insertSql, i, input.getName()); if (!insertSql.isEmpty()) { - FesqlResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,insertSql); + OpenMLDBResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,insertSql); if (!res.isOk()) { logger.error("fail to insert table"); // reportLog.error("fail to insert table"); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/AbstractSQLHandler.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/AbstractSQLHandler.java similarity index 77% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/AbstractSQLHandler.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/AbstractSQLHandler.java index 2bc32f03154..83ddf7c2158 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/AbstractSQLHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/AbstractSQLHandler.java @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.Setter; @@ -26,9 +26,9 @@ public abstract class AbstractSQLHandler { public abstract boolean preHandle(String sql); - public abstract FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql); + public abstract OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql); - public FesqlResult doHandle(OpenMLDBInfo openMLDBInfo, String dbName,String sql){ + public OpenMLDBResult doHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql){ if(preHandle(sql)){ return onHandle(openMLDBInfo,dbName,sql); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DDLHandler.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DDLHandler.java similarity index 72% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DDLHandler.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DDLHandler.java index bfec6fd6dc9..ad0c446e0e1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DDLHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DDLHandler.java @@ -13,12 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; -import com._4paradigm.openmldb.java_sdk_test.util.Tool; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; + +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; +import com._4paradigm.openmldb.test_common.util.Tool; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; @@ -32,9 +33,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + public OpenMLDBResult 
onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(CommandResultUtil.success(result)); fesqlResult.setDbName(dbName); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DMLHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DMLHandler.java similarity index 72% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DMLHandler.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DMLHandler.java index d183a1fcea8..5f402063d1f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DMLHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DMLHandler.java @@ -13,11 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; @@ -31,9 +31,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(CommandResultUtil.success(result)); fesqlResult.setDbName(dbName); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DescHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DescHandler.java similarity index 72% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DescHandler.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DescHandler.java index c690bc8cbc5..080579f79d2 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/DescHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DescHandler.java @@ -13,13 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; @@ -32,9 +32,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/QueryHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/QueryHandler.java 
similarity index 80% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/QueryHandler.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/QueryHandler.java index 9a8eb053afd..6496e1621e9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/QueryHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/QueryHandler.java @@ -13,13 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import org.apache.commons.collections4.CollectionUtils; @@ -35,9 +35,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); 
boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentHandler.java similarity index 73% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentHandler.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentHandler.java index e4e4b9e2394..7740138a2df 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentHandler.java @@ -13,13 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; @@ -33,9 +33,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentsHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentsHandler.java similarity index 75% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentsHandler.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentsHandler.java index 8ef12deee1f..1c6c481fe40 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/ShowDeploymentsHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentsHandler.java @@ -13,13 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.command.OpenmlDBCommandFactory; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.util.CommandResultUtil; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import org.testng.collections.Lists; @@ -34,9 +34,9 @@ public boolean preHandle(String sql) { } @Override - public FesqlResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - FesqlResult fesqlResult = new FesqlResult(); - List result = OpenmlDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); fesqlResult.setMsg(Joiner.on("\n").join(result)); fesqlResult.setOk(ok); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/SqlChainManager.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java similarity index 85% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/SqlChainManager.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java index 884f4716085..50aa40d8ebe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/command/chain/SqlChainManager.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.command.chain; +package com._4paradigm.openmldb.test_common.command.chain; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; public class SqlChainManager { @@ -47,8 +47,8 @@ private static class ClassHolder { public static SqlChainManager of() { return ClassHolder.holder; } - public FesqlResult sql(OpenMLDBInfo openMLDBInfo, String dbName, String sql){ - FesqlResult fesqlResult = sqlHandler.doHandle(openMLDBInfo, dbName, sql); + public OpenMLDBResult sql(OpenMLDBInfo openMLDBInfo, String dbName, String sql){ + OpenMLDBResult fesqlResult = sqlHandler.doHandle(openMLDBInfo, dbName, sql); return fesqlResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBClient.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBClient.java similarity index 68% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBClient.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBClient.java index 65b2502eced..3adebcb59f4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBClient.java @@ -14,14 +14,13 @@ * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.common; +package com._4paradigm.openmldb.test_common.openmldb; import com._4paradigm.openmldb.sdk.SdkOption; import com._4paradigm.openmldb.sdk.SqlException; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.sdk.impl.SqlClusterExecutor; -import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.Data; import lombok.extern.slf4j.Slf4j; @@ -49,7 +48,21 @@ public OpenMLDBClient(String zkCluster, String zkPath){ e.printStackTrace(); } } - public OpenMLDBClient(OpenMLDBInfo openMLDBInfo){ - this(openMLDBInfo.getZk_cluster(),openMLDBInfo.getZk_root_path()); + public OpenMLDBClient(String host, Integer port){ + SdkOption option = new SdkOption(); + option.setHost(host); + option.setPort(port); + option.setClusterMode(false); + option.setSessionTimeout(10000); + option.setRequestTimeout(60000); + log.info("host {}, port {}", option.getHost(), option.getPort()); + try { + executor = new SqlClusterExecutor(option); + } catch (SqlException e) { + e.printStackTrace(); + } } +// public OpenMLDBClient(OpenMLDBInfo openMLDBInfo){ +// 
this(openMLDBInfo.getZk_cluster(),openMLDBInfo.getZk_root_path()); +// } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java similarity index 91% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java index 480e72aeb13..c7b4d2eaa48 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbGlobalVar.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.common; +package com._4paradigm.openmldb.test_common.openmldb; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; @@ -23,7 +23,7 @@ * @author zhaowei * @date 2020/6/11 11:45 AM */ -public class FedbGlobalVar { +public class OpenMLDBGlobalVar { public static String env; public static String level; public static String version; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/common/FedbHttp.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/common/OpenMLDBHttp.java similarity index 97% rename from test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/common/FedbHttp.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/common/OpenMLDBHttp.java index b30d56510af..1016deb01e9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/common/FedbHttp.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/common/OpenMLDBHttp.java @@ -18,7 +18,7 @@ import com._4paradigm.openmldb.test_common.restful.model.HttpMethod; import com._4paradigm.openmldb.test_common.restful.model.HttpResult; -import com._4paradigm.openmldb.test_common.restful.util.HttpRequest; +import com._4paradigm.openmldb.test_common.util.HttpRequest; import lombok.Data; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.MapUtils; @@ -28,7 +28,7 @@ @Data @Slf4j -public class FedbHttp { +public class OpenMLDBHttp { private String url; private String uri; private String body; diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java index b17e6e160ac..274c3abe174 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/model/RestfulCaseFile.java @@ -16,8 +16,7 @@ package com._4paradigm.openmldb.test_common.restful.model; -import com._4paradigm.openmldb.test_common.restful.util.OpenMLDBTool; -import com._4paradigm.openmldb.test_common.restful.util.Tool; +import com._4paradigm.openmldb.test_common.util.Tool; import lombok.Data; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -56,7 +55,7 @@ public List getCases() { List debugs = getDebugs(); for (RestfulCase tmpCase : cases) { if(baseCase!=null){ - OpenMLDBTool.mergeObject(baseCase,tmpCase); + Tool.mergeObject(baseCase,tmpCase); } if (!CollectionUtils.isEmpty(debugs)) { if (debugs.contains(tmpCase.getDesc().trim())) { diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java deleted file mode 100755 index bd905f9a5ae..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/OpenMLDBTool.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use 
this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com._4paradigm.openmldb.test_common.restful.util; - - -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.testng.Assert; - -import java.io.File; -import java.io.IOException; -import java.lang.reflect.Field; -import java.util.*; - -@Slf4j -public class OpenMLDBTool { - - public static String getFilePath(String filename) { - return OpenMLDBTool.class.getClassLoader().getResource(filename).getFile(); - } - - public static String getCasePath(String yamlCaseDir, String casePath) { - String caseDir = StringUtils.isEmpty(yamlCaseDir) ? 
OpenMLDBTool.openMLDBDir().getAbsolutePath() : yamlCaseDir; - Assert.assertNotNull(caseDir); - String caseAbsPath = caseDir + "/cases/" + casePath; - log.debug("case absolute path: {}", caseAbsPath); - return caseAbsPath; - } - - public static File openMLDBDir() { - File directory = new File("."); - directory = directory.getAbsoluteFile(); - while (null != directory) { - if (directory.isDirectory() && "OpenMLDB".equals(directory.getName())) { - break; - } - log.debug("current directory name {}", directory.getName()); - directory = directory.getParentFile(); - } - - if ("OpenMLDB".equals(directory.getName())) { - return directory; - } else { - return null; - } - } - - public static void sleep(long time) { - try { - Thread.sleep(time); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - - public static List getPaths(File directory) { - List list = new ArrayList<>(); - Collection files = FileUtils.listFiles(directory, null, true); - for (File f : files) { - list.add(f.getAbsolutePath()); - } - Collections.sort(list); - return list; - } - - - public static Properties getProperties(String fileName) { - Properties ps = new Properties(); - try { - ps.load(OpenMLDBTool.class.getClassLoader().getResourceAsStream(fileName)); - } catch (IOException e) { - e.printStackTrace(); - log.error(e.getMessage()); - } - return ps; - } - - public static String uuid() { - String uuid = UUID.randomUUID().toString().replaceAll("-", ""); - return uuid; - } - - public static void mergeObject(T origin, T destination) { - if (origin == null || destination == null) - return; - if (!origin.getClass().equals(destination.getClass())) - return; - Field[] fields = origin.getClass().getDeclaredFields(); - for (int i = 0; i < fields.length; i++) { - try { - fields[i].setAccessible(true); - Object originValue = fields[i].get(origin); - Object destValue = fields[i].get(destination); - if (null == destValue) { - fields[i].set(destination, originValue); - } - 
fields[i].setAccessible(false); - } catch (Exception e) { - } - } - } - -} - - - - - - - - - - - - diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CommandResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java similarity index 95% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CommandResultUtil.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java index 07d5ac7530b..c20d85c7576 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CommandResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java @@ -1,6 +1,8 @@ -package com._4paradigm.openmldb.java_sdk_test.util; +package com._4paradigm.openmldb.test_common.util; -import com._4paradigm.openmldb.java_sdk_test.entity.*; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBSchema; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com.google.common.base.Joiner; import org.apache.commons.collections4.CollectionUtils; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/HttpRequest.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/HttpRequest.java similarity index 99% rename from 
test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/HttpRequest.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/HttpRequest.java index 32a44a18284..605423446ce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/HttpRequest.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/HttpRequest.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.test_common.restful.util; +package com._4paradigm.openmldb.test_common.util; import com._4paradigm.openmldb.test_common.restful.model.HttpResult; import lombok.extern.slf4j.Slf4j; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/OpenMLDBUtil.java similarity index 89% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/OpenMLDBUtil.java index df9e6adc059..897301c5363 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/FesqlUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/OpenMLDBUtil.java @@ -14,16 +14,15 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.util; +package com._4paradigm.openmldb.test_common.util; import com._4paradigm.openmldb.DataType; import com._4paradigm.openmldb.SQLRequestRow; import com._4paradigm.openmldb.Schema; -import com._4paradigm.openmldb.java_sdk_test.common.FedbGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlResult; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBColumn; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBIndex; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBSchema; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBSchema; import com._4paradigm.openmldb.jdbc.CallablePreparedStatement; import com._4paradigm.openmldb.jdbc.SQLResultSet; import com._4paradigm.openmldb.sdk.QueryFuture; @@ -32,12 +31,12 @@ import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.joda.time.DateTime; import org.slf4j.Logger; import org.testng.collections.Lists; @@ -56,7 +55,7 @@ * @date 2020/6/17 4:00 PM */ @Slf4j -public class FesqlUtil { +public class OpenMLDBUtil { private static String reg = "\\{(\\d+)\\}"; private static Pattern pattern = Pattern.compile(reg); private static final Logger logger = new LogProxy(log); @@ -244,17 +243,17 @@ public static String getSQLTypeString(int dataType) { } } - public static FesqlResult sqls(SqlExecutor executor, 
String dbName, List sqls) { - FesqlResult fesqlResult = null; + public static OpenMLDBResult sqls(SqlExecutor executor, String dbName, List sqls) { + OpenMLDBResult fesqlResult = null; for (String sql : sqls) { fesqlResult = sql(executor, dbName, sql); } return fesqlResult; } - public static FesqlResult sqlRequestMode(SqlExecutor executor, String dbName, - Boolean need_insert_request_row, String sql, InputDesc input) throws SQLException { - FesqlResult fesqlResult = null; + public static OpenMLDBResult sqlRequestMode(SqlExecutor executor, String dbName, + Boolean need_insert_request_row, String sql, InputDesc input) throws SQLException { + OpenMLDBResult fesqlResult = null; if (sql.toLowerCase().startsWith("select")||sql.toLowerCase().startsWith("deploy")) { fesqlResult = selectRequestModeWithPreparedStatement(executor, dbName, need_insert_request_row, sql, input); } else { @@ -263,10 +262,10 @@ public static FesqlResult sqlRequestMode(SqlExecutor executor, String dbName, return fesqlResult; } - public static FesqlResult sqlBatchRequestMode(SqlExecutor executor, String dbName, - String sql, InputDesc input, - List commonColumnIndices) throws SQLException { - FesqlResult fesqlResult = null; + public static OpenMLDBResult sqlBatchRequestMode(SqlExecutor executor, String dbName, + String sql, InputDesc input, + List commonColumnIndices) throws SQLException { + OpenMLDBResult fesqlResult = null; if (sql.toLowerCase().startsWith("select")) { fesqlResult = selectBatchRequestModeWithPreparedStatement( executor, dbName, sql, input, commonColumnIndices); @@ -276,10 +275,10 @@ public static FesqlResult sqlBatchRequestMode(SqlExecutor executor, String dbNam return fesqlResult; } - public static FesqlResult sqlRequestModeWithSp(SqlExecutor executor, String dbName, String spName, - Boolean needInsertRequestRow, String sql, - InputDesc rows, boolean isAsyn) throws SQLException { - FesqlResult fesqlResult = null; + public static OpenMLDBResult sqlRequestModeWithSp(SqlExecutor 
executor, String dbName, String spName, + Boolean needInsertRequestRow, String sql, + InputDesc rows, boolean isAsyn) throws SQLException { + OpenMLDBResult fesqlResult = null; if (sql.toLowerCase().startsWith("create procedure")) { fesqlResult = selectRequestModeWithSp(executor, dbName, spName, needInsertRequestRow, sql, rows, isAsyn); } else { @@ -288,9 +287,9 @@ public static FesqlResult sqlRequestModeWithSp(SqlExecutor executor, String dbNa return fesqlResult; } - public static FesqlResult sql(SqlExecutor executor, String dbName, String sql) { + public static OpenMLDBResult sql(SqlExecutor executor, String dbName, String sql) { useDB(executor,dbName); - FesqlResult fesqlResult = null; + OpenMLDBResult fesqlResult = null; if (sql.startsWith("create database") || sql.startsWith("drop database")) { fesqlResult = db(executor, sql); }else if(sql.startsWith("CREATE INDEX")||sql.startsWith("create index")){ @@ -313,12 +312,12 @@ public static FesqlResult sql(SqlExecutor executor, String dbName, String sql) { return fesqlResult; } - public static FesqlResult selectInto(SqlExecutor executor,String dbName,String outSql){ + public static OpenMLDBResult selectInto(SqlExecutor executor, String dbName, String outSql){ if (outSql.isEmpty()){ return null; } logger.info("select into:{}",outSql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, outSql); if (rawRs == null) { fesqlResult.setOk(false); @@ -336,12 +335,12 @@ public static FesqlResult selectInto(SqlExecutor executor,String dbName,String o return fesqlResult; } - public static FesqlResult deploy(SqlExecutor executor,String dbName,String showdeploySql){ + public static OpenMLDBResult deploy(SqlExecutor executor, String dbName, String showdeploySql){ if (showdeploySql.isEmpty()){ return null; } logger.info("show deployment:{}",showdeploySql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new 
OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, showdeploySql); if (rawRs == null) { fesqlResult.setOk(false); @@ -349,7 +348,7 @@ public static FesqlResult deploy(SqlExecutor executor,String dbName,String showd } else if (rawRs instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)rawRs; - JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); String deployStr = convertRestultSetToListDeploy(rs); String[] strings = deployStr.split("\n"); @@ -365,12 +364,12 @@ public static FesqlResult deploy(SqlExecutor executor,String dbName,String showd return fesqlResult; } - public static FesqlResult showDeploys(SqlExecutor executor,String dbName,String showdeploySqls){ + public static OpenMLDBResult showDeploys(SqlExecutor executor, String dbName, String showdeploySqls){ if (showdeploySqls.isEmpty()){ return null; } logger.info("show deployments:{}",showdeploySqls); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, showdeploySqls); if (rawRs == null) { fesqlResult.setOk(false); @@ -378,7 +377,7 @@ public static FesqlResult showDeploys(SqlExecutor executor,String dbName,String } else if (rawRs instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)rawRs; - JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); List> lists = convertRestultSetToList(rs); if(lists.size() == 0 ||lists.isEmpty()){ @@ -458,12 +457,12 @@ private static OpenmldbDeployment parseDeployment(List lines){ return deployment; } - public static FesqlResult desc(SqlExecutor executor,String dbName,String descSql){ + public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String descSql){ if (descSql.isEmpty()){ return null; } logger.info("desc:{}",descSql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult 
fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, descSql); if (rawRs == null) { @@ -472,7 +471,7 @@ public static FesqlResult desc(SqlExecutor executor,String dbName,String descSql } else if (rawRs instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)rawRs; - JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); String deployStr = convertRestultSetToListDeploy(rs); List listDesc = convertRestultSetToListDesc(rs); @@ -526,12 +525,12 @@ public static OpenMLDBSchema parseSchema(List lines){ return schema; } - public static FesqlResult createIndex(SqlExecutor executor, String dbName, String sql) { + public static OpenMLDBResult createIndex(SqlExecutor executor, String dbName, String sql) { if (sql.isEmpty()) { return null; } logger.info("ddl sql:{}", sql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); boolean createOk = false; try { createOk = executor.getStatement().execute(sql); @@ -545,20 +544,20 @@ public static FesqlResult createIndex(SqlExecutor executor, String dbName, Strin } - public static FesqlResult insert(SqlExecutor executor, String dbName, String insertSql) { + public static OpenMLDBResult insert(SqlExecutor executor, String dbName, String insertSql) { if (insertSql.isEmpty()) { return null; } logger.info("insert sql:{}", insertSql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); boolean createOk = executor.executeInsert(dbName, insertSql); fesqlResult.setOk(createOk); logger.info("insert result:{}" + fesqlResult); return fesqlResult; } - public static FesqlResult selectWithPrepareStatement(SqlExecutor executor, String dbName, String sql,List paramterTypes,List params) { - FesqlResult fesqlResult = new FesqlResult(); + public static OpenMLDBResult selectWithPrepareStatement(SqlExecutor executor, String dbName, String sql, List 
paramterTypes, List params) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); try { if (sql.isEmpty()) { return null; @@ -574,7 +573,7 @@ public static FesqlResult selectWithPrepareStatement(SqlExecutor executor, Strin } else if (resultSet instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)resultSet; - JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); List> result = convertRestultSetToList(rs); fesqlResult.setCount(result.size()); @@ -593,8 +592,8 @@ public static FesqlResult selectWithPrepareStatement(SqlExecutor executor, Strin return fesqlResult; } - public static FesqlResult insertWithPrepareStatement(SqlExecutor executor, String dbName, String insertSql,List params) { - FesqlResult fesqlResult = new FesqlResult(); + public static OpenMLDBResult insertWithPrepareStatement(SqlExecutor executor, String dbName, String insertSql, List params) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); try { if (insertSql.isEmpty()) { return null; @@ -616,7 +615,7 @@ public static FesqlResult insertWithPrepareStatement(SqlExecutor executor, Strin return fesqlResult; } - public static FesqlResult db(SqlExecutor executor, String ddlSql) { + public static OpenMLDBResult db(SqlExecutor executor, String ddlSql) { if (ddlSql.isEmpty()) { return null; } @@ -628,18 +627,18 @@ public static FesqlResult db(SqlExecutor executor, String ddlSql) { }else{ db = executor.dropDB(dbName); } - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); fesqlResult.setOk(db); logger.info("db result:{}", fesqlResult); return fesqlResult; } - public static FesqlResult ddl(SqlExecutor executor, String dbName, String ddlSql) { + public static OpenMLDBResult ddl(SqlExecutor executor, String dbName, String ddlSql) { if (ddlSql.isEmpty()) { return null; } logger.info("ddl sql:{}", ddlSql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult 
= new OpenMLDBResult(); boolean createOk = executor.executeDDL(dbName, ddlSql); fesqlResult.setOk(createOk); logger.info("ddl result:{}", fesqlResult); @@ -660,9 +659,9 @@ private static List> convertRestultSetToList(SQLResultSet rs) throw return result; } - private static FesqlResult selectRequestModeWithPreparedStatement(SqlExecutor executor, String dbName, - Boolean need_insert_request_row, - String selectSql, InputDesc input) { + private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor executor, String dbName, + Boolean need_insert_request_row, + String selectSql, InputDesc input) { if (selectSql.isEmpty()) { logger.error("fail to execute sql in request mode: select sql is empty"); return null; @@ -686,7 +685,7 @@ private static FesqlResult selectRequestModeWithPreparedStatement(SqlExecutor ex String insertDbName= input.getDb().isEmpty() ? dbName : input.getDb(); logger.info("select sql:{}", selectSql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); List> result = Lists.newArrayList(); for (int i = 0; i < rows.size(); i++) { PreparedStatement rps = null; @@ -727,7 +726,7 @@ private static FesqlResult selectRequestModeWithPreparedStatement(SqlExecutor ex } if (i == rows.size()-1) { try { - JDBCUtil.setSchema(resultSet.getMetaData(),fesqlResult); + ResultUtil.setSchema(resultSet.getMetaData(),fesqlResult); } catch (SQLException throwables) { fesqlResult.setOk(false); fesqlResult.setMsg("Fail to set meta data"); @@ -753,9 +752,9 @@ private static FesqlResult selectRequestModeWithPreparedStatement(SqlExecutor ex return fesqlResult; } - private static FesqlResult selectBatchRequestModeWithPreparedStatement(SqlExecutor executor, String dbName, - String selectSql, InputDesc input, - List commonColumnIndices) { + private static OpenMLDBResult selectBatchRequestModeWithPreparedStatement(SqlExecutor executor, String dbName, + String selectSql, InputDesc input, + List commonColumnIndices) { if 
(selectSql.isEmpty()) { logger.error("fail to execute sql in batch request mode: select sql is empty"); return null; @@ -775,7 +774,7 @@ private static FesqlResult selectBatchRequestModeWithPreparedStatement(SqlExecut return null; } logger.info("select sql:{}", selectSql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); PreparedStatement rps = null; SQLResultSet sqlResultSet = null; @@ -793,7 +792,7 @@ private static FesqlResult selectBatchRequestModeWithPreparedStatement(SqlExecut List> result = Lists.newArrayList(); result.addAll(convertRestultSetToList(sqlResultSet)); fesqlResult.setResult(result); - JDBCUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); + ResultUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); fesqlResult.setCount(result.size()); // fesqlResult.setResultSchema(sqlResultSet.GetInternalSchema()); @@ -818,9 +817,9 @@ private static FesqlResult selectBatchRequestModeWithPreparedStatement(SqlExecut return fesqlResult; } - private static FesqlResult selectRequestModeWithSp(SqlExecutor executor, String dbName, String spName, - Boolean needInsertRequestRow, - String sql, InputDesc input, boolean isAsyn) { + private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, String dbName, String spName, + Boolean needInsertRequestRow, + String sql, InputDesc input, boolean isAsyn) { if (sql.isEmpty()) { logger.error("fail to execute sql in request mode: select sql is empty"); return null; @@ -845,7 +844,7 @@ private static FesqlResult selectRequestModeWithSp(SqlExecutor executor, String logger.info("procedure sql:{}", sql); String insertDbName = input.getDb().isEmpty() ? dbName : input.getDb(); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (!executor.executeDDL(dbName, sql)) { logger.error("execute ddl failed! 
sql: {}", sql); fesqlResult.setOk(false); @@ -887,7 +886,7 @@ private static FesqlResult selectRequestModeWithSp(SqlExecutor executor, String } if (i == 0) { try { - JDBCUtil.setSchema(resultSet.getMetaData(),fesqlResult); + ResultUtil.setSchema(resultSet.getMetaData(),fesqlResult); } catch (SQLException throwables) { fesqlResult.setOk(false); fesqlResult.setMsg("fail to get/set meta data"); @@ -916,8 +915,8 @@ private static FesqlResult selectRequestModeWithSp(SqlExecutor executor, String return fesqlResult; } - public static FesqlResult selectBatchRequestModeWithSp(SqlExecutor executor, String dbName, String spName, - String sql, InputDesc input, boolean isAsyn) { + public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, String dbName, String spName, + String sql, InputDesc input, boolean isAsyn) { if (sql.isEmpty()) { logger.error("fail to execute sql in batch request mode: select sql is empty"); return null; @@ -928,7 +927,7 @@ public static FesqlResult selectBatchRequestModeWithSp(SqlExecutor executor, Str return null; } logger.info("procedure sql: {}", sql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (!executor.executeDDL(dbName, sql)) { fesqlResult.setOk(false); fesqlResult.setMsg("fail to execute ddl"); @@ -973,7 +972,7 @@ public static FesqlResult selectBatchRequestModeWithSp(SqlExecutor executor, Str List> result = Lists.newArrayList(); result.addAll(convertRestultSetToList((SQLResultSet) sqlResultSet)); fesqlResult.setResult(result); - JDBCUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); + ResultUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); fesqlResult.setCount(result.size()); } catch (SQLException e) { @@ -1231,9 +1230,11 @@ private static boolean setRequestData(PreparedStatement requestPs, List requestPs.setDate(i + 1, new Date(((java.util.Date) obj).getTime())); } else if (obj instanceof Date) { requestPs.setDate(i + 1, (Date) (obj)); - } else if (obj 
instanceof DateTime) { - requestPs.setDate(i + 1, new Date(((DateTime) obj).getMillis())); - } else { + } +// else if (obj instanceof DateTime) { +// requestPs.setDate(i + 1, new Date(((DateTime) obj).getMillis())); +// } + else { try { Date date = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(obj.toString() + " 00:00:00").getTime()); logger.info("build request row: obj: {}, append date: {}, {}, {}, {}",obj, date.toString(), date.getYear() + 1900, date.getMonth() + 1, date.getDate()); @@ -1282,12 +1283,12 @@ private static ResultSet buildRequestPreparedStatmentAsync(CallablePreparedState } } - public static FesqlResult select(SqlExecutor executor, String dbName, String selectSql) { + public static OpenMLDBResult select(SqlExecutor executor, String dbName, String selectSql) { if (selectSql.isEmpty()) { return null; } logger.info("select sql:{}", selectSql); - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, selectSql); if (rawRs == null) { fesqlResult.setOk(false); @@ -1295,7 +1296,7 @@ public static FesqlResult select(SqlExecutor executor, String dbName, String sel } else if (rawRs instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)rawRs; - JDBCUtil.setSchema(rs.getMetaData(),fesqlResult); + ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); List> result = convertRestultSetToList(rs); fesqlResult.setCount(result.size()); @@ -1406,7 +1407,7 @@ public static String formatSql(String sql, OpenMLDBInfo fedbInfo) { } public static String formatSql(String sql, List tableNames) { - return formatSql(sql,tableNames, FedbGlobalVar.mainInfo); + return formatSql(sql,tableNames, OpenMLDBGlobalVar.mainInfo); } // public static FesqlResult createAndInsert(SqlExecutor executor, String dbName, @@ -1414,9 +1415,9 @@ public static String formatSql(String sql, List tableNames) { // boolean useFirstInputAsRequests) { // return 
createAndInsert(executor, dbName, inputs, useFirstInputAsRequests); // } - public static FesqlResult createTable(SqlExecutor executor,String dbName,String createSql){ + public static OpenMLDBResult createTable(SqlExecutor executor, String dbName, String createSql){ if (StringUtils.isNotEmpty(createSql)) { - FesqlResult res = FesqlUtil.ddl(executor, dbName, createSql); + OpenMLDBResult res = OpenMLDBUtil.ddl(executor, dbName, createSql); if (!res.isOk()) { logger.error("fail to create table"); return res; @@ -1426,10 +1427,10 @@ public static FesqlResult createTable(SqlExecutor executor,String dbName,String throw new IllegalArgumentException("create sql is null"); } - public static FesqlResult createAndInsert(SqlExecutor executor, - String defaultDBName, - List inputs, - boolean useFirstInputAsRequests) { + public static OpenMLDBResult createAndInsert(SqlExecutor executor, + String defaultDBName, + List inputs, + boolean useFirstInputAsRequests) { // Create inputs' databasess if exist HashSet dbNames = new HashSet<>(); if (!StringUtils.isEmpty(defaultDBName)) { @@ -1446,7 +1447,7 @@ public static FesqlResult createAndInsert(SqlExecutor executor, } } - FesqlResult fesqlResult = new FesqlResult(); + OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (inputs != null && inputs.size() > 0) { for (int i = 0; i < inputs.size(); i++) { String tableName = inputs.get(i).getName(); @@ -1455,7 +1456,7 @@ public static FesqlResult createAndInsert(SqlExecutor executor, continue; } createSql = SQLCase.formatSql(createSql, i, tableName); - createSql = formatSql(createSql,FedbGlobalVar.mainInfo); + createSql = formatSql(createSql, OpenMLDBGlobalVar.mainInfo); String dbName = inputs.get(i).getDb().isEmpty() ? 
defaultDBName : inputs.get(i).getDb(); createTable(executor,dbName,createSql); InputDesc input = inputs.get(i); @@ -1466,7 +1467,7 @@ public static FesqlResult createAndInsert(SqlExecutor executor, for (String insertSql : inserts) { insertSql = SQLCase.formatSql(insertSql, i, input.getName()); if (!insertSql.isEmpty()) { - FesqlResult res = FesqlUtil.insert(executor, dbName, insertSql); + OpenMLDBResult res = OpenMLDBUtil.insert(executor, dbName, insertSql); if (!res.isOk()) { logger.error("fail to insert table"); return res; @@ -1479,11 +1480,11 @@ public static FesqlResult createAndInsert(SqlExecutor executor, return fesqlResult; } - public static FesqlResult createAndInsertWithPrepared(SqlExecutor executor, - String defaultDBName, - List inputs, - boolean useFirstInputAsRequests) { - FesqlResult fesqlResult = new FesqlResult(); + public static OpenMLDBResult createAndInsertWithPrepared(SqlExecutor executor, + String defaultDBName, + List inputs, + boolean useFirstInputAsRequests) { + OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (inputs != null && inputs.size() > 0) { for (int i = 0; i < inputs.size(); i++) { String tableName = inputs.get(i).getName(); @@ -1499,7 +1500,7 @@ public static FesqlResult createAndInsertWithPrepared(SqlExecutor executor, insertSql = SQLCase.formatSql(insertSql, i, tableName); List> rows = input.getRows(); for(List row:rows){ - FesqlResult res = FesqlUtil.insertWithPrepareStatement(executor, dbName, insertSql, row); + OpenMLDBResult res = OpenMLDBUtil.insertWithPrepareStatement(executor, dbName, insertSql, row); if (!res.isOk()) { logger.error("fail to insert table"); return res; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java new file mode 100644 index 00000000000..30ae633db9c --- /dev/null +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java @@ -0,0 +1,32 @@ +package com._4paradigm.openmldb.test_common.util; + +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +public class ResultUtil { + public static void setSchema(ResultSetMetaData metaData, OpenMLDBResult fesqlResult) { + try { + int columnCount = metaData.getColumnCount(); + List columnNames = new ArrayList<>(); + List columnTypes = new ArrayList<>(); + for (int i = 1; i <= columnCount; i++) { + String columnLabel = null; + try { + columnLabel = metaData.getColumnLabel(i); + }catch (SQLException e){ + columnLabel = metaData.getColumnName(i); + } + columnNames.add(columnLabel); + columnTypes.add(OpenMLDBUtil.getSQLTypeString(metaData.getColumnType(i))); + } + fesqlResult.setColumnNames(columnNames); + fesqlResult.setColumnTypes(columnTypes); + }catch (SQLException e){ + e.printStackTrace(); + } + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/Tool.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/Tool.java similarity index 52% rename from test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/Tool.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/Tool.java index 3030346acb8..7fac0a16502 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/restful/util/Tool.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/Tool.java @@ 
-13,15 +13,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com._4paradigm.openmldb.test_common.restful.util; +package com._4paradigm.openmldb.test_common.util; import com.google.gson.JsonElement; import com.google.gson.JsonParser; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.http.message.BasicNameValuePair; +import org.testng.Assert; import sun.misc.BASE64Encoder; +import java.io.File; import java.io.IOException; +import java.lang.reflect.Field; import java.net.MalformedURLException; import java.net.URL; import java.security.MessageDigest; @@ -34,6 +39,86 @@ public class Tool { public static final Pattern PATTERN = Pattern.compile("<(.*?)>"); + public static String getFilePath(String filename) { + return Tool.class.getClassLoader().getResource(filename).getFile(); + } + + public static String getCasePath(String yamlCaseDir, String casePath) { + String caseDir = StringUtils.isEmpty(yamlCaseDir) ? 
Tool.openMLDBDir().getAbsolutePath() : yamlCaseDir; + Assert.assertNotNull(caseDir); + String caseAbsPath = caseDir + "/cases/" + casePath; + log.debug("case absolute path: {}", caseAbsPath); + return caseAbsPath; + } + + public static File openMLDBDir() { + File directory = new File("."); + directory = directory.getAbsoluteFile(); + while (null != directory) { + if (directory.isDirectory() && "OpenMLDB".equals(directory.getName())) { + break; + } + log.debug("current directory name {}", directory.getName()); + directory = directory.getParentFile(); + } + + if ("OpenMLDB".equals(directory.getName())) { + return directory; + } else { + return null; + } + } + + public static void sleep(long time) { + try { + Thread.sleep(time); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + public static List getPaths(File directory) { + List list = new ArrayList<>(); + Collection files = FileUtils.listFiles(directory, null, true); + for (File f : files) { + list.add(f.getAbsolutePath()); + } + Collections.sort(list); + return list; + } + + + public static Properties getProperties(String fileName) { + Properties ps = new Properties(); + try { + ps.load(Tool.class.getClassLoader().getResourceAsStream(fileName)); + } catch (IOException e) { + e.printStackTrace(); + log.error(e.getMessage()); + } + return ps; + } + + public static void mergeObject(T origin, T destination) { + if (origin == null || destination == null) + return; + if (!origin.getClass().equals(destination.getClass())) + return; + Field[] fields = origin.getClass().getDeclaredFields(); + for (int i = 0; i < fields.length; i++) { + try { + fields[i].setAccessible(true); + Object originValue = fields[i].get(origin); + Object destValue = fields[i].get(destination); + if (null == destValue) { + fields[i].set(destination, originValue); + } + fields[i].setAccessible(false); + } catch (Exception e) { + } + } + } + public static void genStr(String str, Map> map, List list){ Matcher matcher = 
PATTERN.matcher(str); if (matcher.find()){ @@ -109,144 +194,12 @@ public static String md5(String s){ } } - public static Properties getProperties(String path,Class c){ - Properties ps = new Properties(); - try { - ps.load(c.getClassLoader().getResourceAsStream(path)); - } catch (IOException e) { - e.printStackTrace(); - log.error(e.getMessage()); - } - return ps; - } - public static List getBasicNameValuePair(MapdataMap){ - List nvps = new ArrayList(); - for (String key:dataMap.keySet()){ - BasicNameValuePair nv = new BasicNameValuePair(key,String.valueOf(dataMap.get(key))); - nvps.add(nv); - } - return nvps; - } - - public static String strTime(String format,long time){ - SimpleDateFormat strFormat = new SimpleDateFormat(format); - if(time == 0){ - time = new Date().getTime(); - } - return strFormat.format(time); - } - - public static String ArrToString(T[] arr){ - String str = ""; - for (int i=0;iopenmldb-http-test openmldb-tool-test openmldb-deploy + openmldb-devops-test From 3a52ec0e50c9d6d30a7016c184af62be9917b3c9 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 5 Jul 2022 18:44:24 +0800 Subject: [PATCH 019/172] Optimize the structure --- .../_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java index 01e0441c097..4f423db36be 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java @@ -17,6 +17,7 @@ import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import 
com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.collect.Lists; import org.testng.annotations.Test; @@ -25,9 +26,9 @@ public class TestCommand { @Test public void test1(){ - FEDBInfo fedbInfo = FEDBInfo.builder() + OpenMLDBInfo fedbInfo = OpenMLDBInfo.builder() .basePath("/home/zhaowei01/fedb-auto-test/0.1.5") - .fedbPath("/home/zhaowei01/fedb-auto-test/0.1.5/openmldb-ns-1/bin/openmldb") + .openMLDBPath("/home/zhaowei01/fedb-auto-test/0.1.5/openmldb-ns-1/bin/openmldb") .zk_cluster("172.24.4.55:10000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) From 19f22c9a3bc62df1f26cf29d1d82c59eafb0682d Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 6 Jul 2022 10:52:06 +0800 Subject: [PATCH 020/172] Optimize the structure --- .../openmldb-devops-test/pom.xml | 2 +- .../devops_test/common/ClusterTest.java | 78 ++ .../high_availability/TestCluster.java | 21 +- .../executor/RestfulCliExecutor.java | 9 +- .../http_test/executor/RestfulExecutor.java | 19 +- .../openmldb/http_test/tmp/TestDropTable.java | 4 +- .../checker/CatCheckerByCli.java | 5 +- .../java_sdk_test/checker/ColumnsChecker.java | 6 +- .../checker/DeploymentCheckerByCli.java | 7 +- .../java_sdk_test/checker/ResultChecker.java | 8 +- .../checker/ResultCheckerByCli.java | 10 +- .../checker/ResultCheckerByJDBC.java | 8 +- .../executor/BaseSQLExecutor.java | 9 +- .../executor/BatchSQLExecutor.java | 19 +- .../executor/CommandExecutor.java | 15 +- .../executor/InsertPreparedExecutor.java | 4 +- .../java_sdk_test/executor/MysqlExecutor.java | 7 +- .../executor/QueryPreparedExecutor.java | 9 +- .../executor/RequestQuerySQLExecutor.java | 19 +- .../executor/Sqlite3Executor.java | 7 +- .../executor/StoredProcedureSQLExecutor.java | 11 +- .../openmldb/java_sdk_test/util/JDBCUtil.java | 4 +- ...db_deploy.properties => deploy.properties} | 0 .../cluster/v030/SchemaTest.java | 9 +- .../entity/FesqlDataProviderTest.java | 5 +- .../java_sdk_test/temp/DebugTest.java | 4 +- 
.../java_sdk_test/temp/TestDropTable.java | 4 +- .../command/OpenMLDBCommandUtil.java | 5 +- .../openmldb/test_common/util/DataUtil.java | 285 +++++++ .../openmldb/test_common/util/ResultUtil.java | 48 +- .../util/{OpenMLDBUtil.java => SDKUtil.java} | 760 +++--------------- .../openmldb/test_common/util/SQLUtil.java | 76 ++ .../openmldb/test_common/util/SchemaUtil.java | 57 ++ .../openmldb/test_common/util/TypeUtil.java | 115 +++ 34 files changed, 880 insertions(+), 769 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/{fedb_deploy.properties => deploy.properties} (100%) create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java rename test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/{OpenMLDBUtil.java => SDKUtil.java} (56%) create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/TypeUtil.java diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml index 70e8d174a9c..38cb7f9fd09 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml @@ -19,7 +19,7 @@ com.4paradigm.openmldb - openmldb-deploy + openmldb-test-common ${project.version} diff 
--git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java new file mode 100644 index 00000000000..14654e695b1 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -0,0 +1,78 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.openmldb.devops_test.common; + + +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com.google.common.collect.Lists; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; + +import java.sql.Statement; + +/** + * @author zhaowei + * @date 2020/6/11 2:02 PM + */ +@Slf4j +public class ClusterTest { + protected static SqlExecutor executor; + + @BeforeTest() + @Parameters({"env","version","openMLDBPath"}) + public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String openMLDBPath) throws Exception { + OpenMLDBGlobalVar.env = env; + if(env.equalsIgnoreCase("cluster")){ + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(true); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + }else{ + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") + .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") + .zk_cluster("127.0.0.1:30000") + .zk_root_path("/openmldb") + .nsNum(2).tabletNum(3) + .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) + .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) + .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + + } + String caseEnv = 
System.getProperty("caseEnv"); + if (!StringUtils.isEmpty(caseEnv)) { + OpenMLDBGlobalVar.env = caseEnv; + } + log.info("fedb global var env: {}", env); + OpenMLDBClient fesqlClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = fesqlClient.getExecutor(); + log.info("executor:{}",executor); + Statement statement = executor.getStatement(); + statement.execute("SET @@execute_mode='online';"); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 36bac096fe2..18beeafa67c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -1,7 +1,26 @@ package com._4paradigm.openmldb.devops_test.high_availability; -public class TestCluster { +import com._4paradigm.openmldb.devops_test.common.ClusterTest; + +public class TestCluster extends ClusterTest { public void testMoreReplica(){ + // 创建磁盘表和内存表。 + // 其中一个tablet stop,leader 内存表和磁盘表可以正常访问,flower 内存表和磁盘表可以正常访问。 + // tablet start,数据可以回复,要看磁盘表和内存表。 + //创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 + //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 + //tablet 依次restart,数据可回复,可以访问。 + //3个tablet stop,不能访问。 + // 1个tablet启动,数据可回复,分片所在的表,可以访问。 + //ns stop,可以正常访问。 + //2个ns stop,不能访问。 + //ns start 可以访问。 + //一个 zk stop,可以正常访问 + //3个zk stop,不能正常访问。 + //一个zk start,可正常访问。 + //3个 zk start,可正常访问。 + // 一个节点(ns leader 所在服务器)重启,leader可以正常访问,flower可以正常访问。 + //一直查询某一个表,然后重启一个机器。 } } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java index 3478bcf6e5d..d946b5b699b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java @@ -23,7 +23,7 @@ import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandUtil; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.common.Checker; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.InputDesc; @@ -32,6 +32,7 @@ import com._4paradigm.openmldb.test_common.restful.model.BeforeAction; import com._4paradigm.openmldb.test_common.restful.model.HttpMethod; import com._4paradigm.openmldb.test_common.restful.model.RestfulCase; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.openmldb.test_common.util.Tool; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -82,7 +83,7 @@ public void prepare() { } if(CollectionUtils.isNotEmpty(beforeAction.getSqls())){ List sqls = beforeAction.getSqls().stream() - .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> SQLUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ 
-116,7 +117,7 @@ public void tearDown() { if(tearDown!=null){ if(CollectionUtils.isNotEmpty(tearDown.getSqls())){ List sqls = tearDown.getSqls().stream() - .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> SQLUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ -148,7 +149,7 @@ protected void afterAction(){ if(afterAction!=null){ if(CollectionUtils.isNotEmpty(afterAction.getSqls())){ List sqls = afterAction.getSqls().stream() - .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> SQLUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .collect(Collectors.toList()); fesqlResult = OpenMLDBComamndFacade.sqls(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, sqls); } diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java index 8c7c3192b0f..cb4cd6adb01 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulExecutor.java @@ -21,7 +21,7 @@ import com._4paradigm.openmldb.http_test.config.FedbRestfulConfig; import com._4paradigm.openmldb.java_sdk_test.checker.ResultChecker; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.common.Checker; import com._4paradigm.openmldb.test_common.model.ExpectDesc; 
@@ -31,6 +31,7 @@ import com._4paradigm.openmldb.test_common.restful.model.BeforeAction; import com._4paradigm.openmldb.test_common.restful.model.HttpMethod; import com._4paradigm.openmldb.test_common.restful.model.RestfulCase; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -69,7 +70,7 @@ public void prepare() { return; } if(CollectionUtils.isNotEmpty(beforeAction.getTables())) { - OpenMLDBResult res = OpenMLDBUtil.createAndInsert(executor, FedbRestfulConfig.DB_NAME, beforeAction.getTables(), false); + OpenMLDBResult res = SDKUtil.createAndInsert(executor, FedbRestfulConfig.DB_NAME, beforeAction.getTables(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail "); } @@ -82,7 +83,7 @@ public void prepare() { } if(CollectionUtils.isNotEmpty(beforeAction.getSqls())){ List sqls = beforeAction.getSqls().stream() - .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> SQLUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ -90,7 +91,7 @@ public void prepare() { return sql; }) .collect(Collectors.toList()); - OpenMLDBUtil.sqls(executor,FedbRestfulConfig.DB_NAME,sqls); + SDKUtil.sqlList(executor,FedbRestfulConfig.DB_NAME,sqls); } logger.info("prepare end"); } @@ -115,7 +116,7 @@ public void tearDown() { if(tearDown!=null){ if(CollectionUtils.isNotEmpty(tearDown.getSqls())){ List sqls = tearDown.getSqls().stream() - .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> SQLUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .map(sql->{ if(sql.contains("{db_name}")){ sql = sql.replace("{db_name}",FedbRestfulConfig.DB_NAME); @@ -123,7 +124,7 @@ public void tearDown() { return sql; }) .collect(Collectors.toList()); - 
fesqlResult = OpenMLDBUtil.sqls(executor, FedbRestfulConfig.DB_NAME, sqls); + fesqlResult = SDKUtil.sqlList(executor, FedbRestfulConfig.DB_NAME, sqls); } } @@ -135,7 +136,7 @@ public void tearDown() { for (InputDesc table : tables) { if(table.isDrop()) { String drop = "drop table " + table.getName() + ";"; - OpenMLDBUtil.ddl(executor, FedbRestfulConfig.DB_NAME, drop); + SDKUtil.ddl(executor, FedbRestfulConfig.DB_NAME, drop); } } } @@ -147,9 +148,9 @@ protected void afterAction(){ if(afterAction!=null){ if(CollectionUtils.isNotEmpty(afterAction.getSqls())){ List sqls = afterAction.getSqls().stream() - .map(sql -> OpenMLDBUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) + .map(sql -> SQLUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) .collect(Collectors.toList()); - fesqlResult = OpenMLDBUtil.sqls(executor, FedbRestfulConfig.DB_NAME, sqls); + fesqlResult = SDKUtil.sqlList(executor, FedbRestfulConfig.DB_NAME, sqls); } ExpectDesc expect = afterAction.getExpect(); if(expect!=null){ diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java index 6e77f854c0e..7d947bf4bb6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/test/java/com/_4paradigm/openmldb/http_test/tmp/TestDropTable.java @@ -17,7 +17,7 @@ import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.restful.model.HttpResult; import 
com._4paradigm.openmldb.test_common.util.HttpRequest; import com.google.gson.Gson; @@ -45,7 +45,7 @@ public void testAll() throws Exception { for(int i=0;i actualList = CommandUtil.run(command); List expectList = expectCat.getLines(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java index 7de5287523d..48d78e70a1d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.TypeUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import lombok.extern.slf4j.Slf4j; @@ -50,8 +50,8 @@ public void check() throws Exception { for (int i = 0; i < expectColumns.size(); i++) { // Assert.assertEquals(columnNames.get(i)+" "+columnTypes.get(i),expectColumns.get(i)); Assert.assertEquals(columnNames.get(i), Table.getColumnName(expectColumns.get(i))); - Assert.assertEquals(OpenMLDBUtil.getColumnType(columnTypes.get(i)), - OpenMLDBUtil.getColumnType(Table.getColumnType(expectColumns.get(i)))); + Assert.assertEquals(TypeUtil.getOpenMLDBColumnType(columnTypes.get(i)), + TypeUtil.getOpenMLDBColumnType(Table.getColumnType(expectColumns.get(i)))); } } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java index 1f00c9e623a..b1de3a498d7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java @@ -18,9 +18,10 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; @@ -40,10 +41,10 @@ public void check() throws Exception { reportLog.info("deployment check"); OpenmldbDeployment expectDeployment = expect.getDeployment(); String name = expectDeployment.getName(); - name = OpenMLDBUtil.formatSql(name, fesqlResult.getTableNames()); + name = SQLUtil.formatSql(name, fesqlResult.getTableNames()); expectDeployment.setName(name); String sql = expectDeployment.getSql(); - sql = OpenMLDBUtil.formatSql(sql, fesqlResult.getTableNames()); + sql = SQLUtil.formatSql(sql, fesqlResult.getTableNames()); expectDeployment.setSql(sql); if (expectDeployment == null) { return; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java index 
97e8afd4cdb..ec21b35b4dd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java @@ -18,9 +18,11 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.DataUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; +import com._4paradigm.openmldb.test_common.util.SchemaUtil; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.testng.Assert; @@ -47,13 +49,13 @@ public void check() throws ParseException { if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } - List> expectRows = OpenMLDBUtil.convertRows(expect.getRows(), + List> expectRows = DataUtil.convertRows(expect.getRows(), expect.getColumns()); List> actual = fesqlResult.getResult(); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = OpenMLDBUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = SchemaUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java index e9ff07f31b5..8fde7eb4833 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java @@ -18,9 +18,11 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.DataUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; +import com._4paradigm.openmldb.test_common.util.SchemaUtil; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.testng.Assert; @@ -47,12 +49,12 @@ public void check() throws ParseException { if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } - List> expectRows = OpenMLDBUtil.convertRows(expect.getRows(), expect.getColumns()); - List> actual = OpenMLDBUtil.convertRows(fesqlResult.getResult(), expect.getColumns()); + List> expectRows = DataUtil.convertRows(expect.getRows(), expect.getColumns()); + List> actual = DataUtil.convertRows(fesqlResult.getResult(), expect.getColumns()); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = OpenMLDBUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = SchemaUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java index f02575e2d1a..8c1065a819e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java @@ -16,9 +16,11 @@ package com._4paradigm.openmldb.java_sdk_test.checker; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.DataUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; +import com._4paradigm.openmldb.test_common.util.SchemaUtil; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.testng.Assert; @@ -45,13 +47,13 @@ public void check() throws Exception { if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } - List> expectRows = OpenMLDBUtil.convertRows(expect.getRows(), + List> expectRows = DataUtil.convertRows(expect.getRows(), expect.getColumns()); List> actual = fesqlResult.getResult(); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = OpenMLDBUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = SchemaUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index b40d731d4ad..01871b43532 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -21,11 +21,12 @@ import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; import com._4paradigm.openmldb.java_sdk_test.checker.DiffVersionChecker; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -116,11 +117,11 @@ public void tearDown(String version,SqlExecutor executor) { if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } - OpenMLDBUtil.sql(executor, dbName, sql); + SDKUtil.sql(executor, dbName, sql); }); } logger.info("version:{},begin drop table",version); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index c6a13823ac7..024039c33f6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -17,10 +17,11 @@ package com._4paradigm.openmldb.java_sdk_test.executor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.MapUtils; @@ -72,8 +73,8 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); logger.info("version:{},create db:{},{}", version, dbName, dbOk); - OpenMLDBUtil.useDB(executor,dbName); - OpenMLDBResult res = OpenMLDBUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), false); + SDKUtil.useDB(executor,dbName); + OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . 
version:"+version); } @@ -89,22 +90,22 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ for (String sql : sqls) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } - fesqlResult = OpenMLDBUtil.sql(executor, dbName, sql); + fesqlResult = SDKUtil.sql(executor, dbName, sql); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } - fesqlResult = OpenMLDBUtil.sql(executor, dbName, sql); + fesqlResult = SDKUtil.sql(executor, dbName, sql); } logger.info("version:{} execute end",version); return fesqlResult; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java index 471a4c968c7..02d0f262694 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java @@ -24,11 +24,12 @@ import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import 
com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -132,9 +133,9 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ for (String sql : sqls) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(openMLDBInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } @@ -143,9 +144,9 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ if (StringUtils.isNotEmpty(sql)) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(openMLDBInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } @@ -178,9 +179,9 @@ public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ if(MapUtils.isNotEmpty(openMLDBInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { - sql = 
OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName, sql); }); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java index fcd15c52695..4a64d26ae77 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java @@ -18,7 +18,7 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -45,7 +45,7 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); logger.info("version:{},create db:{},{}", version, dbName, dbOk); - OpenMLDBResult res = OpenMLDBUtil.createAndInsertWithPrepared(executor, dbName, fesqlCase.getInputs(), false); + OpenMLDBResult res = SDKUtil.createAndInsertWithPrepared(executor, dbName, fesqlCase.getInputs(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . 
version:"+version); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java index 8ad5c6e1043..a2a50003d15 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java @@ -16,13 +16,14 @@ package com._4paradigm.openmldb.java_sdk_test.executor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.java_sdk_test.util.JDBCUtil; import com._4paradigm.openmldb.java_sdk_test.util.MysqlUtil; import com._4paradigm.openmldb.test_common.model.DBType; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -69,13 +70,13 @@ public void execute() { List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.MYSQL); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.MYSQL); } mainResult = fesqlResult; diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java index 3b073456d8b..6c7ff449fe2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java @@ -17,11 +17,12 @@ package com._4paradigm.openmldb.java_sdk_test.executor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.MapUtils; @@ -64,14 +65,14 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } InputDesc parameters = fesqlCase.getParameters(); List types = parameters.getColumns().stream().map(s -> s.split("\\s+")[1]).collect(Collectors.toList()); List objects = parameters.getRows().get(0); - fesqlResult = 
OpenMLDBUtil.selectWithPrepareStatement(executor, dbName,sql, types,objects); + fesqlResult = SDKUtil.selectWithPrepareStatement(executor, dbName,sql, types,objects); } logger.info("version:{} execute end",version); return fesqlResult; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index c0e5e21add0..f5dc7dcc51e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -18,11 +18,12 @@ import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -60,20 +61,20 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { for (String sql : sqls) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = 
SQLUtil.formatSql(sql, tableNames); } - fesqlResult = OpenMLDBUtil.sql(executor, dbName, sql); + fesqlResult = SDKUtil.sql(executor, dbName, sql); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = OpenMLDBUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); }else { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); } InputDesc request = null; if (isBatchRequest) { @@ -91,7 +92,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { } } - fesqlResult = OpenMLDBUtil.sqlBatchRequestMode( + fesqlResult = SDKUtil.sqlBatchRequestMode( executor, dbName, sql, batchRequest, commonColumnIndices); } else { if (null != fesqlCase.getBatch_request()) { @@ -103,7 +104,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { logger.error("fail to execute in request query sql executor: sql case request columns is empty"); return null; } - fesqlResult = OpenMLDBUtil.sqlRequestMode(executor, dbName, null == fesqlCase.getBatch_request(), sql, request); + fesqlResult = SDKUtil.sqlRequestMode(executor, dbName, null == fesqlCase.getBatch_request(), sql, request); } } }catch (Exception e){ @@ -119,7 +120,7 @@ protected void prepare(String version,SqlExecutor executor) { boolean dbOk = executor.createDB(dbName); logger.info("create db:{},{}", dbName, dbOk); boolean useFirstInputAsRequests = !isBatchRequest && null == fesqlCase.getBatch_request(); - OpenMLDBResult res = OpenMLDBUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), useFirstInputAsRequests); + OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), useFirstInputAsRequests); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail"); } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java index f4493dd0084..816372a0819 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java @@ -17,13 +17,14 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.java_sdk_test.util.JDBCUtil; import com._4paradigm.openmldb.java_sdk_test.util.Sqlite3Util; import com._4paradigm.openmldb.test_common.model.DBType; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -70,13 +71,13 @@ public void execute() { List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.SQLITE3); } } String sql = fesqlCase.getSql(); if (sql != null && sql.length() > 0) { - sql = OpenMLDBUtil.formatSql(sql, tableNames); + sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.SQLITE3); } mainResult = fesqlResult; diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 4dbc45c702a..35bdf046de3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -17,10 +17,11 @@ package com._4paradigm.openmldb.java_sdk_test.executor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -50,7 +51,7 @@ public void prepare(String version,SqlExecutor executor){ logger.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); logger.info("create db:{},{}", dbName, dbOk); - OpenMLDBResult res = OpenMLDBUtil.createAndInsert( + OpenMLDBResult res = SDKUtil.createAndInsert( executor, dbName, fesqlCase.getInputs(), !isBatchRequest && null == fesqlCase.getBatch_request()); if (!res.isOk()) { @@ -89,16 +90,16 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { private OpenMLDBResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { String spSql = fesqlCase.getProcedure(sql); logger.info("spSql: {}", 
spSql); - return OpenMLDBUtil.sqlRequestModeWithSp( + return SDKUtil.sqlRequestModeWithProcedure( executor, dbName, fesqlCase.getSpName(), null == fesqlCase.getBatch_request(), spSql, fesqlCase.getInputs().get(0), isAsyn); } private OpenMLDBResult executeBatch(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { String spName = "sp_" + tableNames.get(0) + "_" + System.currentTimeMillis(); - String spSql = OpenMLDBUtil.buildSpSQLWithConstColumns(spName, sql, fesqlCase.getBatch_request()); + String spSql = SQLUtil.buildSpSQLWithConstColumns(spName, sql, fesqlCase.getBatch_request()); logger.info("spSql: {}", spSql); - return OpenMLDBUtil.selectBatchRequestModeWithSp( + return SDKUtil.selectBatchRequestModeWithSp( executor, dbName, spName, spSql, fesqlCase.getBatch_request(), isAsyn); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java index 91cd909289e..cd221ade44c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/JDBCUtil.java @@ -19,7 +19,7 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.DBType; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.TypeUtil; import lombok.extern.slf4j.Slf4j; import java.sql.*; @@ -110,7 +110,7 @@ public static void setSchema(ResultSetMetaData metaData, OpenMLDBResult fesqlRes columnLabel = metaData.getColumnName(i); } columnNames.add(columnLabel); - 
columnTypes.add(OpenMLDBUtil.getSQLTypeString(metaData.getColumnType(i))); + columnTypes.add(TypeUtil.fromJDBCTypeToString(metaData.getColumnType(i))); } fesqlResult.setColumnNames(columnNames); fesqlResult.setColumnTypes(columnTypes); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties similarity index 100% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fedb_deploy.properties rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java index 3040df97434..8bf027d1803 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java @@ -2,7 +2,8 @@ import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.TypeUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.Column; import com._4paradigm.openmldb.sdk.Schema; import io.qameta.allure.Feature; @@ -35,19 +36,19 @@ public void testHaveIndexAndOption() throws SQLException { "c8 date,\n" + "c9 bool not null,\n" + "index(key=(c1),ts=c7,ttl=10,ttl_type=latest))options(partitionnum=8,replicanum=3);"; - OpenMLDBUtil.sql(executor, 
OpenMLDBGlobalVar.dbName,createSql); + SDKUtil.sql(executor, OpenMLDBGlobalVar.dbName,createSql); Schema tableSchema = executor.getTableSchema(OpenMLDBGlobalVar.dbName, tableName); List columnList = tableSchema.getColumnList(); List actualList = columnList.stream() .map(column -> String.format("%s %s %s", column.getColumnName(), - OpenMLDBUtil.getColumnTypeByType(column.getSqlType()), + TypeUtil.fromJDBCTypeToString(column.getSqlType()), column.isNotNull() ? "not null" : "").trim()) .collect(Collectors.toList()); List expectList = Lists.newArrayList("c1 string","c2 int not null","c3 bigint","c4 smallint", "c5 float","c6 double not null","c7 timestamp not null","c8 date","c9 bool not null"); Assert.assertEquals(actualList,expectList); String deleteSql = "drop table "+tableName+";"; - OpenMLDBUtil.sql(executor, OpenMLDBGlobalVar.dbName,deleteSql); + SDKUtil.sql(executor, OpenMLDBGlobalVar.dbName,deleteSql); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java index 98e575b399b..229b62c8270 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java @@ -17,7 +17,8 @@ package com._4paradigm.openmldb.java_sdk_test.entity; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.DataUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import org.testng.Assert; @@ -96,7 +97,7 @@ public void 
converRowsTest() throws ParseException, FileNotFoundException { Assert.assertEquals(3, provider.getCases().size()); SQLCase sqlCase = provider.getCases().get(0); Assert.assertEquals(2, sqlCase.getInputs().size()); - List> expect = OpenMLDBUtil.convertRows(sqlCase.getExpect().getRows(), + List> expect = DataUtil.convertRows(sqlCase.getExpect().getRows(), sqlCase.getExpect().getColumns()); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java index a7c7b6834d2..5220e458adb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java @@ -18,7 +18,7 @@ import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.openmldb.test_common.provider.Yaml; @@ -47,7 +47,7 @@ public void testSelect(SQLCase testCase) throws Exception { @Test(dataProvider = "getCase") @Yaml(filePaths = {"debug/diff-debug-myhug.yaml"}) public void testSelectRequestMode(SQLCase testCase) throws Exception { - OpenMLDBUtil.setOnline(executor); + SDKUtil.setOnline(executor); ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java index b4a085ecaad..a56d455f368 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestDropTable.java @@ -18,7 +18,7 @@ import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; import org.testng.annotations.Test; import java.util.List; @@ -30,7 +30,7 @@ public void testAll(){ OpenMLDBClient fedbClient = new OpenMLDBClient("172.24.4.55:10000","/openmldb"); String dbName = "test_zw"; String sql = "show tables;"; - OpenMLDBResult fesqlResult = OpenMLDBUtil.select(fedbClient.getExecutor(), dbName, sql); + OpenMLDBResult fesqlResult = SDKUtil.select(fedbClient.getExecutor(), dbName, sql); List> result = fesqlResult.getResult(); for(List list:result){ System.out.println(list); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java index 317235b32ff..2282876cfc4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java @@ -19,7 +19,8 @@ import com._4paradigm.openmldb.test_common.common.LogProxy; import 
com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.util.OpenMLDBUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -69,7 +70,7 @@ public static OpenMLDBResult createAndInsert(OpenMLDBInfo openMLDBInfo, String d //create table String createSql = inputDesc.extractCreate(); createSql = SQLCase.formatSql(createSql, i, tableName); - createSql = OpenMLDBUtil.formatSql(createSql, openMLDBInfo); + createSql = SQLUtil.formatSql(createSql, openMLDBInfo); if (!createSql.isEmpty()) { OpenMLDBResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,createSql); if (!res.isOk()) { diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java new file mode 100644 index 00000000000..9a0154d0a89 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java @@ -0,0 +1,285 @@ +package com._4paradigm.openmldb.test_common.util; + +import com._4paradigm.openmldb.jdbc.SQLResultSet; +import lombok.extern.slf4j.Slf4j; + +import java.sql.*; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.List; +@Slf4j +public class DataUtil { + public static boolean setPreparedData(PreparedStatement ps, List parameterType, List objects) throws SQLException { + for(int i=0;i objects) throws SQLException { + ResultSetMetaData metaData = requestPs.getMetaData(); + int totalSize = 0; + for (int i = 0; i < metaData.getColumnCount(); 
i++) { + if (null == objects.get(i)) { + continue; + } + if (metaData.getColumnType(i + 1) == Types.VARCHAR) { + totalSize += objects.get(i).toString().length(); + } + } + log.info("init request row: {}", totalSize); + for (int i = 0; i < metaData.getColumnCount(); i++) { + Object obj = objects.get(i); + if (null == obj || obj.toString().equalsIgnoreCase("null")) { + requestPs.setNull(i + 1, 0); + continue; + } + int columnType = metaData.getColumnType(i + 1); + if (columnType == Types.BOOLEAN) { + requestPs.setBoolean(i + 1, Boolean.parseBoolean(obj.toString())); + } else if (columnType == Types.SMALLINT) { + requestPs.setShort(i + 1, Short.parseShort(obj.toString())); + } else if (columnType == Types.INTEGER) { + requestPs.setInt(i + 1, Integer.parseInt(obj.toString())); + } else if (columnType == Types.BIGINT) { + requestPs.setLong(i + 1, Long.parseLong(obj.toString())); + } else if (columnType == Types.FLOAT) { + requestPs.setFloat(i + 1, Float.parseFloat(obj.toString())); + } else if (columnType == Types.DOUBLE) { + requestPs.setDouble(i + 1, Double.parseDouble(obj.toString())); + } else if (columnType == Types.TIMESTAMP) { + requestPs.setTimestamp(i + 1, new Timestamp(Long.parseLong(obj.toString()))); + } else if (columnType == Types.DATE) { + if (obj instanceof java.util.Date) { + requestPs.setDate(i + 1, new Date(((java.util.Date) obj).getTime())); + } else if (obj instanceof Date) { + requestPs.setDate(i + 1, (Date) (obj)); + } +// else if (obj instanceof DateTime) { +// requestPs.setDate(i + 1, new Date(((DateTime) obj).getMillis())); +// } + else { + try { + Date date = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(obj.toString() + " 00:00:00").getTime()); + log.info("build request row: obj: {}, append date: {}, {}, {}, {}",obj, date.toString(), date.getYear() + 1900, date.getMonth() + 1, date.getDate()); + requestPs.setDate(i + 1, date); + } catch (ParseException e) { + log.error("Fail convert {} to date: {}", obj, e); + return false; + } + 
} + } else if (columnType == Types.VARCHAR) { + requestPs.setString(i + 1, obj.toString()); + } else { + log.error("fail to build request row: invalid data type {]", columnType); + return false; + } + } + return true; + } + public static List> convertRows(List> rows, List columns) throws ParseException { + List> list = new ArrayList<>(); + for (List row : rows) { + list.add(DataUtil.convertList(row, columns)); + } + return list; + } + public static List> convertResultSetToList(SQLResultSet rs) throws SQLException { + List> result = new ArrayList<>(); + while (rs.next()) { + List list = new ArrayList(); + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + list.add(DataUtil.getColumnData(rs, i)); + } + result.add(list); + } + return result; + } + public static String convertResultSetToListDeploy(SQLResultSet rs) throws SQLException { + String string = null; + while (rs.next()) { + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + string=String.valueOf(DataUtil.getColumnData(rs, i)); + } + } + return string; + } + + public static List convertResultSetToListDesc(SQLResultSet rs) throws SQLException { + List res = new ArrayList<>(); + while (rs.next()) { + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + String string=String.valueOf(DataUtil.getColumnData(rs, i)); + res.add(string); + } + } + return res; + } + public static Object getColumnData(SQLResultSet rs, int index) throws SQLException { + Object obj = null; + int columnType = rs.getMetaData().getColumnType(index + 1); + if (rs.getNString(index + 1) == null) { + log.info("rs is null"); + return null; + } + if (columnType == Types.BOOLEAN) { + obj = rs.getBoolean(index + 1); + } else if (columnType == Types.DATE) { + try { +// obj = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") +// .parse(rs.getNString(index + 1) + " 00:00:00").getTime()); + obj = rs.getDate(index + 1); 
+ } catch (Exception e) { + e.printStackTrace(); + return null; + } + } else if (columnType == Types.DOUBLE) { + obj = rs.getDouble(index + 1); + } else if (columnType == Types.FLOAT) { + obj = rs.getFloat(index + 1); + } else if (columnType == Types.SMALLINT) { + obj = rs.getShort(index + 1); + } else if (columnType == Types.INTEGER) { + obj = rs.getInt(index + 1); + } else if (columnType == Types.BIGINT) { + obj = rs.getLong(index + 1); + } else if (columnType == Types.VARCHAR) { + obj = rs.getString(index + 1); + log.info("conver string data {}", obj); + } else if (columnType == Types.TIMESTAMP) { + obj = rs.getTimestamp(index + 1); + } + return obj; + } + + public static List convertList(List datas, List columns) throws ParseException { + List list = new ArrayList(); + for (int i = 0; i < datas.size(); i++) { + if (datas.get(i) == null) { + list.add(null); + } else { + String obj = datas.get(i).toString(); + String column = columns.get(i); + list.add(convertData(obj, column)); + } + } + return list; + } + + public static Object convertData(String data, String column) throws ParseException { + String[] ss = column.split("\\s+"); + String type = ss[ss.length - 1]; + Object obj = null; + if(data == null){ + return null; + } + if ("null".equalsIgnoreCase(data)) { + return "null"; + } + switch (type) { + case "smallint": + case "int16": + obj = Short.parseShort(data); + break; + case "int32": + case "i32": + case "int": + obj = Integer.parseInt(data); + break; + case "int64": + case "bigint": + obj = Long.parseLong(data); + break; + case "float": { + if (data.equalsIgnoreCase("nan")||data.equalsIgnoreCase("-nan")) { + obj = Float.NaN; + }else if(data.equalsIgnoreCase("inf")){ + obj = Float.POSITIVE_INFINITY; + }else if(data.equalsIgnoreCase("-inf")){ + obj = Float.NEGATIVE_INFINITY; + }else { + obj = Float.parseFloat(data); + } + break; + } + case "double": { + if (data.equalsIgnoreCase("nan")||data.equalsIgnoreCase("-nan")) { + obj = Double.NaN; + }else 
if(data.equalsIgnoreCase("inf")){ + obj = Double.POSITIVE_INFINITY; + }else if(data.equalsIgnoreCase("-inf")){ + obj = Double.NEGATIVE_INFINITY; + }else { + obj = Double.parseDouble(data); + } + break; + } + case "bool": + obj = Boolean.parseBoolean(data); + break; + case "string": + obj = data; + break; + case "timestamp": + obj = new Timestamp(Long.parseLong(data)); + break; + case "date": + try { + obj = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(data.trim() + " 00:00:00").getTime()); + } catch (ParseException e) { + log.error("Fail convert {} to date", data.trim()); + throw e; + } + break; + default: + obj = data; + break; + } + return obj; + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java index 30ae633db9c..fa6e0a9a19a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java @@ -1,14 +1,54 @@ package com._4paradigm.openmldb.test_common.util; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; +import com.google.common.base.Joiner; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; public class ResultUtil { - public static void setSchema(ResultSetMetaData metaData, OpenMLDBResult fesqlResult) { + public static OpenmldbDeployment parseDeployment(List lines){ + OpenmldbDeployment deployment = new OpenmldbDeployment(); + List inColumns = new ArrayList<>(); + List outColumns = new ArrayList<>(); + String[] db_sp = 
lines.get(3).split("\\s+"); + deployment.setDbName(db_sp[1]); + deployment.setName(db_sp[2]); + + String sql = ""; + List list = lines.subList(9, lines.size()); + Iterator it = list.iterator(); + while(it.hasNext()) { + String line = it.next().trim(); + if (line.contains("row in set")) break; + if (line.startsWith("#") || line.startsWith("-")) continue; + sql += line+"\n"; + } + deployment.setSql(sql); + while(it.hasNext()){ + String line = it.next().trim(); + if (line.contains("Output Schema")) break; + if (line.startsWith("#") || line.startsWith("-")|| line.equals("")) continue; + String[] infos = line.split("\\s+"); + String in = Joiner.on(",").join(infos); + inColumns.add(in); + } + while(it.hasNext()){ + String line = it.next().trim(); + if(line.startsWith("#")||line.startsWith("-"))continue; + String[] infos = line.split("\\s+"); + String out = Joiner.on(",").join(infos); + outColumns.add(out); + } + deployment.setInColumns(inColumns); + deployment.setOutColumns(outColumns); + return deployment; + } + public static void setSchema(ResultSetMetaData metaData, OpenMLDBResult openMLDBResult) { try { int columnCount = metaData.getColumnCount(); List columnNames = new ArrayList<>(); @@ -21,10 +61,10 @@ public static void setSchema(ResultSetMetaData metaData, OpenMLDBResult fesqlRes columnLabel = metaData.getColumnName(i); } columnNames.add(columnLabel); - columnTypes.add(OpenMLDBUtil.getSQLTypeString(metaData.getColumnType(i))); + columnTypes.add(TypeUtil.fromJDBCTypeToString(metaData.getColumnType(i))); } - fesqlResult.setColumnNames(columnNames); - fesqlResult.setColumnTypes(columnTypes); + openMLDBResult.setColumnNames(columnNames); + openMLDBResult.setColumnTypes(columnTypes); }catch (SQLException e){ e.printStackTrace(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/OpenMLDBUtil.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java similarity index 56% rename from test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/OpenMLDBUtil.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 897301c5363..77668c82200 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/OpenMLDBUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -16,9 +16,6 @@ package com._4paradigm.openmldb.test_common.util; -import com._4paradigm.openmldb.DataType; -import com._4paradigm.openmldb.SQLRequestRow; -import com._4paradigm.openmldb.Schema; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; @@ -32,7 +29,6 @@ import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com.google.common.base.Joiner; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -47,203 +43,16 @@ import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** * @author zhaowei * @date 2020/6/17 4:00 PM */ @Slf4j -public class OpenMLDBUtil { - private static String reg = "\\{(\\d+)\\}"; - private static Pattern pattern = Pattern.compile(reg); +public class SDKUtil { private static final Logger logger = new 
LogProxy(log); - public static String buildSpSQLWithConstColumns(String spName, - String sql, - InputDesc input) throws SQLException { - StringBuilder builder = new StringBuilder("create procedure " + spName + "("); - HashSet commonColumnIndices = new HashSet<>(); - if (input.getCommon_column_indices() != null) { - for (String str : input.getCommon_column_indices()) { - if (str != null) { - commonColumnIndices.add(Integer.parseInt(str)); - } - } - } - if (input.getColumns() == null) { - throw new SQLException("No schema defined in input desc"); - } - for (int i = 0; i < input.getColumns().size(); ++i) { - String[] parts = input.getColumns().get(i).split(" "); - if (commonColumnIndices.contains(i)) { - builder.append("const "); - } - builder.append(parts[0]); - builder.append(" "); - builder.append(parts[1]); - if (i != input.getColumns().size() - 1) { - builder.append(","); - } - } - builder.append(") "); - builder.append("BEGIN "); - builder.append(sql.trim()); - builder.append(" "); - builder.append("END;"); - sql = builder.toString(); - return sql; - } - - public static int getIndexByColumnName(List columnNames, String columnName) { - for (int i = 0; i < columnNames.size(); i++) { - if (columnNames.get(i).equals(columnName)) { - return i; - } - } - return -1; - } - - public static DataType getColumnType(String type) { - switch (type) { - case "smallint": - case "int16": - return DataType.kTypeInt16; - case "int32": - case "i32": - case "int": - return DataType.kTypeInt32; - case "int64": - case "bigint": - return DataType.kTypeInt64; - case "float": - return DataType.kTypeFloat; - case "double": - return DataType.kTypeDouble; - case "bool": - return DataType.kTypeBool; - case "string": - return DataType.kTypeString; - case "timestamp": - return DataType.kTypeTimestamp; - case "date": - return DataType.kTypeDate; - default: - return null; - } - } - - public static DataType getColumnTypeByJDBC(String type) { - switch (type) { - case "smallint": - case "int16": - 
return DataType.kTypeInt16; - case "int32": - case "i32": - case "int": - case "bool": - return DataType.kTypeInt32; - case "int64": - case "bigint": - return DataType.kTypeInt64; - case "float": - return DataType.kTypeFloat; - case "double": - return DataType.kTypeDouble; - // case "bool": - // return DataType.kTypeBool; - case "string": - return DataType.kTypeString; - case "timestamp": - return DataType.kTypeTimestamp; - case "date": - return DataType.kTypeDate; - default: - return null; - } - } - - public static int getSQLType(String type) { - switch (type) { - case "smallint": - case "int16": - return Types.SMALLINT; - case "int32": - case "i32": - case "int": - return Types.INTEGER; - case "int64": - case "bigint": - return Types.BIGINT; - case "float": - return Types.FLOAT; - case "double": - return Types.DOUBLE; - case "bool": - return Types.BOOLEAN; - case "string": - return Types.VARCHAR; - case "timestamp": - return Types.TIMESTAMP; - case "date": - return Types.DATE; - default: - return 0; - } - } - - public static String getColumnTypeString(DataType dataType) { - if (dataType.equals(DataType.kTypeBool)) { - return "bool"; - } else if (dataType.equals(DataType.kTypeString)) { - return "string"; - } else if (dataType.equals(DataType.kTypeInt16)) { - return "smallint"; - } else if (dataType.equals(DataType.kTypeInt32)) { - return "int"; - } else if (dataType.equals(DataType.kTypeInt64)) { - return "bigint"; - } else if (dataType.equals(DataType.kTypeFloat)) { - return "float"; - } else if (dataType.equals(DataType.kTypeDouble)) { - return "double"; - } else if (dataType.equals(DataType.kTypeTimestamp)) { - return "timestamp"; - } else if (dataType.equals(DataType.kTypeDate)) { - return "date"; - } - return null; - } - - public static String getSQLTypeString(int dataType) { - switch (dataType){ - case Types.BIT: - case Types.BOOLEAN: - return "bool"; - case Types.VARCHAR: - return "string"; - case Types.SMALLINT: - return "smallint"; - case Types.INTEGER: 
- return "int"; - case Types.BIGINT: - return "bigint"; - case Types.REAL: - case Types.FLOAT: - return "float"; - case Types.DOUBLE: - return "double"; - case Types.TIMESTAMP: - return "timestamp"; - case Types.DATE: - return "date"; - default: - return null; - } - } - - public static OpenMLDBResult sqls(SqlExecutor executor, String dbName, List sqls) { + public static OpenMLDBResult sqlList(SqlExecutor executor, String dbName, List sqls) { OpenMLDBResult fesqlResult = null; for (String sql : sqls) { fesqlResult = sql(executor, dbName, sql); @@ -275,9 +84,9 @@ public static OpenMLDBResult sqlBatchRequestMode(SqlExecutor executor, String db return fesqlResult; } - public static OpenMLDBResult sqlRequestModeWithSp(SqlExecutor executor, String dbName, String spName, - Boolean needInsertRequestRow, String sql, - InputDesc rows, boolean isAsyn) throws SQLException { + public static OpenMLDBResult sqlRequestModeWithProcedure(SqlExecutor executor, String dbName, String spName, + Boolean needInsertRequestRow, String sql, + InputDesc rows, boolean isAsyn) throws SQLException { OpenMLDBResult fesqlResult = null; if (sql.toLowerCase().startsWith("create procedure")) { fesqlResult = selectRequestModeWithSp(executor, dbName, spName, needInsertRequestRow, sql, rows, isAsyn); @@ -293,7 +102,7 @@ public static OpenMLDBResult sql(SqlExecutor executor, String dbName, String sql if (sql.startsWith("create database") || sql.startsWith("drop database")) { fesqlResult = db(executor, sql); }else if(sql.startsWith("CREATE INDEX")||sql.startsWith("create index")){ - fesqlResult = createIndex(executor, dbName, sql); + fesqlResult = createIndex(executor, sql); }else if (sql.startsWith("create") || sql.startsWith("CREATE") || sql.startsWith("DROP")|| sql.startsWith("drop")) { fesqlResult = ddl(executor, dbName, sql); } else if (sql.startsWith("insert")||sql.startsWith("INSERT")) { @@ -350,10 +159,10 @@ public static OpenMLDBResult deploy(SqlExecutor executor, String dbName, String 
SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - String deployStr = convertRestultSetToListDeploy(rs); + String deployStr = DataUtil.convertResultSetToListDeploy(rs); String[] strings = deployStr.split("\n"); List stringList = Arrays.asList(strings); - OpenmldbDeployment openmldbDeployment = parseDeployment(stringList); + OpenmldbDeployment openmldbDeployment = ResultUtil.parseDeployment(stringList); fesqlResult.setDeployment(openmldbDeployment); } catch (Exception e) { fesqlResult.setOk(false); @@ -379,7 +188,7 @@ public static OpenMLDBResult showDeploys(SqlExecutor executor, String dbName, St SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - List> lists = convertRestultSetToList(rs); + List> lists = DataUtil.convertResultSetToList(rs); if(lists.size() == 0 ||lists.isEmpty()){ fesqlResult.setDeploymentCount(0); }else { @@ -396,66 +205,7 @@ public static OpenMLDBResult showDeploys(SqlExecutor executor, String dbName, St return fesqlResult; } - private static String convertRestultSetToListDeploy(SQLResultSet rs) throws SQLException { - String string = null; - while (rs.next()) { - int columnCount = rs.getMetaData().getColumnCount(); - for (int i = 0; i < columnCount; i++) { - string=String.valueOf(getColumnData(rs, i)); - } - } - return string; - } - - private static List convertRestultSetToListDesc(SQLResultSet rs) throws SQLException { - List res = new ArrayList<>(); - while (rs.next()) { - int columnCount = rs.getMetaData().getColumnCount(); - for (int i = 0; i < columnCount; i++) { - String string=String.valueOf(getColumnData(rs, i)); - res.add(string); - } - } - return res; - } - private static OpenmldbDeployment parseDeployment(List lines){ - OpenmldbDeployment deployment = new OpenmldbDeployment(); - List inColumns = new ArrayList<>(); - List outColumns = new ArrayList<>(); - String[] db_sp = lines.get(3).split("\\s+"); - 
deployment.setDbName(db_sp[1]); - deployment.setName(db_sp[2]); - - String sql = ""; - List list = lines.subList(9, lines.size()); - Iterator it = list.iterator(); - while(it.hasNext()) { - String line = it.next().trim(); - if (line.contains("row in set")) break; - if (line.startsWith("#") || line.startsWith("-")) continue; - sql += line+"\n"; - } - deployment.setSql(sql); - while(it.hasNext()){ - String line = it.next().trim(); - if (line.contains("Output Schema")) break; - if (line.startsWith("#") || line.startsWith("-")|| line.equals("")) continue; - String[] infos = line.split("\\s+"); - String in = Joiner.on(",").join(infos); - inColumns.add(in); - } - while(it.hasNext()){ - String line = it.next().trim(); - if(line.startsWith("#")||line.startsWith("-"))continue; - String[] infos = line.split("\\s+"); - String out = Joiner.on(",").join(infos); - outColumns.add(out); - } - deployment.setInColumns(inColumns); - deployment.setOutColumns(outColumns); - return deployment; - } public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String descSql){ if (descSql.isEmpty()){ @@ -473,11 +223,11 @@ public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String de SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - String deployStr = convertRestultSetToListDeploy(rs); - List listDesc = convertRestultSetToListDesc(rs); + String deployStr = DataUtil.convertResultSetToListDeploy(rs); + List listDesc = DataUtil.convertResultSetToListDesc(rs); String[] strings = deployStr.split("\n"); List stringList = Arrays.asList(strings); - OpenMLDBSchema openMLDBSchema = parseSchema(stringList); + OpenMLDBSchema openMLDBSchema = SchemaUtil.parseSchema(stringList); fesqlResult.setSchema(openMLDBSchema); } catch (Exception e) { fesqlResult.setOk(false); @@ -488,44 +238,8 @@ public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String de return fesqlResult; } - public static 
OpenMLDBSchema parseSchema(List lines){ - OpenMLDBSchema schema = new OpenMLDBSchema(); - List cols = new ArrayList<>(); - List indexs = new ArrayList<>(); - Iterator it = lines.iterator(); -// while(it.hasNext()){ -// String line = it.next(); -// if(line.contains("ttl_type")) break; -// if(line.startsWith("#")||line.startsWith("-"))continue; -// OpenMLDBColumn col = new OpenMLDBColumn(); -// String[] infos = line.split("\\s+"); -// col.setId(Integer.parseInt(infos[0])); -// col.setFieldName(infos[1]); -// col.setFieldType(infos[2]); -// col.setNullable(infos[3].equals("NO")?false:true); -// cols.add(col); -// it.remove(); -// } - while(it.hasNext()){ - String line = it.next().trim(); - if(line.startsWith("#")||line.startsWith("-"))continue; - OpenMLDBIndex index = new OpenMLDBIndex(); - String[] infos = line.split("\\s+"); - index.setId(Integer.parseInt(infos[0])); - index.setIndexName(infos[1]); - index.setKeys(Arrays.asList(infos[2].split("\\|"))); - index.setTs(infos[3]); - index.setTtl(infos[4]); - index.setTtlType(infos[5]); - indexs.add(index); - //it.remove(); - } - schema.setIndexs(indexs); - //schema.setColumns(cols); - return schema; - } - public static OpenMLDBResult createIndex(SqlExecutor executor, String dbName, String sql) { + public static OpenMLDBResult createIndex(SqlExecutor executor, String sql) { if (sql.isEmpty()) { return null; } @@ -564,7 +278,7 @@ public static OpenMLDBResult selectWithPrepareStatement(SqlExecutor executor, St } logger.info("prepare sql:{}", sql); PreparedStatement preparedStmt = executor.getPreparedStatement(dbName, sql); - setPreparedData(preparedStmt,paramterTypes,params); + DataUtil.setPreparedData(preparedStmt,paramterTypes,params); ResultSet resultSet = preparedStmt.executeQuery(); if (resultSet == null) { @@ -575,7 +289,7 @@ public static OpenMLDBResult selectWithPrepareStatement(SqlExecutor executor, St SQLResultSet rs = (SQLResultSet)resultSet; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); 
fesqlResult.setOk(true); - List> result = convertRestultSetToList(rs); + List> result = DataUtil.convertResultSetToList(rs); fesqlResult.setCount(result.size()); fesqlResult.setResult(result); } catch (Exception e) { @@ -600,7 +314,7 @@ public static OpenMLDBResult insertWithPrepareStatement(SqlExecutor executor, St } logger.info("prepare sql:{}", insertSql); PreparedStatement preparedStmt = executor.getInsertPreparedStmt(dbName, insertSql); - setRequestData(preparedStmt,params); + DataUtil.setRequestData(preparedStmt,params); // for(int i=0;i> convertRestultSetToList(SQLResultSet rs) throws SQLException { - List> result = new ArrayList<>(); - while (rs.next()) { - List list = new ArrayList(); - int columnCount = rs.getMetaData().getColumnCount(); - for (int i = 0; i < columnCount; i++) { - list.add(getColumnData(rs, i)); - } - result.add(list); - } - return result; - } - private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor executor, String dbName, Boolean need_insert_request_row, String selectSql, InputDesc input) { @@ -698,7 +399,7 @@ private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor } ResultSet resultSet = null; try { - resultSet = buildRequestPreparedStatment(rps, rows.get(i)); + resultSet = buildRequestPreparedStatement(rps, rows.get(i)); } catch (SQLException throwables) { fesqlResult.setOk(false); @@ -712,7 +413,7 @@ private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor return fesqlResult; } try { - result.addAll(convertRestultSetToList((SQLResultSet) resultSet)); + result.addAll(DataUtil.convertResultSetToList((SQLResultSet) resultSet)); } catch (SQLException throwables) { fesqlResult.setOk(false); fesqlResult.setMsg("Convert Result Set To List Fail"); @@ -782,7 +483,7 @@ private static OpenMLDBResult selectBatchRequestModeWithPreparedStatement(SqlExe rps = executor.getBatchRequestPreparedStmt(dbName, selectSql, commonColumnIndices); for (List row : rows) { - boolean ok = 
setRequestData(rps, row); + boolean ok = DataUtil.setRequestData(rps, row); if (ok) { rps.addBatch(); } @@ -790,7 +491,7 @@ private static OpenMLDBResult selectBatchRequestModeWithPreparedStatement(SqlExe sqlResultSet = (SQLResultSet) rps.executeQuery(); List> result = Lists.newArrayList(); - result.addAll(convertRestultSetToList(sqlResultSet)); + result.addAll(DataUtil.convertResultSetToList(sqlResultSet)); fesqlResult.setResult(result); ResultUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); fesqlResult.setCount(result.size()); @@ -867,9 +568,9 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri return fesqlResult; } if (!isAsyn) { - resultSet = buildRequestPreparedStatment(rps, rows.get(i)); + resultSet = buildRequestPreparedStatement(rps, rows.get(i)); } else { - resultSet = buildRequestPreparedStatmentAsync(rps, rows.get(i)); + resultSet = buildRequestPreparedStatementAsync(rps, rows.get(i)); } if (resultSet == null) { fesqlResult.setOk(false); @@ -877,7 +578,7 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri logger.error("select result:{}", fesqlResult); return fesqlResult; } - result.addAll(convertRestultSetToList((SQLResultSet) resultSet)); + result.addAll(DataUtil.convertResultSetToList((SQLResultSet) resultSet)); if (needInsertRequestRow && !executor.executeInsert(insertDbName, inserts.get(i))) { fesqlResult.setOk(false); fesqlResult.setMsg("fail to execute sql in request mode: fail to insert request row after query"); @@ -951,7 +652,7 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, return fesqlResult; } for (List row : rows) { - boolean ok = setRequestData(rps, row); + boolean ok = DataUtil.setRequestData(rps, row); if (ok) { rps.addBatch(); } @@ -970,7 +671,7 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, } } List> result = Lists.newArrayList(); - result.addAll(convertRestultSetToList((SQLResultSet) 
sqlResultSet)); + result.addAll(DataUtil.convertResultSetToList((SQLResultSet) sqlResultSet)); fesqlResult.setResult(result); ResultUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); fesqlResult.setCount(result.size()); @@ -997,266 +698,65 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, return fesqlResult; } - public static List> convertRows(List> rows, List columns) throws ParseException { - List> list = new ArrayList<>(); - for (List row : rows) { - list.add(convertList(row, columns)); - } - return list; - } - - public static List convertList(List datas, List columns) throws ParseException { - List list = new ArrayList(); - for (int i = 0; i < datas.size(); i++) { - if (datas.get(i) == null) { - list.add(null); - } else { - String obj = datas.get(i).toString(); - String column = columns.get(i); - list.add(convertData(obj, column)); - } - } - return list; - } - - public static Object convertData(String data, String column) throws ParseException { - String[] ss = column.split("\\s+"); - String type = ss[ss.length - 1]; - Object obj = null; - if(data == null){ - return null; - } - if ("null".equalsIgnoreCase(data)) { - return "null"; - } - switch (type) { - case "smallint": - case "int16": - obj = Short.parseShort(data); - break; - case "int32": - case "i32": - case "int": - obj = Integer.parseInt(data); - break; - case "int64": - case "bigint": - obj = Long.parseLong(data); - break; - case "float": { - if (data.equalsIgnoreCase("nan")||data.equalsIgnoreCase("-nan")) { - obj = Float.NaN; - }else if(data.equalsIgnoreCase("inf")){ - obj = Float.POSITIVE_INFINITY; - }else if(data.equalsIgnoreCase("-inf")){ - obj = Float.NEGATIVE_INFINITY; - }else { - obj = Float.parseFloat(data); - } - break; - } - case "double": { - if (data.equalsIgnoreCase("nan")||data.equalsIgnoreCase("-nan")) { - obj = Double.NaN; - }else if(data.equalsIgnoreCase("inf")){ - obj = Double.POSITIVE_INFINITY; - }else if(data.equalsIgnoreCase("-inf")){ - obj = 
Double.NEGATIVE_INFINITY; - }else { - obj = Double.parseDouble(data); - } - break; - } - case "bool": - obj = Boolean.parseBoolean(data); - break; - case "string": - obj = data; - break; - case "timestamp": - obj = new Timestamp(Long.parseLong(data)); - break; - case "date": - try { - obj = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(data.trim() + " 00:00:00").getTime()); - } catch (ParseException e) { - log.error("Fail convert {} to date", data.trim()); - throw e; - } - break; - default: - obj = data; - break; - } - return obj; - } - - private static boolean buildRequestRow(SQLRequestRow requestRow, List objects) { - Schema schema = requestRow.GetSchema(); - int totalSize = 0; - for (int i = 0; i < schema.GetColumnCnt(); i++) { - if (null == objects.get(i)) { - continue; - } - if (DataType.kTypeString.equals(schema.GetColumnType(i))) { - totalSize += objects.get(i).toString().length(); - } - } - - logger.info("init request row: {}", totalSize); - requestRow.Init(totalSize); - for (int i = 0; i < schema.GetColumnCnt(); i++) { - Object obj = objects.get(i); - if (null == obj) { - requestRow.AppendNULL(); - continue; - } - DataType dataType = schema.GetColumnType(i); - if (DataType.kTypeInt16.equals(dataType)) { - requestRow.AppendInt16(Short.parseShort(obj.toString())); - } else if (DataType.kTypeInt32.equals(dataType)) { - requestRow.AppendInt32(Integer.parseInt(obj.toString())); - } else if (DataType.kTypeInt64.equals(dataType)) { - requestRow.AppendInt64(Long.parseLong(obj.toString())); - } else if (DataType.kTypeFloat.equals(dataType)) { - requestRow.AppendFloat(Float.parseFloat(obj.toString())); - } else if (DataType.kTypeDouble.equals(dataType)) { - requestRow.AppendDouble(Double.parseDouble(obj.toString())); - } else if (DataType.kTypeTimestamp.equals(dataType)) { - requestRow.AppendTimestamp(Long.parseLong(obj.toString())); - } else if (DataType.kTypeDate.equals(dataType)) { - try { - Date date = new Date(new SimpleDateFormat("yyyy-MM-dd 
HH:mm:ss").parse(obj.toString() + " 00:00:00").getTime()); - logger.info("build request row: obj: {}, append date: {}, {}, {}, {}", - obj, date.toString(), date.getYear() + 1900, date.getMonth() + 1, date.getDate()); - requestRow.AppendDate(date.getYear() + 1900, date.getMonth() + 1, date.getDate()); - } catch (ParseException e) { - logger.error("Fail convert {} to date", obj.toString()); - return false; - } - } else if (DataType.kTypeString.equals(schema.GetColumnType(i))) { - requestRow.AppendString(obj.toString()); - } else { - logger.error("fail to build request row: invalid data type {]", schema.GetColumnType(i)); - return false; - } - } - return requestRow.Build(); - } - private static boolean setPreparedData(PreparedStatement ps,List paramterType, List objects) throws SQLException { - for(int i=0;i objects) throws SQLException { - ResultSetMetaData metaData = requestPs.getMetaData(); - int totalSize = 0; - for (int i = 0; i < metaData.getColumnCount(); i++) { - if (null == objects.get(i)) { - continue; - } - if (metaData.getColumnType(i + 1) == Types.VARCHAR) { - totalSize += objects.get(i).toString().length(); - } - } - logger.info("init request row: {}", totalSize); - for (int i = 0; i < metaData.getColumnCount(); i++) { - Object obj = objects.get(i); - if (null == obj || obj.toString().equalsIgnoreCase("null")) { - requestPs.setNull(i + 1, 0); - continue; - } - int columnType = metaData.getColumnType(i + 1); - if (columnType == Types.BOOLEAN) { - requestPs.setBoolean(i + 1, Boolean.parseBoolean(obj.toString())); - } else if (columnType == Types.SMALLINT) { - requestPs.setShort(i + 1, Short.parseShort(obj.toString())); - } else if (columnType == Types.INTEGER) { - requestPs.setInt(i + 1, Integer.parseInt(obj.toString())); - } else if (columnType == Types.BIGINT) { - requestPs.setLong(i + 1, Long.parseLong(obj.toString())); - } else if (columnType == Types.FLOAT) { - requestPs.setFloat(i + 1, Float.parseFloat(obj.toString())); - } else if (columnType == 
Types.DOUBLE) { - requestPs.setDouble(i + 1, Double.parseDouble(obj.toString())); - } else if (columnType == Types.TIMESTAMP) { - requestPs.setTimestamp(i + 1, new Timestamp(Long.parseLong(obj.toString()))); - } else if (columnType == Types.DATE) { - if (obj instanceof java.util.Date) { - requestPs.setDate(i + 1, new Date(((java.util.Date) obj).getTime())); - } else if (obj instanceof Date) { - requestPs.setDate(i + 1, (Date) (obj)); - } -// else if (obj instanceof DateTime) { -// requestPs.setDate(i + 1, new Date(((DateTime) obj).getMillis())); +// private static boolean buildRequestRow(SQLRequestRow requestRow, List objects) { +// Schema schema = requestRow.GetSchema(); +// int totalSize = 0; +// for (int i = 0; i < schema.GetColumnCnt(); i++) { +// if (null == objects.get(i)) { +// continue; +// } +// if (DataType.kTypeString.equals(schema.GetColumnType(i))) { +// totalSize += objects.get(i).toString().length(); +// } +// } +// +// logger.info("init request row: {}", totalSize); +// requestRow.Init(totalSize); +// for (int i = 0; i < schema.GetColumnCnt(); i++) { +// Object obj = objects.get(i); +// if (null == obj) { +// requestRow.AppendNULL(); +// continue; +// } +// +// DataType dataType = schema.GetColumnType(i); +// if (DataType.kTypeInt16.equals(dataType)) { +// requestRow.AppendInt16(Short.parseShort(obj.toString())); +// } else if (DataType.kTypeInt32.equals(dataType)) { +// requestRow.AppendInt32(Integer.parseInt(obj.toString())); +// } else if (DataType.kTypeInt64.equals(dataType)) { +// requestRow.AppendInt64(Long.parseLong(obj.toString())); +// } else if (DataType.kTypeFloat.equals(dataType)) { +// requestRow.AppendFloat(Float.parseFloat(obj.toString())); +// } else if (DataType.kTypeDouble.equals(dataType)) { +// requestRow.AppendDouble(Double.parseDouble(obj.toString())); +// } else if (DataType.kTypeTimestamp.equals(dataType)) { +// requestRow.AppendTimestamp(Long.parseLong(obj.toString())); +// } else if (DataType.kTypeDate.equals(dataType)) { 
+// try { +// Date date = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(obj.toString() + " 00:00:00").getTime()); +// logger.info("build request row: obj: {}, append date: {}, {}, {}, {}", +// obj, date.toString(), date.getYear() + 1900, date.getMonth() + 1, date.getDate()); +// requestRow.AppendDate(date.getYear() + 1900, date.getMonth() + 1, date.getDate()); +// } catch (ParseException e) { +// logger.error("Fail convert {} to date", obj.toString()); +// return false; // } - else { - try { - Date date = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(obj.toString() + " 00:00:00").getTime()); - logger.info("build request row: obj: {}, append date: {}, {}, {}, {}",obj, date.toString(), date.getYear() + 1900, date.getMonth() + 1, date.getDate()); - requestPs.setDate(i + 1, date); - } catch (ParseException e) { - logger.error("Fail convert {} to date: {}", obj, e); - return false; - } - } - } else if (columnType == Types.VARCHAR) { - requestPs.setString(i + 1, obj.toString()); - } else { - logger.error("fail to build request row: invalid data type {]", columnType); - return false; - } - } - return true; - } +// } else if (DataType.kTypeString.equals(schema.GetColumnType(i))) { +// requestRow.AppendString(obj.toString()); +// } else { +// logger.error("fail to build request row: invalid data type {]", schema.GetColumnType(i)); +// return false; +// } +// } +// return requestRow.Build(); +// } - private static ResultSet buildRequestPreparedStatment(PreparedStatement requestPs, - List objects) throws SQLException { - boolean success = setRequestData(requestPs, objects); + + private static ResultSet buildRequestPreparedStatement(PreparedStatement requestPs, + List objects) throws SQLException { + boolean success = DataUtil.setRequestData(requestPs, objects); if (success) { return requestPs.executeQuery(); } else { @@ -1264,9 +764,9 @@ private static ResultSet buildRequestPreparedStatment(PreparedStatement requestP } } - private static ResultSet 
buildRequestPreparedStatmentAsync(CallablePreparedStatement requestPs, - List objects) throws SQLException { - boolean success = setRequestData(requestPs, objects); + private static ResultSet buildRequestPreparedStatementAsync(CallablePreparedStatement requestPs, + List objects) throws SQLException { + boolean success = DataUtil.setRequestData(requestPs, objects); if (success) { QueryFuture future = requestPs.executeQueryAsync(1000, TimeUnit.MILLISECONDS); ResultSet sqlResultSet = null; @@ -1298,7 +798,7 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, String SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - List> result = convertRestultSetToList(rs); + List> result = DataUtil.convertResultSetToList(rs); fesqlResult.setCount(result.size()); fesqlResult.setResult(result); } catch (Exception e) { @@ -1346,78 +846,11 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, String // return obj; // } - public static Object getColumnData(SQLResultSet rs, int index) throws SQLException { - Object obj = null; - int columnType = rs.getMetaData().getColumnType(index + 1); - if (rs.getNString(index + 1) == null) { - logger.info("rs is null"); - return null; - } - if (columnType == Types.BOOLEAN) { - obj = rs.getBoolean(index + 1); - } else if (columnType == Types.DATE) { - try { -// obj = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") -// .parse(rs.getNString(index + 1) + " 00:00:00").getTime()); - obj = rs.getDate(index + 1); - } catch (Exception e) { - e.printStackTrace(); - return null; - } - } else if (columnType == Types.DOUBLE) { - obj = rs.getDouble(index + 1); - } else if (columnType == Types.FLOAT) { - obj = rs.getFloat(index + 1); - } else if (columnType == Types.SMALLINT) { - obj = rs.getShort(index + 1); - } else if (columnType == Types.INTEGER) { - obj = rs.getInt(index + 1); - } else if (columnType == Types.BIGINT) { - obj = rs.getLong(index + 
1); - } else if (columnType == Types.VARCHAR) { - obj = rs.getString(index + 1); - logger.info("conver string data {}", obj); - } else if (columnType == Types.TIMESTAMP) { - obj = rs.getTimestamp(index + 1); - } - return obj; - } - public static String formatSql(String sql, List tableNames, OpenMLDBInfo fedbInfo) { - Matcher matcher = pattern.matcher(sql); - while (matcher.find()) { - int index = Integer.parseInt(matcher.group(1)); - sql = sql.replace("{" + index + "}", tableNames.get(index)); - } - sql = formatSql(sql,fedbInfo); - return sql; - } - public static String formatSql(String sql, OpenMLDBInfo fedbInfo) { - if(sql.contains("{tb_endpoint_0}")){ - sql = sql.replace("{tb_endpoint_0}", fedbInfo.getTabletEndpoints().get(0)); - } - if(sql.contains("{tb_endpoint_1}")){ - sql = sql.replace("{tb_endpoint_1}", fedbInfo.getTabletEndpoints().get(1)); - } - if(sql.contains("{tb_endpoint_2}")){ - sql = sql.replace("{tb_endpoint_2}", fedbInfo.getTabletEndpoints().get(2)); - } - return sql; - } - - public static String formatSql(String sql, List tableNames) { - return formatSql(sql,tableNames, OpenMLDBGlobalVar.mainInfo); - } - - // public static FesqlResult createAndInsert(SqlExecutor executor, String dbName, - // List inputs, - // boolean useFirstInputAsRequests) { - // return createAndInsert(executor, dbName, inputs, useFirstInputAsRequests); - // } public static OpenMLDBResult createTable(SqlExecutor executor, String dbName, String createSql){ if (StringUtils.isNotEmpty(createSql)) { - OpenMLDBResult res = OpenMLDBUtil.ddl(executor, dbName, createSql); + OpenMLDBResult res = SDKUtil.ddl(executor, dbName, createSql); if (!res.isOk()) { logger.error("fail to create table"); return res; @@ -1456,7 +889,7 @@ public static OpenMLDBResult createAndInsert(SqlExecutor executor, continue; } createSql = SQLCase.formatSql(createSql, i, tableName); - createSql = formatSql(createSql, OpenMLDBGlobalVar.mainInfo); + createSql = SQLUtil.formatSql(createSql, 
OpenMLDBGlobalVar.mainInfo); String dbName = inputs.get(i).getDb().isEmpty() ? defaultDBName : inputs.get(i).getDb(); createTable(executor,dbName,createSql); InputDesc input = inputs.get(i); @@ -1467,7 +900,7 @@ public static OpenMLDBResult createAndInsert(SqlExecutor executor, for (String insertSql : inserts) { insertSql = SQLCase.formatSql(insertSql, i, input.getName()); if (!insertSql.isEmpty()) { - OpenMLDBResult res = OpenMLDBUtil.insert(executor, dbName, insertSql); + OpenMLDBResult res = SDKUtil.insert(executor, dbName, insertSql); if (!res.isOk()) { logger.error("fail to insert table"); return res; @@ -1500,7 +933,7 @@ public static OpenMLDBResult createAndInsertWithPrepared(SqlExecutor executor, insertSql = SQLCase.formatSql(insertSql, i, tableName); List> rows = input.getRows(); for(List row:rows){ - OpenMLDBResult res = OpenMLDBUtil.insertWithPrepareStatement(executor, dbName, insertSql, row); + OpenMLDBResult res = SDKUtil.insertWithPrepareStatement(executor, dbName, insertSql, row); if (!res.isOk()) { logger.error("fail to insert table"); return res; @@ -1524,20 +957,7 @@ public static void show(com._4paradigm.openmldb.ResultSet rs) { } logger.info("RESULT:\n{} row in set\n{}", rs.Size(), sb.toString()); } - public static String getColumnTypeByType(int type){ - switch (type){ - case Types.BIGINT: return "bigint"; - case Types.SMALLINT: return "smallint"; - case Types.INTEGER: return "int"; - case Types.VARCHAR: return "string"; - case Types.FLOAT: return "float"; - case Types.DOUBLE: return "double"; - case Types.DATE: return "date"; - case Types.TIMESTAMP: return "timestamp"; - case Types.BOOLEAN: return "bool"; - } - throw new IllegalArgumentException("not know type"); - } + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java new 
file mode 100644 index 00000000000..211cce8a1eb --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java @@ -0,0 +1,76 @@ +package com._4paradigm.openmldb.test_common.util; + +import com._4paradigm.openmldb.test_common.model.InputDesc; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; + +import java.sql.SQLException; +import java.util.HashSet; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class SQLUtil { + private static String reg = "\\{(\\d+)\\}"; + private static Pattern pattern = Pattern.compile(reg); + + public static String formatSql(String sql, List tableNames, OpenMLDBInfo fedbInfo) { + Matcher matcher = pattern.matcher(sql); + while (matcher.find()) { + int index = Integer.parseInt(matcher.group(1)); + sql = sql.replace("{" + index + "}", tableNames.get(index)); + } + sql = formatSql(sql,fedbInfo); + return sql; + } + + public static String formatSql(String sql, OpenMLDBInfo fedbInfo) { + if(sql.contains("{tb_endpoint_0}")){ + sql = sql.replace("{tb_endpoint_0}", fedbInfo.getTabletEndpoints().get(0)); + } + if(sql.contains("{tb_endpoint_1}")){ + sql = sql.replace("{tb_endpoint_1}", fedbInfo.getTabletEndpoints().get(1)); + } + if(sql.contains("{tb_endpoint_2}")){ + sql = sql.replace("{tb_endpoint_2}", fedbInfo.getTabletEndpoints().get(2)); + } + return sql; + } + + public static String formatSql(String sql, List tableNames) { + return formatSql(sql,tableNames, OpenMLDBGlobalVar.mainInfo); + } + public static String buildSpSQLWithConstColumns(String spName, String sql, InputDesc input) throws SQLException { + StringBuilder builder = new StringBuilder("create procedure " + spName + "("); + HashSet commonColumnIndices = new HashSet<>(); + if (input.getCommon_column_indices() != null) { + for (String str : 
input.getCommon_column_indices()) { + if (str != null) { + commonColumnIndices.add(Integer.parseInt(str)); + } + } + } + if (input.getColumns() == null) { + throw new SQLException("No schema defined in input desc"); + } + for (int i = 0; i < input.getColumns().size(); ++i) { + String[] parts = input.getColumns().get(i).split(" "); + if (commonColumnIndices.contains(i)) { + builder.append("const "); + } + builder.append(parts[0]); + builder.append(" "); + builder.append(parts[1]); + if (i != input.getColumns().size() - 1) { + builder.append(","); + } + } + builder.append(") "); + builder.append("BEGIN "); + builder.append(sql.trim()); + builder.append(" "); + builder.append("END;"); + sql = builder.toString(); + return sql; + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java new file mode 100644 index 00000000000..0ef5897eb28 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java @@ -0,0 +1,57 @@ +package com._4paradigm.openmldb.test_common.util; + +import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBSchema; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +public class SchemaUtil { + public static int getIndexByColumnName(List columnNames, String columnName) { + for (int i = 0; i < columnNames.size(); i++) { + if (columnNames.get(i).equals(columnName)) { + return i; + } + } + return -1; + } + public static OpenMLDBSchema parseSchema(List lines){ + OpenMLDBSchema schema = new OpenMLDBSchema(); + List cols = new ArrayList<>(); + List indexs = new 
ArrayList<>(); + Iterator it = lines.iterator(); +// while(it.hasNext()){ +// String line = it.next(); +// if(line.contains("ttl_type")) break; +// if(line.startsWith("#")||line.startsWith("-"))continue; +// OpenMLDBColumn col = new OpenMLDBColumn(); +// String[] infos = line.split("\\s+"); +// col.setId(Integer.parseInt(infos[0])); +// col.setFieldName(infos[1]); +// col.setFieldType(infos[2]); +// col.setNullable(infos[3].equals("NO")?false:true); +// cols.add(col); +// it.remove(); +// } + while(it.hasNext()){ + String line = it.next().trim(); + if(line.startsWith("#")||line.startsWith("-"))continue; + OpenMLDBIndex index = new OpenMLDBIndex(); + String[] infos = line.split("\\s+"); + index.setId(Integer.parseInt(infos[0])); + index.setIndexName(infos[1]); + index.setKeys(Arrays.asList(infos[2].split("\\|"))); + index.setTs(infos[3]); + index.setTtl(infos[4]); + index.setTtlType(infos[5]); + indexs.add(index); + //it.remove(); + } + schema.setIndexs(indexs); + //schema.setColumns(cols); + return schema; + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/TypeUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/TypeUtil.java new file mode 100644 index 00000000000..475a409c85d --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/TypeUtil.java @@ -0,0 +1,115 @@ +package com._4paradigm.openmldb.test_common.util; + +import com._4paradigm.openmldb.DataType; + +import java.sql.Types; + +public class TypeUtil { + public static DataType getOpenMLDBColumnType(String type) { + switch (type) { + case "smallint": + case "int16": + return DataType.kTypeInt16; + case "int32": + case "i32": + case "int": + return DataType.kTypeInt32; + case "int64": + case "bigint": + return DataType.kTypeInt64; + case "float": + return 
DataType.kTypeFloat; + case "double": + return DataType.kTypeDouble; + case "bool": + return DataType.kTypeBool; + case "string": + return DataType.kTypeString; + case "timestamp": + return DataType.kTypeTimestamp; + case "date": + return DataType.kTypeDate; + default: + return null; + } + } + + public static int getJDBCColumnType(String type) { + switch (type) { + case "smallint": + case "int16": + return Types.SMALLINT; + case "int32": + case "i32": + case "int": + return Types.INTEGER; + case "int64": + case "bigint": + return Types.BIGINT; + case "float": + return Types.FLOAT; + case "double": + return Types.DOUBLE; + case "bool": + return Types.BOOLEAN; + case "string": + return Types.VARCHAR; + case "timestamp": + return Types.TIMESTAMP; + case "date": + return Types.DATE; + default: + return 0; + } + } + + public static String fromOpenMLDBTypeToString(DataType dataType) { + if (dataType.equals(DataType.kTypeBool)) { + return "bool"; + } else if (dataType.equals(DataType.kTypeString)) { + return "string"; + } else if (dataType.equals(DataType.kTypeInt16)) { + return "smallint"; + } else if (dataType.equals(DataType.kTypeInt32)) { + return "int"; + } else if (dataType.equals(DataType.kTypeInt64)) { + return "bigint"; + } else if (dataType.equals(DataType.kTypeFloat)) { + return "float"; + } else if (dataType.equals(DataType.kTypeDouble)) { + return "double"; + } else if (dataType.equals(DataType.kTypeTimestamp)) { + return "timestamp"; + } else if (dataType.equals(DataType.kTypeDate)) { + return "date"; + } + return null; + } + + public static String fromJDBCTypeToString(int dataType) { + switch (dataType){ + case Types.BIT: + case Types.BOOLEAN: + return "bool"; + case Types.VARCHAR: + return "string"; + case Types.SMALLINT: + return "smallint"; + case Types.INTEGER: + return "int"; + case Types.BIGINT: + return "bigint"; + case Types.REAL: + case Types.FLOAT: + return "float"; + case Types.DOUBLE: + return "double"; + case Types.TIMESTAMP: + return 
"timestamp"; + case Types.DATE: + return "date"; + default: + return null; + } + } +} From 455d0d9a725bb438932c32e8aa8ee57e9309be59 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 7 Jul 2022 17:54:02 +0800 Subject: [PATCH 021/172] add ns operator --- .../devops_test/common/ClusterTest.java | 5 +- .../high_availability/TestCluster.java | 84 +++++++++++- .../devops_test/tmp/TestClusterLinux.java | 129 ++++++++++++++++++ .../openmldb/devops_test/tmp/TestCommand.java | 14 ++ .../devops_test/tmp/TestSDKClient.java | 20 +++ .../test_common/bean/OpenMLDBResult.java | 4 +- .../openmldb/test_common/bean/SQLType.java | 43 ++++++ .../chain/result/AbstractResultHandler.java | 43 ++++++ .../chain/result/ResultChainManager.java | 26 ++++ .../chain/result/ResultSetHandler.java | 41 ++++++ .../test_common/common/Condition.java | 8 ++ .../test_common/openmldb/NsClient.java | 57 ++++++++ .../openmldb/OpenMLDBGlobalVar.java | 2 +- .../test_common/openmldb/SDKClient.java | 91 ++++++++++++ .../openmldb/test_common/util/DataUtil.java | 84 ++---------- .../test_common/util/NsCliResultUtil.java | 30 ++++ .../openmldb/test_common/util/ResultUtil.java | 82 +++++++++++ .../test_common/util/SDKByJDBCUtil.java | 16 +++ .../openmldb/test_common/util/SDKUtil.java | 46 ++++--- .../openmldb/test_common/util/SQLUtil.java | 51 +++++++ .../openmldb/test_common/util/WaitUtil.java | 67 +++++++++ 21 files changed, 848 insertions(+), 95 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java create mode 100644 
test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKByJDBCUtil.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 14654e695b1..e176d5e344b 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -39,7 +39,6 @@ @Slf4j public class ClusterTest { protected static SqlExecutor executor; - @BeforeTest() @Parameters({"env","version","openMLDBPath"}) public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String openMLDBPath) throws Exception { @@ -69,8 +68,8 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi OpenMLDBGlobalVar.env = caseEnv; } log.info("fedb global var env: {}", env); - OpenMLDBClient fesqlClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); - executor = fesqlClient.getExecutor(); + OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = openMLDBClient.getExecutor(); log.info("executor:{}",executor); Statement statement = executor.getStatement(); statement.execute("SET @@execute_mode='online';"); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 18beeafa67c..9307bb0422e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -1,13 +1,95 @@ package com._4paradigm.openmldb.devops_test.high_availability; import 
com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import com._4paradigm.openmldb.test_common.util.SDKByJDBCUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; +import com._4paradigm.qa.openmldb_deploy.util.Tool; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import org.testng.Assert; +import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; public class TestCluster extends ClusterTest { + @Test public void testMoreReplica(){ + SDKClient sdkClient = SDKClient.of(executor); // 创建磁盘表和内存表。 - + String dbName = "test_devops4"; + String memoryTable = "test_memory"; + String ssdTable = "test_ssd"; + String hddTable = "test_hdd"; + int dataCount = 100; + sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3,storage_mode=\"HDD\");"; + 
sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + // 插入一定量的数据 + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + sdkClient.insertList(memoryTable,dataList); + sdkClient.insertList(ssdTable,dataList); + sdkClient.insertList(hddTable,dataList); // 其中一个tablet stop,leader 内存表和磁盘表可以正常访问,flower 内存表和磁盘表可以正常访问。 + String basePath = OpenMLDBGlobalVar.mainInfo.getBasePath(); + String stopOneTabletCommand = String.format("sh %s/openmldb-tablet-1/bin/start.sh stop tablet",basePath); + ExecutorUtil.run(stopOneTabletCommand); + Tool.sleep(5*1000); + String selectMemory = String.format("select c1 from %s;",memoryTable); + String selectSSD = String.format("select c1 from %s;",ssdTable); + String selectHDD = String.format("select c1 from %s;",hddTable); + OpenMLDBResult memoryResult = sdkClient.execute(selectMemory); + OpenMLDBResult ssdResult = sdkClient.execute(selectSSD); + OpenMLDBResult hddResult = sdkClient.execute(selectHDD); + String oneTabletStopMsg = "tablet1 stop tablet row count check failed."; + Assert.assertEquals(memoryResult.getCount(),dataCount,oneTabletStopMsg); + Assert.assertEquals(ssdResult.getCount(),dataCount,oneTabletStopMsg); + Assert.assertEquals(hddResult.getCount(),dataCount,oneTabletStopMsg); // tablet start,数据可以回复,要看磁盘表和内存表。 + String startOneTabletCommand = String.format("sh %s/openmldb-tablet-1/bin/start.sh start tablet",basePath); + ExecutorUtil.run(startOneTabletCommand); + Tool.sleep(5*1000); + //创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 //tablet 依次restart,数据可回复,可以访问。 diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java new file mode 100644 index 00000000000..0ef9fa8408d --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java @@ -0,0 +1,129 @@ +package com._4paradigm.openmldb.devops_test.tmp; + +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.util.Tool; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import lombok.extern.slf4j.Slf4j; +import org.testng.Assert; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +@Slf4j +public class TestClusterLinux { + private SqlExecutor executor; + @BeforeClass + public void init() throws SQLException { + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") + .zk_root_path("/openmldb") + .nsNum(2).tabletNum(3) + .nsEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30001", 
"172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30006")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = openMLDBClient.getExecutor(); + log.info("executor:{}",executor); + Statement statement = executor.getStatement(); + statement.execute("SET @@execute_mode='online';"); + } + @Test + public void testMoreReplica(){ + SDKClient sdkClient = SDKClient.of(executor); + // 创建磁盘表和内存表。 + String dbName = "test_devops4"; + String memoryTable = "test_memory"; + String ssdTable = "test_ssd"; + String hddTable = "test_hdd"; + int dataCount = 100; + sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3,storage_mode=\"HDD\");"; + sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + // 插入一定量的数据 + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + 
sdkClient.insertList(memoryTable,dataList); + sdkClient.insertList(ssdTable,dataList); + sdkClient.insertList(hddTable,dataList); + // 其中一个tablet stop,leader 内存表和磁盘表可以正常访问,flower 内存表和磁盘表可以正常访问。 +// String basePath = OpenMLDBGlobalVar.mainInfo.getBasePath(); +// String stopOneTabletCommand = String.format("sh %s/openmldb-tablet-1/bin/start.sh stop tablet",basePath); +// ExecutorUtil.run(stopOneTabletCommand); +// Tool.sleep(5*1000); +// String selectMemory = String.format("select c1 from %s;",memoryTable); +// String selectSSD = String.format("select c1 from %s;",ssdTable); +// String selectHDD = String.format("select c1 from %s;",hddTable); +// OpenMLDBResult memoryResult = sdkClient.execute(selectMemory); +// OpenMLDBResult ssdResult = sdkClient.execute(selectSSD); +// OpenMLDBResult hddResult = sdkClient.execute(selectHDD); +// Assert.assertEquals(memoryResult.getCount(),dataCount); +// Assert.assertEquals(ssdResult.getCount(),dataCount); +// Assert.assertEquals(hddResult.getCount(),dataCount); + // tablet start,数据可以回复,要看磁盘表和内存表。 + //创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 + //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 + //tablet 依次restart,数据可回复,可以访问。 + //3个tablet stop,不能访问。 + // 1个tablet启动,数据可回复,分片所在的表,可以访问。 + //ns stop,可以正常访问。 + //2个ns stop,不能访问。 + //ns start 可以访问。 + //一个 zk stop,可以正常访问 + //3个zk stop,不能正常访问。 + //一个zk start,可正常访问。 + //3个 zk start,可正常访问。 + // 一个节点(ns leader 所在服务器)重启,leader可以正常访问,flower可以正常访问。 + //一直查询某一个表,然后重启一个机器。 + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java new file mode 100644 index 00000000000..0b703bc36d5 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java @@ -0,0 +1,14 @@ +package 
com._4paradigm.openmldb.devops_test.tmp; + +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import org.testng.annotations.Test; + +import java.util.List; + +public class TestCommand { + @Test + public void test1(){ + List list = ExecutorUtil.run("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=ns_client --interactive=false --database=test_devops4 --cmd='showopstatus'"); + list.forEach(System.out::println); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java new file mode 100644 index 00000000000..3f3e32f0c08 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java @@ -0,0 +1,20 @@ +package com._4paradigm.openmldb.devops_test.tmp; + +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import org.testng.annotations.Test; + +public class TestSDKClient extends ClusterTest { + + @Test + public void testComponents(){ +// SDKClient sdkClient = SDKClient.of(executor); +// boolean b= sdkClient.checkComponentStatus("127.0.0.1:30001","online"); +// System.out.println("b = " + b); + NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + boolean flag = nsClient.checkOPStatusDone("test_devops4",null); + + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java index 1299aea5895..f440aff440f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java @@ -31,6 +31,7 @@ public class OpenMLDBResult { private String dbName; private List tableNames; + private String sql; private boolean ok; private int count; private String msg = ""; @@ -44,7 +45,8 @@ public class OpenMLDBResult { @Override public String toString() { - StringBuilder builder = new StringBuilder("FesqlResult{"); + StringBuilder builder = new StringBuilder("OpenMLDBResult{"); + builder.append("sql=").append(sql); builder.append("ok=").append(ok); if (!ok) { builder.append(", msg=").append(msg); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java new file mode 100644 index 00000000000..8dc87602abb --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java @@ -0,0 +1,43 @@ +package com._4paradigm.openmldb.test_common.bean; + +import com.google.common.collect.Sets; +import org.testng.collections.Lists; + +import java.util.List; +import java.util.Set; + +public enum SQLType { + SELECT, + DEPLOY, + SHOW, + // insert + INSERT, + CREATE, + DROP, + USE + ; + public static final Set RESULT_SET = Sets.newHashSet(SELECT, SHOW, DEPLOY); +// public static final List VOID = Lists.newArrayList(CREATE,DROP,USE,INSERT); + public static SQLType parseSQLType(String sql){ + if(sql.startsWith("select")){ + 
return SELECT; + }else if (sql.startsWith("insert into")) { + return INSERT; + }else if (sql.startsWith("show")) { + return SHOW; + }else if (sql.startsWith("create")) { + return CREATE; + }else if (sql.startsWith("drop")) { + return DROP; + }else if (sql.startsWith("use")) { + return USE; + } + throw new IllegalArgumentException("no match sql type,sql:"+sql); + } + public static boolean isResultSet(SQLType sqlType){ + return RESULT_SET.contains(sqlType); + } + public static boolean isResultSet(String sql){ + return isResultSet(parseSQLType(sql)); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java new file mode 100644 index 00000000000..0e69158f0e7 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java @@ -0,0 +1,43 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com._4paradigm.openmldb.test_common.chain.result; + + +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.bean.SQLType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import lombok.Setter; + +import java.sql.Statement; + +@Setter +public abstract class AbstractResultHandler { + private AbstractResultHandler nextHandler; + + public abstract boolean preHandle(SQLType sqlType); + + public abstract void onHandle(Statement statement, OpenMLDBResult openMLDBResult); + + public void doHandle(Statement statement, OpenMLDBResult openMLDBResult){ + SQLType sqlType = SQLType.parseSQLType(openMLDBResult.getSql()); + if(preHandle(sqlType)){ + onHandle(statement,openMLDBResult); + } + if(nextHandler!=null){ + nextHandler.doHandle(statement,openMLDBResult); + } + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java new file mode 100644 index 00000000000..2bc84bb5ebf --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java @@ -0,0 +1,26 @@ +package com._4paradigm.openmldb.test_common.chain.result; + +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; + +import java.sql.Statement; + +public class ResultChainManager { + private AbstractResultHandler resultHandler; + private ResultChainManager() { + ResultSetHandler selectResultHandler = new ResultSetHandler(); + + resultHandler = selectResultHandler; + } + + private static class ClassHolder { + private static final ResultChainManager holder = new ResultChainManager(); + } + + public static ResultChainManager of() { + return ClassHolder.holder; + } + public void 
toOpenMLDBResult(Statement statement, OpenMLDBResult openMLDBResult){ + resultHandler.doHandle(statement,openMLDBResult); + } + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java new file mode 100644 index 00000000000..1f51387a9ab --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java @@ -0,0 +1,41 @@ +package com._4paradigm.openmldb.test_common.chain.result; + +import com._4paradigm.openmldb.jdbc.SQLResultSet; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.bean.SQLType; +import com._4paradigm.openmldb.test_common.util.ResultUtil; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; + +public class ResultSetHandler extends AbstractResultHandler { + + @Override + public boolean preHandle(SQLType sqlType) { + return SQLType.isResultSet(sqlType); + } + + @Override + public void onHandle(Statement statement, OpenMLDBResult openMLDBResult) { + try { + ResultSet resultSet = statement.getResultSet(); + if (resultSet == null) { + openMLDBResult.setOk(false); + openMLDBResult.setMsg("executeSQL fail, result is null"); + } else if (resultSet instanceof SQLResultSet){ + SQLResultSet rs = (SQLResultSet)resultSet; + ResultUtil.setSchema(rs.getMetaData(),openMLDBResult); + List> result = ResultUtil.toList(rs); + openMLDBResult.setCount(result.size()); + openMLDBResult.setResult(result); + } + } catch (SQLException e) { + e.printStackTrace(); + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); + throw new RuntimeException(e); + } + } +} diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java new file mode 100644 index 00000000000..a755976d457 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java @@ -0,0 +1,8 @@ +package com._4paradigm.openmldb.test_common.common; + +/** + * Created by zhangguanglin on 2020/1/16. + */ +public interface Condition { + Boolean execute(); +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java new file mode 100644 index 00000000000..ee311c75473 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -0,0 +1,57 @@ +package com._4paradigm.openmldb.test_common.openmldb; + +import com._4paradigm.openmldb.test_common.command.CommandUtil; +import com._4paradigm.openmldb.test_common.util.NsCliResultUtil; +import com._4paradigm.openmldb.test_common.util.Tool; +import com._4paradigm.openmldb.test_common.util.WaitUtil; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; + +import java.util.List; + +@Slf4j +public class NsClient { + private OpenMLDBInfo openMLDBInfo; + private String openMLDBPath; + private String zkCluster; + private String zkRootPath; + + private NsClient(OpenMLDBInfo openMLDBInfo){ + this.openMLDBInfo = openMLDBInfo; + this.openMLDBPath = openMLDBInfo.getOpenMLDBPath(); + this.zkCluster = openMLDBInfo.getZk_cluster(); + 
this.zkRootPath = openMLDBInfo.getZk_root_path(); + } + public static NsClient of(OpenMLDBInfo openMLDBInfo){ + return new NsClient(openMLDBInfo); + } + public String genNsCommand(String openMLDBPath,String zkCluster,String zkRootPath,String dbName,String command){ + String line = "%s --zk_cluster=%s --zk_root_path=%s --role=ns_client --interactive=false --database=%s --cmd='%s'"; + line = String.format(line,openMLDBPath,zkCluster,zkRootPath,dbName,command); + log.info("ns command:"+line); + return line; + } + public String genNsCommand(String dbName,String command){ + return genNsCommand(openMLDBPath,zkCluster,zkRootPath,dbName,command); + } + public List runNs(String dbName,String command){ + String nsCommand = genNsCommand(dbName,command); + return CommandUtil.run(nsCommand); + } + public boolean checkOPStatusDone(String dbName,String tableName){ + String command = StringUtils.isNotEmpty(tableName) ?"showopstatus "+tableName:"showopstatus"; + String nsCommand = genNsCommand(dbName,command); + Tool.sleep(3*1000); + return WaitUtil.waitCondition(()->{ + List lines = CommandUtil.run(nsCommand); + return NsCliResultUtil.checkOPStatus(lines,"kDone"); + },()->{ + List lines = CommandUtil.run(nsCommand); + return NsCliResultUtil.checkOPStatusAny(lines,"kFailed"); + }); + } + + + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java index c7b4d2eaa48..e8ffe6fef94 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java @@ -27,7 +27,7 @@ public class OpenMLDBGlobalVar { public 
static String env; public static String level; public static String version; - public static String fedbPath; + public static String openMLDBPath; public static OpenMLDBInfo mainInfo; public static String dbName = "test_zw"; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java new file mode 100644 index 00000000000..976a24be756 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -0,0 +1,91 @@ +package com._4paradigm.openmldb.test_common.openmldb; + +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.bean.SQLType; +import com._4paradigm.openmldb.test_common.chain.result.ResultChainManager; +import com._4paradigm.openmldb.test_common.util.SDKUtil; +import com._4paradigm.openmldb.test_common.util.SQLUtil; +import com._4paradigm.openmldb.test_common.util.WaitUtil; +import lombok.extern.slf4j.Slf4j; +import org.testng.collections.Lists; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +@Slf4j +public class SDKClient { + private Statement statement; + + private SDKClient(SqlExecutor executor){ + this.statement = executor.getStatement(); + } + public static SDKClient of(SqlExecutor executor){ + return new SDKClient(executor); + } + public OpenMLDBResult execute(String sql) { + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); + try { + boolean ok = statement.execute(sql); + openMLDBResult.setOk(ok); + openMLDBResult.setSql(sql); + ResultChainManager.of().toOpenMLDBResult(statement,openMLDBResult); + log.info(openMLDBResult.toString()); 
+ return openMLDBResult; + } catch (SQLException e) { + e.printStackTrace(); + throw new IllegalStateException(e); + } + } + public OpenMLDBResult execute(List sqlList) { + OpenMLDBResult openMLDBResult = null; + for(String sql:sqlList){ + openMLDBResult = execute(sql); + } + return openMLDBResult; + } + public boolean checkComponentStatus(String endpoint,String status){ + String sql = "show components;"; + return WaitUtil.waitCondition(()->{ + OpenMLDBResult openMLDBResult = execute(sql); + List> rows = openMLDBResult.getResult(); + long count = rows.stream().filter(row -> row.get(0).equals(endpoint) && row.get(3).equals(status)).count(); + return count==1; + }); + } + public void createDB(String dbName){ + String sql = String.format("create database %s",dbName); + execute(sql); + } + public void useDB(String dbName){ + String sql = String.format("use %s",dbName); + execute(sql); + } + public void createAndUseDB(String dbName){ + List sqlList = new ArrayList<>(); + if (!SDKUtil.dbIsExist(statement,dbName)) { + sqlList.add(String.format("create database %s", dbName)); + } + sqlList.add(String.format("use %s", dbName)); + execute(sqlList); + } + public void insert(String tableName,List list){ + List> dataList = new ArrayList<>(); + insertList(tableName,dataList); + } + public void insertList(String tableName,List> dataList){ + String sql = SQLUtil.genInsertSQL(tableName,dataList); + execute(sql); + } + public void close(){ + if(statement!=null){ + try { + statement.close(); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java index 9a0154d0a89..a00c064ac26 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java @@ -10,6 +10,21 @@ import java.util.List; @Slf4j public class DataUtil { + public static Object parseRules(String data){ + Object obj = null; + if(data.equals("{currentTime}")){ + obj = System.currentTimeMillis(); + }else if(data.startsWith("{currentTime}-")){ + long t = Long.parseLong(data.substring(14)); + obj = System.currentTimeMillis()-t; + }else if(data.startsWith("{currentTime}+")){ + long t = Long.parseLong(data.substring(14)); + obj = System.currentTimeMillis()+t; + }else{ + obj = data; + } + return obj; + } public static boolean setPreparedData(PreparedStatement ps, List parameterType, List objects) throws SQLException { for(int i=0;i> convertRows(List> rows, List> convertResultSetToList(SQLResultSet rs) throws SQLException { - List> result = new ArrayList<>(); - while (rs.next()) { - List list = new ArrayList(); - int columnCount = rs.getMetaData().getColumnCount(); - for (int i = 0; i < columnCount; i++) { - list.add(DataUtil.getColumnData(rs, i)); - } - result.add(list); - } - return result; - } - public static String convertResultSetToListDeploy(SQLResultSet rs) throws SQLException { - String string = null; - while (rs.next()) { - int columnCount = rs.getMetaData().getColumnCount(); - for (int i = 0; i < columnCount; i++) { - string=String.valueOf(DataUtil.getColumnData(rs, i)); - } - } - return string; - } - public static List convertResultSetToListDesc(SQLResultSet rs) throws SQLException { - List res = new ArrayList<>(); - while (rs.next()) { - int columnCount = rs.getMetaData().getColumnCount(); - for (int i = 0; i < columnCount; i++) { - String string=String.valueOf(DataUtil.getColumnData(rs, i)); - res.add(string); - } - } - return res; - } - public static Object 
getColumnData(SQLResultSet rs, int index) throws SQLException { - Object obj = null; - int columnType = rs.getMetaData().getColumnType(index + 1); - if (rs.getNString(index + 1) == null) { - log.info("rs is null"); - return null; - } - if (columnType == Types.BOOLEAN) { - obj = rs.getBoolean(index + 1); - } else if (columnType == Types.DATE) { - try { -// obj = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") -// .parse(rs.getNString(index + 1) + " 00:00:00").getTime()); - obj = rs.getDate(index + 1); - } catch (Exception e) { - e.printStackTrace(); - return null; - } - } else if (columnType == Types.DOUBLE) { - obj = rs.getDouble(index + 1); - } else if (columnType == Types.FLOAT) { - obj = rs.getFloat(index + 1); - } else if (columnType == Types.SMALLINT) { - obj = rs.getShort(index + 1); - } else if (columnType == Types.INTEGER) { - obj = rs.getInt(index + 1); - } else if (columnType == Types.BIGINT) { - obj = rs.getLong(index + 1); - } else if (columnType == Types.VARCHAR) { - obj = rs.getString(index + 1); - log.info("conver string data {}", obj); - } else if (columnType == Types.TIMESTAMP) { - obj = rs.getTimestamp(index + 1); - } - return obj; - } public static List convertList(List datas, List columns) throws ParseException { List list = new ArrayList(); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java new file mode 100644 index 00000000000..ee81478e991 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java @@ -0,0 +1,30 @@ +package com._4paradigm.openmldb.test_common.util; + +import java.util.List; + +public class NsCliResultUtil { + public static boolean checkOPStatus(List lines,String status){ + 
if(lines.size()<=2) return false; + for(int i=2;i lines,String status){ + if(lines.size()<=2) return false; + for(int i=2;i lines){ OpenmldbDeployment deployment = new OpenmldbDeployment(); @@ -69,4 +76,79 @@ public static void setSchema(ResultSetMetaData metaData, OpenMLDBResult openMLDB e.printStackTrace(); } } + + public static List> toList(SQLResultSet rs) throws SQLException { + List> result = new ArrayList<>(); + while (rs.next()) { + List list = new ArrayList(); + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + list.add(getColumnData(rs, i)); + } + result.add(list); + } + return result; + } + + + + public static String convertResultSetToListDeploy(SQLResultSet rs) throws SQLException { + String string = null; + while (rs.next()) { + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + string=String.valueOf(getColumnData(rs, i)); + } + } + return string; + } + + public static List convertResultSetToListDesc(SQLResultSet rs) throws SQLException { + List res = new ArrayList<>(); + while (rs.next()) { + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + String string=String.valueOf(getColumnData(rs, i)); + res.add(string); + } + } + return res; + } + + public static Object getColumnData(SQLResultSet rs, int index) throws SQLException { + Object obj = null; + int columnType = rs.getMetaData().getColumnType(index + 1); + if (rs.getNString(index + 1) == null) { + log.info("rs is null"); + return null; + } + if (columnType == Types.BOOLEAN) { + obj = rs.getBoolean(index + 1); + } else if (columnType == Types.DATE) { + try { +// obj = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") +// .parse(rs.getNString(index + 1) + " 00:00:00").getTime()); + obj = rs.getDate(index + 1); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + } else if (columnType == Types.DOUBLE) { + obj = rs.getDouble(index + 1); + } else 
if (columnType == Types.FLOAT) { + obj = rs.getFloat(index + 1); + } else if (columnType == Types.SMALLINT) { + obj = rs.getShort(index + 1); + } else if (columnType == Types.INTEGER) { + obj = rs.getInt(index + 1); + } else if (columnType == Types.BIGINT) { + obj = rs.getLong(index + 1); + } else if (columnType == Types.VARCHAR) { + obj = rs.getString(index + 1); + log.info("conver string data {}", obj); + } else if (columnType == Types.TIMESTAMP) { + obj = rs.getTimestamp(index + 1); + } + return obj; + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKByJDBCUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKByJDBCUtil.java new file mode 100644 index 00000000000..bf51a770ac1 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKByJDBCUtil.java @@ -0,0 +1,16 @@ +package com._4paradigm.openmldb.test_common.util; + +import com._4paradigm.openmldb.sdk.SqlExecutor; +import lombok.extern.slf4j.Slf4j; +import org.testng.collections.Lists; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; + +@Slf4j +public class SDKByJDBCUtil { + + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 77668c82200..8e724ba5cc4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -17,8 +17,6 @@ package 
com._4paradigm.openmldb.test_common.util; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; import com._4paradigm.openmldb.test_common.bean.OpenMLDBSchema; import com._4paradigm.openmldb.jdbc.CallablePreparedStatement; import com._4paradigm.openmldb.jdbc.SQLResultSet; @@ -29,7 +27,6 @@ import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com.google.common.base.Joiner; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; @@ -37,9 +34,6 @@ import org.testng.collections.Lists; import java.sql.*; -import java.sql.Date; -import java.text.ParseException; -import java.text.SimpleDateFormat; import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -159,7 +153,7 @@ public static OpenMLDBResult deploy(SqlExecutor executor, String dbName, String SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - String deployStr = DataUtil.convertResultSetToListDeploy(rs); + String deployStr = ResultUtil.convertResultSetToListDeploy(rs); String[] strings = deployStr.split("\n"); List stringList = Arrays.asList(strings); OpenmldbDeployment openmldbDeployment = ResultUtil.parseDeployment(stringList); @@ -188,7 +182,7 @@ public static OpenMLDBResult showDeploys(SqlExecutor executor, String dbName, St SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - List> lists = DataUtil.convertResultSetToList(rs); + List> lists = ResultUtil.toList(rs); if(lists.size() == 0 ||lists.isEmpty()){ fesqlResult.setDeploymentCount(0); }else { @@ -223,8 +217,8 @@ 
public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String de SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - String deployStr = DataUtil.convertResultSetToListDeploy(rs); - List listDesc = DataUtil.convertResultSetToListDesc(rs); + String deployStr = ResultUtil.convertResultSetToListDeploy(rs); + List listDesc = ResultUtil.convertResultSetToListDesc(rs); String[] strings = deployStr.split("\n"); List stringList = Arrays.asList(strings); OpenMLDBSchema openMLDBSchema = SchemaUtil.parseSchema(stringList); @@ -289,7 +283,7 @@ public static OpenMLDBResult selectWithPrepareStatement(SqlExecutor executor, St SQLResultSet rs = (SQLResultSet)resultSet; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - List> result = DataUtil.convertResultSetToList(rs); + List> result = ResultUtil.toList(rs); fesqlResult.setCount(result.size()); fesqlResult.setResult(result); } catch (Exception e) { @@ -413,7 +407,7 @@ private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor return fesqlResult; } try { - result.addAll(DataUtil.convertResultSetToList((SQLResultSet) resultSet)); + result.addAll(ResultUtil.toList((SQLResultSet) resultSet)); } catch (SQLException throwables) { fesqlResult.setOk(false); fesqlResult.setMsg("Convert Result Set To List Fail"); @@ -491,7 +485,7 @@ private static OpenMLDBResult selectBatchRequestModeWithPreparedStatement(SqlExe sqlResultSet = (SQLResultSet) rps.executeQuery(); List> result = Lists.newArrayList(); - result.addAll(DataUtil.convertResultSetToList(sqlResultSet)); + result.addAll(ResultUtil.toList(sqlResultSet)); fesqlResult.setResult(result); ResultUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); fesqlResult.setCount(result.size()); @@ -578,7 +572,7 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri logger.error("select result:{}", fesqlResult); return fesqlResult; } - 
result.addAll(DataUtil.convertResultSetToList((SQLResultSet) resultSet)); + result.addAll(ResultUtil.toList((SQLResultSet) resultSet)); if (needInsertRequestRow && !executor.executeInsert(insertDbName, inserts.get(i))) { fesqlResult.setOk(false); fesqlResult.setMsg("fail to execute sql in request mode: fail to insert request row after query"); @@ -671,7 +665,7 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, } } List> result = Lists.newArrayList(); - result.addAll(DataUtil.convertResultSetToList((SQLResultSet) sqlResultSet)); + result.addAll(ResultUtil.toList((SQLResultSet) sqlResultSet)); fesqlResult.setResult(result); ResultUtil.setSchema(sqlResultSet.getMetaData(),fesqlResult); fesqlResult.setCount(result.size()); @@ -798,7 +792,7 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, String SQLResultSet rs = (SQLResultSet)rawRs; ResultUtil.setSchema(rs.getMetaData(),fesqlResult); fesqlResult.setOk(true); - List> result = DataUtil.convertResultSetToList(rs); + List> result = ResultUtil.toList(rs); fesqlResult.setCount(result.size()); fesqlResult.setResult(result); } catch (Exception e) { @@ -913,10 +907,7 @@ public static OpenMLDBResult createAndInsert(SqlExecutor executor, return fesqlResult; } - public static OpenMLDBResult createAndInsertWithPrepared(SqlExecutor executor, - String defaultDBName, - List inputs, - boolean useFirstInputAsRequests) { + public static OpenMLDBResult createAndInsertWithPrepared(SqlExecutor executor, String defaultDBName, List inputs, boolean useFirstInputAsRequests) { OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (inputs != null && inputs.size() > 0) { for (int i = 0; i < inputs.size(); i++) { @@ -985,4 +976,19 @@ public static void setOnline(SqlExecutor sqlExecutor){ e.printStackTrace(); } } + public static boolean dbIsExist(Statement statement,String dbName){ + String sql = "show databases;"; + try { + ResultSet resultSet = statement.executeQuery(sql); + List> rows = 
ResultUtil.toList((SQLResultSet) resultSet); + for(List row:rows){ + if(row.get(0).equals(dbName)){ + return true; + } + } + return false; + } catch (SQLException e) { + throw new RuntimeException(e); + } + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java index 211cce8a1eb..0c96ecdef54 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java @@ -3,6 +3,7 @@ import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import org.apache.commons.collections4.CollectionUtils; import java.sql.SQLException; import java.util.HashSet; @@ -14,6 +15,56 @@ public class SQLUtil { private static String reg = "\\{(\\d+)\\}"; private static Pattern pattern = Pattern.compile(reg); + public static String genInsertSQL(String tableName, List> dataList) { + if (CollectionUtils.isEmpty(dataList)) { + return ""; + } + // insert rows + StringBuilder builder = new StringBuilder("insert into ").append(tableName).append(" values"); + for (int row_id = 0; row_id < dataList.size(); row_id++) { + List list = dataList.get(row_id); + builder.append("\n("); + for (int i = 0; i < list.size(); i++) { + Object data = list.get(i); + if(data == null){ + data = "null"; + }else if(data instanceof String){ + data = DataUtil.parseRules((String)data); + } + if(data instanceof String){ + data = "'" + data + "'"; + } + builder.append(data); + if (i < list.size() - 1) { + builder.append(","); + } + } + if (row_id < dataList.size() - 1) { 
+ builder.append("),"); + } else { + builder.append(");"); + } + } + return builder.toString(); + } + + public static String buildInsertSQLWithPrepared(String name, List columns) { + if (CollectionUtils.isEmpty(columns)) { + return ""; + } + // insert rows + StringBuilder builder = new StringBuilder("insert into ").append(name).append(" values"); + builder.append("\n("); + for (int i = 0; i < columns.size(); i++) { + builder.append("?"); + if (i < columns.size() - 1) { + builder.append(","); + } + } + builder.append(");"); + return builder.toString(); + } + public static String formatSql(String sql, List tableNames, OpenMLDBInfo fedbInfo) { Matcher matcher = pattern.matcher(sql); while (matcher.find()) { diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java new file mode 100644 index 00000000000..537d394f722 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java @@ -0,0 +1,67 @@ +package com._4paradigm.openmldb.test_common.util; + + +import com._4paradigm.openmldb.test_common.common.Condition; +import lombok.extern.log4j.Log4j; +import lombok.extern.slf4j.Slf4j; + + +@Slf4j +public class WaitUtil { + public static boolean waitCondition(Condition condition) { + return waitCondition(condition,3,180); + } + public static boolean waitCondition(Condition condition,Condition fail) { + return waitCondition(condition,fail,3,180); + } + + /** + * + * @param condition 等待的条件 + * @param interval 轮询间隔,单位为秒 + * @param timeout 轮询超时时间,单位为秒 + * @return 条件为真返回真,否则返回false + * @throws Exception + */ + private static boolean waitCondition(Condition condition, int interval, int timeout) { + int count = 1; + while (timeout > 0){ + log.info("retry count:{}",count); + if 
(condition.execute()){ + return true; + }else { + timeout -= interval; + Tool.sleep(interval*1000); + } + count++; + } + log.info("wait timeout!"); + return false; + } + /** + * + * @param condition 等待的条件 + * @param interval 轮询间隔,单位为秒 + * @param timeout 轮询超时时间,单位为秒 + * @return 条件为真返回真,否则返回false + * @throws Exception + */ + private static boolean waitCondition(Condition condition, Condition fail, int interval, int timeout) { + int count = 1; + while (timeout > 0){ + log.info("retry count:{}",count); + if (condition.execute()){ + return true; + } else if(fail.execute()){ + return false; + }else { + timeout -= interval; + Tool.sleep(interval*1000); + } + count++; + } + log.info("wait timeout!"); + return false; + } + +} From 358a0f7173f40ee701cfe577f6318a82dac3d055 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 7 Jul 2022 18:56:36 +0800 Subject: [PATCH 022/172] add ns operator --- .../com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java index 0b703bc36d5..886b3037ccb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java @@ -11,4 +11,9 @@ public void test1(){ List list = ExecutorUtil.run("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=ns_client --interactive=false --database=test_devops4 --cmd='showopstatus'"); list.forEach(System.out::println); } + @Test + public void test2(){ + List list = 
ExecutorUtil.run("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb --zk_cluster=172.24.4.55:30000 --zk_root_path=/openmldb --role=ns_client --interactive=false --database=test_devops4 --cmd='showopstatus'"); + list.forEach(System.out::println); + } } From e4b799fc89c99546d091e7d8e2ac543dbc36b2b2 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 7 Jul 2022 19:01:29 +0800 Subject: [PATCH 023/172] add ns operator --- .../openmldb-devops-test/pom.xml | 33 +++++++++++++++++++ .../test_suite/test_tmp.xml | 14 ++++++++ 2 files changed, 47 insertions(+) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml index 38cb7f9fd09..686731302c7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/pom.xml @@ -14,6 +14,9 @@ 8 8 + + test_suite/test_tmp.xml + 1.8.9 @@ -24,4 +27,34 @@ + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20.1 + + false + 1 + + ${suite} + + always + + -javaagent:"${settings.localRepository}/org/aspectj/aspectjweaver/${aspectj.version}/aspectjweaver-${aspectj.version}.jar" + + + target/ + + + + org.aspectj + aspectjweaver + ${aspectj.version} + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml new file mode 100644 index 00000000000..bdb85ced850 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file From 986a0ebd1c677334a1a24a86dc9846ebced140ff Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 7 Jul 2022 19:15:09 +0800 Subject: [PATCH 
024/172] add ns operator --- .../openmldb-test-java/openmldb-test-common/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 2d11ecfdc28..ff94cacead2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -16,7 +16,7 @@ 8 0.5.0 - 0.5.0-macos + 0.5.0 From 36ad5aa1540c0cd3948d2a9e1b7adf551c4b6f7d Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sat, 9 Jul 2022 14:36:21 +0800 Subject: [PATCH 025/172] add ns operator --- cases/function/join/test_lastjoin_simple.yaml | 2 +- .../qa/openmldb_deploy/test/TmpDeploy.java | 13 ++- .../test-suite/test_deploy.xml | 2 +- .../test_deploy_cluster_standalone.xml | 14 ++++ .../test-suite/test_deploy_tmp2.xml | 6 +- .../high_availability/TestCluster.java | 71 ++++++++++++---- .../openmldb/devops_test/tmp/TestCommand.java | 19 +++++ .../devops_test/tmp/TestSDKClient.java | 7 +- .../shell/{stop-fedb.sh => stop-openmldb.sh} | 0 .../cluster/v230/LastJoinTest.java | 12 +-- .../openmldb-test-common/pom.xml | 4 +- .../test_common/openmldb/NsClient.java | 82 +++++++++++++++++-- .../test_common/openmldb/OpenMLDBDevops.java | 55 +++++++++++++ .../test_common/openmldb/SDKClient.java | 11 ++- .../test_common/util/NsCliResultUtil.java | 30 ------- .../test_common/util/NsResultUtil.java | 69 ++++++++++++++++ .../command_tool/common/LocalExecutor.java | 39 ++------- 17 files changed, 330 insertions(+), 106 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml rename test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/{stop-fedb.sh => stop-openmldb.sh} (100%) create mode 100644 
test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsResultUtil.java diff --git a/cases/function/join/test_lastjoin_simple.yaml b/cases/function/join/test_lastjoin_simple.yaml index 9b1936f4014..626a5917216 100644 --- a/cases/function/join/test_lastjoin_simple.yaml +++ b/cases/function/join/test_lastjoin_simple.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: ["正常拼接"] +debugs: ["Last Join 无order by, 拼表条件命中索引, 副表多条命中"] cases: - id: 1 desc: 正常拼接 diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java index 966b95ea440..dba377b59a1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploy.java @@ -9,7 +9,7 @@ public class TmpDeploy { @Test @Parameters({"version","openMLDBPath"}) - public void testTmp(@Optional("tmp_mac") String version,@Optional("") String openMLDBPath){ + public void testCluster(@Optional("tmp_mac") String version,@Optional("") String openMLDBPath){ OpenMLDBDeploy deploy = new OpenMLDBDeploy(version); deploy.setOpenMLDBPath(openMLDBPath); deploy.setCluster(true); @@ -17,4 +17,15 @@ public void testTmp(@Optional("tmp_mac") String version,@Optional("") String ope OpenMLDBInfo openMLDBInfo = deploy.deployCluster(2, 3); 
System.out.println(openMLDBInfo); } + + @Test + @Parameters({"version","openMLDBPath"}) + public void testClusterByStandalone(@Optional("tmp_mac") String version,@Optional("") String openMLDBPath){ + OpenMLDBDeploy deploy = new OpenMLDBDeploy(version); + deploy.setOpenMLDBPath(openMLDBPath); + deploy.setCluster(false); + deploy.setSparkMaster("local"); + OpenMLDBInfo openMLDBInfo = deploy.deployCluster(2, 3); + System.out.println(openMLDBInfo); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 17760957816..df952ea46d9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -6,7 +6,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml new file mode 100644 index 00000000000..9bc888e430c --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml index 144229117e0..6fc9dd189e7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml @@ -2,12 +2,12 @@ - - + + - + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 9307bb0422e..10c2f0cbd88 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -2,12 +2,16 @@ import com._4paradigm.openmldb.devops_test.common.ClusterTest; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.openmldb.SDKClient; import com._4paradigm.openmldb.test_common.util.SDKByJDBCUtil; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.qa.openmldb_deploy.util.Tool; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.RandomUtils; import org.testng.Assert; import org.testng.annotations.Test; import org.testng.collections.Lists; @@ -20,6 +24,8 @@ public class TestCluster extends ClusterTest { @Test public void testMoreReplica(){ SDKClient sdkClient = SDKClient.of(executor); + NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + OpenMLDBDevops openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo); // 创建磁盘表和内存表。 String dbName = "test_devops4"; String memoryTable = "test_memory"; @@ -71,33 +77,48 @@ public void testMoreReplica(){ sdkClient.insertList(ssdTable,dataList); sdkClient.insertList(hddTable,dataList); // 其中一个tablet stop,leader 内存表和磁盘表可以正常访问,flower 内存表和磁盘表可以正常访问。 - String basePath = OpenMLDBGlobalVar.mainInfo.getBasePath(); - String stopOneTabletCommand 
= String.format("sh %s/openmldb-tablet-1/bin/start.sh stop tablet",basePath); - ExecutorUtil.run(stopOneTabletCommand); - Tool.sleep(5*1000); - String selectMemory = String.format("select c1 from %s;",memoryTable); - String selectSSD = String.format("select c1 from %s;",ssdTable); - String selectHDD = String.format("select c1 from %s;",hddTable); - OpenMLDBResult memoryResult = sdkClient.execute(selectMemory); - OpenMLDBResult ssdResult = sdkClient.execute(selectSSD); - OpenMLDBResult hddResult = sdkClient.execute(selectHDD); - String oneTabletStopMsg = "tablet1 stop tablet row count check failed."; - Assert.assertEquals(memoryResult.getCount(),dataCount,oneTabletStopMsg); - Assert.assertEquals(ssdResult.getCount(),dataCount,oneTabletStopMsg); - Assert.assertEquals(hddResult.getCount(),dataCount,oneTabletStopMsg); + openMLDBDevops.operateTablet(0,"stop"); + String oneTabletStopMsg = "tablet1 stop table row count check failed."; + Assert.assertEquals(sdkClient.getTableRowCount(memoryTable),dataCount,oneTabletStopMsg); + Assert.assertEquals(sdkClient.getTableRowCount(ssdTable),dataCount,oneTabletStopMsg); + Assert.assertEquals(sdkClient.getTableRowCount(hddTable),dataCount,oneTabletStopMsg); // tablet start,数据可以回复,要看磁盘表和内存表。 - String startOneTabletCommand = String.format("sh %s/openmldb-tablet-1/bin/start.sh start tablet",basePath); - ExecutorUtil.run(startOneTabletCommand); - Tool.sleep(5*1000); - + openMLDBDevops.operateTablet(0,"start"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); //创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 + openMLDBDevops.operateTablet(0,"restart"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 + nsClient.makeSnapshot(dbName,memoryTable); + nsClient.makeSnapshot(dbName,ssdTable); + nsClient.makeSnapshot(dbName,hddTable); //tablet 依次restart,数据可回复,可以访问。 + 
openMLDBDevops.operateTablet("restart"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); //3个tablet stop,不能访问。 + openMLDBDevops.operateTablet("stop"); + OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + System.out.println(openMLDBResult.getMsg()); // 1个tablet启动,数据可回复,分片所在的表,可以访问。 + openMLDBDevops.operateTablet(0,"start"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //ns stop,可以正常访问。 + openMLDBDevops.operateNs(0,"stop"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //2个ns stop,不能访问。 + openMLDBDevops.operateNs(1,"stop"); + openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + System.out.println(openMLDBResult.getMsg()); //ns start 可以访问。 + openMLDBDevops.operateNs(0,"start"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + // 单zk stop 后不能访问 + openMLDBDevops.operateZKOne("stop"); + openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + System.out.println(openMLDBResult.getMsg()); + // 单zk start 后可以访问 + openMLDBDevops.operateZKOne("start"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //一个 zk stop,可以正常访问 //3个zk stop,不能正常访问。 //一个zk start,可正常访问。 @@ -105,4 +126,18 @@ public void testMoreReplica(){ // 一个节点(ns leader 所在服务器)重启,leader可以正常访问,flower可以正常访问。 //一直查询某一个表,然后重启一个机器。 } + public void addDataCheck(SDKClient sdkClient, NsClient nsClient,List tableNames,int originalCount,int addCount){ + List> addDataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList(c1 + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + addDataList.add(list); + } + String msg = "table add data check count failed."; + for(String tableName:tableNames){ + sdkClient.insertList(tableName,addDataList); 
+ Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); + } + nsClient.checkTableOffSet(null,null); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java index 886b3037ccb..6542cfa361a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java @@ -1,5 +1,7 @@ package com._4paradigm.openmldb.devops_test.tmp; +import com._4paradigm.test_tool.command_tool.common.CommandUtil; +import com._4paradigm.test_tool.command_tool.common.ExecUtil; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import org.testng.annotations.Test; @@ -11,9 +13,26 @@ public void test1(){ List list = ExecutorUtil.run("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=ns_client --interactive=false --database=test_devops4 --cmd='showopstatus'"); list.forEach(System.out::println); } + // + @Test + public void test3(){ + List list = ExecutorUtil.run("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=sql_client --interactive=false --database=test_devops --cmd='select * from test_ssd;'"); + System.out.println("---"); + list.forEach(System.out::println); + } + @Test + public void test4(){ + String str = ExecUtil.exeCommand("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=ns_client --interactive=false --database=test_devops4 --cmd='showopstatus'"); + 
System.out.println("str = " + str); + } @Test public void test2(){ List list = ExecutorUtil.run("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb --zk_cluster=172.24.4.55:30000 --zk_root_path=/openmldb --role=ns_client --interactive=false --database=test_devops4 --cmd='showopstatus'"); list.forEach(System.out::println); } + @Test + public void test5(){ + String str = ExecUtil.exeCommand("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=sql_client --interactive=false --database=test_devops --cmd='select * from test_ssd;'"); + System.out.println("str = " + str); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java index 3f3e32f0c08..efa794d9df5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java @@ -1,6 +1,7 @@ package com._4paradigm.openmldb.devops_test.tmp; import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; import com._4paradigm.openmldb.test_common.openmldb.NsClient; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.openmldb.SDKClient; @@ -13,8 +14,10 @@ public void testComponents(){ // SDKClient sdkClient = SDKClient.of(executor); // boolean b= sdkClient.checkComponentStatus("127.0.0.1:30001","online"); // System.out.println("b = " + b); - NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); - boolean flag = 
nsClient.checkOPStatusDone("test_devops4",null); +// NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); +// boolean flag = nsClient.checkOPStatusDone("test_devops4",null); + + OpenMLDBComamndFacade.sql(OpenMLDBGlobalVar.mainInfo,"test_devops","select * from test_ssd;"); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-fedb.sh b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh similarity index 100% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-fedb.sh rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java index a753fd7707a..76f980404c5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java @@ -34,12 +34,12 @@ @Feature("Lastjoin") public class LastJoinTest extends FedbTest { - // @Story("batch") - // @Test(dataProvider = "getCase") - // @Yaml(filePaths = {"function/join/","function/cluster/window_and_lastjoin.yaml"}) - // public void testLastJoin(SQLCase testCase) throws Exception { - // ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); - // } + @Story("batch") + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/join/test_lastjoin_simple.yaml"}) + public void testLastJoin(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } @Story("request") @Test(dataProvider = "getCase") @Yaml(filePaths = 
{"function/join/test_lastjoin_simple.yaml"}) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index ff94cacead2..e4f6dac8601 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.5.0 - 0.5.0 + 0.5.2 + 0.5.2-macos diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index ee311c75473..97df75ec997 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -1,14 +1,15 @@ package com._4paradigm.openmldb.test_common.openmldb; import com._4paradigm.openmldb.test_common.command.CommandUtil; -import com._4paradigm.openmldb.test_common.util.NsCliResultUtil; +import com._4paradigm.openmldb.test_common.util.NsResultUtil; import com._4paradigm.openmldb.test_common.util.Tool; import com._4paradigm.openmldb.test_common.util.WaitUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; +import org.testng.Assert; -import java.util.List; +import java.util.*; @Slf4j public class NsClient { @@ -27,8 +28,9 @@ public static NsClient of(OpenMLDBInfo openMLDBInfo){ return new NsClient(openMLDBInfo); } public String genNsCommand(String openMLDBPath,String zkCluster,String zkRootPath,String dbName,String command){ - String line = "%s --zk_cluster=%s --zk_root_path=%s --role=ns_client --interactive=false 
--database=%s --cmd='%s'"; - line = String.format(line,openMLDBPath,zkCluster,zkRootPath,dbName,command); + String dbStr = StringUtils.isNotEmpty(dbName)?"--database="+dbName:""; + String line = "%s --zk_cluster=%s --zk_root_path=%s --role=ns_client --interactive=false %s --cmd='%s'"; + line = String.format(line,openMLDBPath,zkCluster,zkRootPath,dbStr,command); log.info("ns command:"+line); return line; } @@ -39,17 +41,81 @@ public List runNs(String dbName,String command){ String nsCommand = genNsCommand(dbName,command); return CommandUtil.run(nsCommand); } - public boolean checkOPStatusDone(String dbName,String tableName){ + public void checkOPStatusDone(String dbName,String tableName){ String command = StringUtils.isNotEmpty(tableName) ?"showopstatus "+tableName:"showopstatus"; String nsCommand = genNsCommand(dbName,command); Tool.sleep(3*1000); - return WaitUtil.waitCondition(()->{ + boolean b = WaitUtil.waitCondition(()->{ List lines = CommandUtil.run(nsCommand); - return NsCliResultUtil.checkOPStatus(lines,"kDone"); + return NsResultUtil.checkOPStatus(lines,"kDone"); },()->{ List lines = CommandUtil.run(nsCommand); - return NsCliResultUtil.checkOPStatusAny(lines,"kFailed"); + return NsResultUtil.checkOPStatusAny(lines,"kFailed"); }); + Assert.assertTrue(b,"check op done failed."); + } + public List showTable(String dbName,String tableName){ + String command = StringUtils.isNotEmpty(tableName) ?"showtable "+tableName:"showtable"; + String nsCommand = genNsCommand(dbName,command); + List lines = CommandUtil.run(nsCommand); + return lines; + } + + public void checkTableIsAlive(String dbName,String tableName){ + List lines = showTable(dbName,tableName); + for(int i=2;i lines = showTable(dbName,tableName); + Map> table1 = NsResultUtil.getTableOffset(lines); + for(List values:table1.values()){ + for(Long offset:values){ + Assert.assertEquals(offset,values.get(0)); + } + } + } + public void makeSnapshot(String dbName,String tableName,int pid){ + String command = 
String.format("makesnapshot %s %d",tableName,pid); + String nsCommand = genNsCommand(dbName,command); + List lines = CommandUtil.run(nsCommand); + Assert.assertEquals(lines.get(0),"MakeSnapshot ok"); + checkTableOffSet(dbName,tableName); + } + public void makeSnapshot(String dbName,String tableName){ + List pidList = getPid(dbName,tableName); + for(Integer pid:pidList) { + String command = String.format("makesnapshot %s %d", tableName, pid); + String nsCommand = genNsCommand(dbName, command); + List lines = CommandUtil.run(nsCommand); + Assert.assertEquals(lines.get(0), "MakeSnapshot ok"); + checkTableOffSet(dbName, tableName); + } + } + public List getPid(String dbName,String tableName){ + Map> pidMap = getPid(dbName); + Set value = pidMap.get(tableName); + return new ArrayList<>(value); + } + public Map> getPid(String dbName){ + List lines = showTable(dbName,null); + Map> map = new HashMap<>(); + for(int i=2;i values = map.get(key); + if (values==null) { + values = new HashSet<>(); + } + values.add(pid); + map.put(key,values); + } + return map; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java new file mode 100644 index 00000000000..a95291d8406 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -0,0 +1,55 @@ +package com._4paradigm.openmldb.test_common.openmldb; + +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.util.Tool; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; + +public class OpenMLDBDevops { + private OpenMLDBInfo openMLDBInfo; + private NsClient nsClient; + private SDKClient sdkClient; + private String basePath; + + private 
OpenMLDBDevops(OpenMLDBInfo openMLDBInfo){ + this.openMLDBInfo = openMLDBInfo; + this.nsClient = NsClient.of(openMLDBInfo); + this.sdkClient = SDKClient.of(new OpenMLDBClient(openMLDBInfo.getZk_cluster(),openMLDBInfo.getZk_root_path()).getExecutor()); + this.basePath = openMLDBInfo.getBasePath(); + } + public static OpenMLDBDevops of(OpenMLDBInfo openMLDBInfo){ + return new OpenMLDBDevops(openMLDBInfo); + } + public void operateTablet(int tabletIndex,String operator){ + String command = String.format("sh %s/openmldb-tablet-%d/bin/start.sh %s tablet",basePath,tabletIndex+1,operator); + ExecutorUtil.run(command); + Tool.sleep(5*1000); + String checkStatus = operator.equals("stop")?"offline":"online"; + sdkClient.checkComponentStatus(openMLDBInfo.getTabletEndpoints().get(tabletIndex), checkStatus); + nsClient.checkOPStatusDone(null,null); + if(!operator.equals("stop")) { + nsClient.checkTableIsAlive(null, null); + } + } + public void operateTablet(String operator){ + int size = openMLDBInfo.getTabletEndpoints().size(); + for(int i=0;i sqlList) { } return openMLDBResult; } - public boolean checkComponentStatus(String endpoint,String status){ + public void checkComponentStatus(String endpoint,String status){ String sql = "show components;"; - return WaitUtil.waitCondition(()->{ + boolean b = WaitUtil.waitCondition(()->{ OpenMLDBResult openMLDBResult = execute(sql); List> rows = openMLDBResult.getResult(); long count = rows.stream().filter(row -> row.get(0).equals(endpoint) && row.get(3).equals(status)).count(); return count==1; }); + Assert.assertTrue(b,"check endpoint:"+endpoint+",status:"+status+"failed."); } public void createDB(String dbName){ String sql = String.format("create database %s",dbName); @@ -79,6 +81,11 @@ public void insertList(String tableName,List> dataList){ String sql = SQLUtil.genInsertSQL(tableName,dataList); execute(sql); } + public int getTableRowCount(String tableName){ + String sql = String.format("select * from %s",tableName); + 
OpenMLDBResult openMLDBResult = execute(sql); + return openMLDBResult.getCount(); + } public void close(){ if(statement!=null){ try { diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java deleted file mode 100644 index ee81478e991..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/NsCliResultUtil.java +++ /dev/null @@ -1,30 +0,0 @@ -package com._4paradigm.openmldb.test_common.util; - -import java.util.List; - -public class NsCliResultUtil { - public static boolean checkOPStatus(List lines,String status){ - if(lines.size()<=2) return false; - for(int i=2;i lines,String status){ - if(lines.size()<=2) return false; - for(int i=2;i lines,String status){ + if(lines.size()<=2) return false; + for(int i=2;i lines,String status){ + if(lines.size()<=2) return false; + for(int i=2;i> getTableOffset(List lines){ + Map> offsets = new HashMap<>(); + for(int i=2;i value = offsets.get(key); + String role = infos[4]; + long offset = 0; + String offsetStr = infos[7].trim(); + if(!offsetStr.equals("-")&&!offsetStr.equals("")){ + offset = Long.parseLong(offsetStr); + } + if(value==null){ + value = new ArrayList<>(); + offsets.put(key,value); + } + if(role.equals("leader")){ + value.add(0,offset); + }else { + value.add(offset); + } + } + return offsets; + } + public static Map getTableOffsetByLeader(List lines){ + Map offsets = new HashMap<>(); + for(int i=2;i starts = new ArrayList<>(); + private List contains = new ArrayList<>(); public LocalExecutor(){ starts.add("wget"); starts.add("tar"); + contains.add("--role=ns_client"); } public boolean isUseExec(String command){ for(String start:starts){ @@ -20,6 +22,11 @@ public boolean isUseExec(String command){ return 
true; } } + for(String contain:contains){ + if(command.contains(contain)){ + return true; + } + } return false; } @Override @@ -30,38 +37,6 @@ public String execute(String command) { }else{ result = CommandUtil.run(command); } - -// Scanner input = null; -// Process process = null; -// try { -// process = Runtime.getRuntime().exec(new String[]{"/bin/sh","-c",command}); -// try { -// //等待命令执行完成 -// process.waitFor(600, TimeUnit.SECONDS); -// } catch (InterruptedException e) { -// e.printStackTrace(); -// } -// InputStream is = process.getInputStream(); -// input = new Scanner(is); -// while (input.hasNextLine()) { -// String line = input.nextLine().trim(); -// if(line.contains("ZOO_INFO@log_env") || line.contains("src/zk/zk_client.cc")|| -// line.startsWith("ns leader:")){ -// continue; -// } -// if(line.length()==0) continue; -// list.add(line); -// } -// }catch (Exception e){ -// e.printStackTrace(); -// }finally { -// if (input != null) { -// input.close(); -// } -// if (process != null) { -// process.destroy(); -// } -// } return result; } From 1243eaf9228ba2a116cffb9d8539a2a7016910f3 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sat, 9 Jul 2022 14:43:22 +0800 Subject: [PATCH 026/172] add ns operator --- .../test-suite/test_deploy_cluster_standalone.xml | 2 +- .../openmldb-deploy/test-suite/test_deploy_tmp2.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml index 9bc888e430c..d3656df2dbe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml @@ -2,7 +2,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml 
b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml index 6fc9dd189e7..c83a419bc5e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml @@ -3,7 +3,7 @@ - + From f4f5e909b616172ebc6bf12ca658741efd1f5fc8 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sat, 9 Jul 2022 14:50:39 +0800 Subject: [PATCH 027/172] add ns operator --- .../openmldb-deploy/test-suite/test_deploy.xml | 1 + .../test-suite/test_deploy_cluster_standalone.xml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index df952ea46d9..d96ea93a46f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -2,6 +2,7 @@ + diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml index d3656df2dbe..0ae978e85c6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_cluster_standalone.xml @@ -2,7 +2,7 @@ - + From 930acf70dc1bb5a46c346caa4ca25ad39c9961c7 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sun, 10 Jul 2022 22:04:37 +0800 Subject: [PATCH 028/172] add ns operator --- .../devops_test/high_availability/TestCluster.java | 2 +- .../openmldb/test_common/openmldb/NsClient.java | 9 ++++++--- .../test_tool/command_tool/common/ExecutorUtil.java | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 10c2f0cbd88..8ffb76bdfce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -27,7 +27,7 @@ public void testMoreReplica(){ NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); OpenMLDBDevops openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo); // 创建磁盘表和内存表。 - String dbName = "test_devops4"; + String dbName = "test_devops2"; String memoryTable = "test_memory"; String ssdTable = "test_ssd"; String hddTable = "test_hdd"; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 97df75ec997..bc975766408 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -47,10 +47,13 @@ public void checkOPStatusDone(String dbName,String tableName){ Tool.sleep(3*1000); boolean b = WaitUtil.waitCondition(()->{ List lines = CommandUtil.run(nsCommand); + if(lines.size()<=2){ + return false; + } + if (NsResultUtil.checkOPStatusAny(lines,"kFailed")) { + return false; + } return NsResultUtil.checkOPStatus(lines,"kDone"); - },()->{ - List lines = 
CommandUtil.run(nsCommand); - return NsResultUtil.checkOPStatusAny(lines,"kFailed"); }); Assert.assertTrue(b,"check op done failed."); } diff --git a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java index 9704569d7ca..3a86c9638ad 100644 --- a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java +++ b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/ExecutorUtil.java @@ -23,7 +23,7 @@ public static List run(String command){ for(String line:results){ line = line.trim(); if(line.contains("ZOO_INFO") || line.contains("zk_client.cc")|| - line.startsWith("ns leader:")||line.startsWith("client start in")){ + line.startsWith("ns leader:")||line.startsWith("client start in")||line.startsWith("WARNING:")){ continue; } if(line.length()==0) continue; From 3cd756efe3474f3ac9ec6d2a9a37746bdcca3bb8 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sun, 10 Jul 2022 22:04:59 +0800 Subject: [PATCH 029/172] add ns operator --- .../openmldb-test-java/openmldb-test-common/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index e4f6dac8601..51c5e02bd0e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -16,7 +16,7 @@ 8 0.5.2 - 0.5.2-macos + 0.5.2 From 10daf93484d34c66a1c82fc2dbd23e8b4183dc2f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 11 Jul 2022 05:36:38 +0800 Subject: [PATCH 030/172] add ns operator --- .../integration-test/openmldb-test-java/openmldb-deploy/pom.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml index ba43c16d75a..a554d94b8e1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/pom.xml @@ -14,6 +14,8 @@ 8 8 + + test-suite/test_deploy.xml From 7bb9ed6ed6e5892382e08d4c721049e0eb4f52f4 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 11 Jul 2022 05:45:27 +0800 Subject: [PATCH 031/172] add ns operator --- .../devops_test/common/ClusterTest.java | 25 ++++++++++++++----- .../test_suite/test_cluster.xml | 14 +++++++++++ 2 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index e176d5e344b..b602f5ca94e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -49,16 +49,29 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") +// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") +// .zk_cluster("127.0.0.1:30000") +// .zk_root_path("/openmldb") +// .nsNum(2).tabletNum(3) +// 
.nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) +// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) +// .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") - .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") - .zk_cluster("127.0.0.1:30000") + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) - .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) - .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) .build(); OpenMLDBGlobalVar.env = "cluster"; diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml new file mode 100644 index 00000000000..28f15d29391 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file From 09a266df6e7be7d56fb33c541c5f705c0b412155 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 11 Jul 2022 12:27:48 +0800 Subject: [PATCH 032/172] add ns operator --- .../openmldb-test-java/openmldb-test-common/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 51c5e02bd0e..ff94cacead2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.5.2 - 0.5.2 + 0.5.0 + 0.5.0 From 63903144c392e53fb93ad79d4b1bacc35ed03a29 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 11 Jul 2022 14:30:01 +0800 Subject: [PATCH 033/172] add ns operator --- .../_4paradigm/openmldb/test_common/openmldb/NsClient.java | 7 ++++--- .../com/_4paradigm/openmldb/test_common/util/WaitUtil.java | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index bc975766408..c9088c170fc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -50,10 +50,10 @@ public void checkOPStatusDone(String dbName,String tableName){ if(lines.size()<=2){ return false; } - if (NsResultUtil.checkOPStatusAny(lines,"kFailed")) { - return false; - } return NsResultUtil.checkOPStatus(lines,"kDone"); + },()->{ + List lines = CommandUtil.run(nsCommand); + return NsResultUtil.checkOPStatusAny(lines,"kFailed"); }); Assert.assertTrue(b,"check op done failed."); } @@ -66,6 +66,7 @@ public List showTable(String dbName,String tableName){ public void checkTableIsAlive(String dbName,String tableName){ List lines = showTable(dbName,tableName); + Assert.assertTrue(lines.size()>2,"show 
table lines <= 2"); for(int i=2;i Date: Mon, 11 Jul 2022 15:53:01 +0800 Subject: [PATCH 034/172] add ns operator --- .../com/_4paradigm/openmldb/test_common/util/WaitUtil.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java index 23d606d9cea..8d4b322f718 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java @@ -9,10 +9,10 @@ @Slf4j public class WaitUtil { public static boolean waitCondition(Condition condition) { - return waitCondition(condition,10,600); + return waitCondition(condition,10,1800); } public static boolean waitCondition(Condition condition,Condition fail) { - return waitCondition(condition,fail,10,600); + return waitCondition(condition,fail,10,1800); } /** From 803acbbcda319916555a0b13584bcb6ccbe404ff Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 11 Jul 2022 18:03:13 +0800 Subject: [PATCH 035/172] add ns operator --- .../_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java | 1 + .../openmldb-deploy/test-suite/test_deploy.xml | 2 +- .../com/_4paradigm/openmldb/test_common/util/WaitUtil.java | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 3cdfd26cec7..7dac5155b51 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -212,6 +212,7 @@ public int deployNS(String testPath, String ip, int index, String zk_endpoint, S "sed -i "+sedSeparator+" 's@#--zk_cluster=.*@--zk_cluster=" + zk_endpoint + "@' " + testPath + ns_name + "/conf/nameserver.flags", "sed -i "+sedSeparator+" 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", "sed -i "+sedSeparator+" 's@--tablet=.*@#--tablet=127.0.0.1:9921@' "+testPath+ns_name+"/conf/nameserver.flags", + "sed -i "+sedSeparator+" 's@--tablet_heartbeat_timeout=.*@--tablet_heartbeat_timeout=1000@' "+testPath+ns_name+"/conf/nameserver.flags", "echo '--request_timeout_ms=60000' >> " + testPath + ns_name + "/conf/nameserver.flags" ); if(useName){ diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index d96ea93a46f..f6a768515a5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java index 8d4b322f718..23d606d9cea 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java @@ -9,10 
+9,10 @@ @Slf4j public class WaitUtil { public static boolean waitCondition(Condition condition) { - return waitCondition(condition,10,1800); + return waitCondition(condition,10,600); } public static boolean waitCondition(Condition condition,Condition fail) { - return waitCondition(condition,fail,10,1800); + return waitCondition(condition,fail,10,600); } /** From 17a450954fefd02d853c2cb4a80471e3baa59654 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 11 Jul 2022 18:45:32 +0800 Subject: [PATCH 036/172] add ns operator --- .../devops_test/high_availability/TestCluster.java | 14 +++++++------- .../test_common/openmldb/OpenMLDBDevops.java | 14 ++++++++------ .../openmldb/test_common/util/WaitUtil.java | 4 ++-- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 8ffb76bdfce..cc9e5acc418 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -23,14 +23,14 @@ public class TestCluster extends ClusterTest { @Test public void testMoreReplica(){ - SDKClient sdkClient = SDKClient.of(executor); - NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); - OpenMLDBDevops openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo); - // 创建磁盘表和内存表。 String dbName = "test_devops2"; String memoryTable = "test_memory"; String ssdTable = "test_ssd"; String hddTable = "test_hdd"; + SDKClient sdkClient = SDKClient.of(executor); + NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + OpenMLDBDevops 
openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + // 创建磁盘表和内存表。 int dataCount = 100; sdkClient.createAndUseDB(dbName); String memoryTableDDL = "create table test_memory(\n" + @@ -43,7 +43,7 @@ public void testMoreReplica(){ "c7 timestamp,\n" + "c8 date,\n" + "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3);"; + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; String ssdTableDDL = "create table test_ssd(\n" + "c1 string,\n" + "c2 smallint,\n" + @@ -54,7 +54,7 @@ public void testMoreReplica(){ "c7 timestamp,\n" + "c8 date,\n" + "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3,storage_mode=\"SSD\");"; + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; String hddTableDDL = "create table test_hdd(\n" + "c1 string,\n" + "c2 smallint,\n" + @@ -65,7 +65,7 @@ public void testMoreReplica(){ "c7 timestamp,\n" + "c8 date,\n" + "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=8,replicanum=3,storage_mode=\"HDD\");"; + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); // 插入一定量的数据 List> dataList = new ArrayList<>(); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index a95291d8406..b877976de94 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -6,18 +6,20 @@ public class OpenMLDBDevops { private OpenMLDBInfo openMLDBInfo; + private String 
dbName; private NsClient nsClient; private SDKClient sdkClient; private String basePath; - private OpenMLDBDevops(OpenMLDBInfo openMLDBInfo){ + private OpenMLDBDevops(OpenMLDBInfo openMLDBInfo,String dbName){ this.openMLDBInfo = openMLDBInfo; + this.dbName = dbName; this.nsClient = NsClient.of(openMLDBInfo); this.sdkClient = SDKClient.of(new OpenMLDBClient(openMLDBInfo.getZk_cluster(),openMLDBInfo.getZk_root_path()).getExecutor()); this.basePath = openMLDBInfo.getBasePath(); } - public static OpenMLDBDevops of(OpenMLDBInfo openMLDBInfo){ - return new OpenMLDBDevops(openMLDBInfo); + public static OpenMLDBDevops of(OpenMLDBInfo openMLDBInfo,String dbName){ + return new OpenMLDBDevops(openMLDBInfo,dbName); } public void operateTablet(int tabletIndex,String operator){ String command = String.format("sh %s/openmldb-tablet-%d/bin/start.sh %s tablet",basePath,tabletIndex+1,operator); @@ -25,9 +27,9 @@ public void operateTablet(int tabletIndex,String operator){ Tool.sleep(5*1000); String checkStatus = operator.equals("stop")?"offline":"online"; sdkClient.checkComponentStatus(openMLDBInfo.getTabletEndpoints().get(tabletIndex), checkStatus); - nsClient.checkOPStatusDone(null,null); + nsClient.checkOPStatusDone(dbName,null); if(!operator.equals("stop")) { - nsClient.checkTableIsAlive(null, null); + nsClient.checkTableIsAlive(dbName, null); } } public void operateTablet(String operator){ @@ -42,7 +44,7 @@ public void operateNs(int nsIndex,String operator){ Tool.sleep(5*1000); String checkStatus = operator.equals("stop")?"offline":"online"; sdkClient.checkComponentStatus(openMLDBInfo.getNsEndpoints().get(nsIndex), checkStatus); - nsClient.checkOPStatusDone(null,null); + nsClient.checkOPStatusDone(dbName,null); // if(!operator.equals("stop")) { // nsClient.checkTableIsAlive(null, null); // } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java index 23d606d9cea..a8c97406356 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java @@ -9,10 +9,10 @@ @Slf4j public class WaitUtil { public static boolean waitCondition(Condition condition) { - return waitCondition(condition,10,600); + return waitCondition(condition,10,1200); } public static boolean waitCondition(Condition condition,Condition fail) { - return waitCondition(condition,fail,10,600); + return waitCondition(condition,fail,10,1200); } /** From be18281085147ef7b8cfe14360dfe536ad3ac4a8 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 13:23:02 +0800 Subject: [PATCH 037/172] add ns operator --- .../devops_test/common/ClusterTest.java | 38 ++++---- .../high_availability/TestCluster.java | 44 +++++---- .../openmldb/devops_test/tmp/TestDevops.java | 91 +++++++++++++++++++ .../devops_test/tmp/TestSDKClient.java | 1 + .../test_common/bean/OpenMLDBResult.java | 2 +- .../chain/result/ResultSetHandler.java | 1 + .../test_common/openmldb/SDKClient.java | 7 +- 7 files changed, 144 insertions(+), 40 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestDevops.java diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index b602f5ca94e..f71afd7b4c1 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -49,32 +49,32 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") -// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") -// .zk_cluster("127.0.0.1:30000") -// .zk_root_path("/openmldb") -// .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) -// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) -// .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) -// .build(); -// OpenMLDBGlobalVar.env = "cluster"; - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") + .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") + .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") + .zk_cluster("127.0.0.1:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) + .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", 
"127.0.0.1:30002", "127.0.0.1:30003")) + .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) .build(); OpenMLDBGlobalVar.env = "cluster"; +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30000") +// .zk_root_path("/openmldb") +// .nsNum(2).tabletNum(3) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) +// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index cc9e5acc418..f28b30b506c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -95,30 +95,38 @@ public void testMoreReplica(){ //tablet 依次restart,数据可回复,可以访问。 openMLDBDevops.operateTablet("restart"); addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); - //3个tablet stop,不能访问。 - openMLDBDevops.operateTablet("stop"); - OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); - System.out.println(openMLDBResult.getMsg()); - // 1个tablet启动,数据可回复,分片所在的表,可以访问。 - 
openMLDBDevops.operateTablet(0,"start"); - addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); - //ns stop,可以正常访问。 + //1个ns stop,可以正常访问。 openMLDBDevops.operateNs(0,"stop"); addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); - //2个ns stop,不能访问。 - openMLDBDevops.operateNs(1,"stop"); - openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); - System.out.println(openMLDBResult.getMsg()); - //ns start 可以访问。 + // 1个ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); - // 单zk stop 后不能访问 + // 1个ns restart 可以访问。 + openMLDBDevops.operateNs(0,"restart"); + addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + // 单zk stop 在start后 可以访问 openMLDBDevops.operateZKOne("stop"); - openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); - System.out.println(openMLDBResult.getMsg()); - // 单zk start 后可以访问 + Tool.sleep(5000); openMLDBDevops.operateZKOne("start"); + OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + System.out.println(openMLDBResult.getMsg()); + // 单zk restart 后可以访问 + openMLDBDevops.operateZKOne("restart"); addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + //3个tablet stop,不能访问。 + openMLDBDevops.operateTablet("stop"); + openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + Assert.assertTrue(openMLDBResult.getMsg().contains("no tablet available for sqlfail to get tablet")); + +// // 1个tablet启动,数据可回复,分片所在的表,可以访问。 +// openMLDBDevops.operateTablet(0,"start"); +// addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + + //2个ns stop,不能访问。 +// openMLDBDevops.operateNs(1,"stop"); +// List lines = nsClient.runNs(dbName, 
"showtable"); +// System.out.println(openMLDBResult.getMsg()); + //一个 zk stop,可以正常访问 //3个zk stop,不能正常访问。 //一个zk start,可正常访问。 @@ -126,6 +134,8 @@ public void testMoreReplica(){ // 一个节点(ns leader 所在服务器)重启,leader可以正常访问,flower可以正常访问。 //一直查询某一个表,然后重启一个机器。 } + // 两个Tablet停止 + // 三个Tablet停止 public void addDataCheck(SDKClient sdkClient, NsClient nsClient,List tableNames,int originalCount,int addCount){ List> addDataList = new ArrayList<>(); for(int i=0;i> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + sdkClient.insertList(memoryTable,dataList); + sdkClient.insertList(ssdTable,dataList); + sdkClient.insertList(hddTable,dataList); + } + @Test + public void test2(){ + sdkClient.createAndUseDB(dbName); + OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + System.out.println(openMLDBResult.getMsg()); +// List lines = nsClient.runNs(dbName, "showtable"); +// System.out.println("lines = " + lines); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java index efa794d9df5..f867f227131 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java @@ -19,5 +19,6 @@ public void testComponents(){ OpenMLDBComamndFacade.sql(OpenMLDBGlobalVar.mainInfo,"test_devops","select * from test_ssd;"); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java index f440aff440f..13ea595adce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java @@ -47,7 +47,7 @@ public class OpenMLDBResult { public String toString() { StringBuilder builder = new StringBuilder("OpenMLDBResult{"); builder.append("sql=").append(sql); - builder.append("ok=").append(ok); + builder.append(", ok=").append(ok); if (!ok) { builder.append(", msg=").append(msg); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java index 1f51387a9ab..fde971c8bf0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java @@ -30,6 +30,7 @@ public void onHandle(Statement statement, OpenMLDBResult openMLDBResult) { List> result = ResultUtil.toList(rs); openMLDBResult.setCount(result.size()); openMLDBResult.setResult(result); + openMLDBResult.setMsg("success"); } } catch (SQLException e) { e.printStackTrace(); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index 
ff75a3af427..a80cd948816 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -33,12 +33,13 @@ public OpenMLDBResult execute(String sql) { openMLDBResult.setOk(ok); openMLDBResult.setSql(sql); ResultChainManager.of().toOpenMLDBResult(statement,openMLDBResult); - log.info(openMLDBResult.toString()); - return openMLDBResult; } catch (SQLException e) { + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); e.printStackTrace(); - throw new IllegalStateException(e); } + log.info(openMLDBResult.toString()); + return openMLDBResult; } public OpenMLDBResult execute(List sqlList) { OpenMLDBResult openMLDBResult = null; From c4f9eadcf42eea902b3d8961dbefccfd95601549 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 14:26:34 +0800 Subject: [PATCH 038/172] add ns operator --- .../devops_test/common/ClusterTest.java | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index f71afd7b4c1..b602f5ca94e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -49,32 +49,32 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ - 
OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") - .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") - .zk_cluster("127.0.0.1:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) - .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) - .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) - .build(); - OpenMLDBGlobalVar.env = "cluster"; - // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() // .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") -// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30000") +// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") +// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") +// .zk_cluster("127.0.0.1:30000") // .zk_root_path("/openmldb") // .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) +// .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) +// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) +// .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) // .build(); // OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") + .zk_root_path("/openmldb") + .nsNum(2).tabletNum(3) + 
.nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { From 807e01567a79d6b0cd651d34dcbf20c945ca2eff Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 15:31:00 +0800 Subject: [PATCH 039/172] add ns operator --- .../high_availability/TestCluster.java | 18 +++++++++--------- .../test_common/openmldb/NsClient.java | 1 + .../test_common/openmldb/OpenMLDBDevops.java | 4 ++-- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index f28b30b506c..885a73bd294 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -84,26 +84,26 @@ public void testMoreReplica(){ Assert.assertEquals(sdkClient.getTableRowCount(hddTable),dataCount,oneTabletStopMsg); // tablet start,数据可以回复,要看磁盘表和内存表。 openMLDBDevops.operateTablet(0,"start"); - addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); //创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 openMLDBDevops.operateTablet(0,"restart"); - 
addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 nsClient.makeSnapshot(dbName,memoryTable); nsClient.makeSnapshot(dbName,ssdTable); nsClient.makeSnapshot(dbName,hddTable); //tablet 依次restart,数据可回复,可以访问。 openMLDBDevops.operateTablet("restart"); - addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); //1个ns stop,可以正常访问。 openMLDBDevops.operateNs(0,"stop"); - addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); - addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); - addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk stop 在start后 可以访问 openMLDBDevops.operateZKOne("stop"); Tool.sleep(5000); @@ -112,7 +112,7 @@ public void testMoreReplica(){ System.out.println(openMLDBResult.getMsg()); // 单zk restart 后可以访问 openMLDBDevops.operateZKOne("restart"); - addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //3个tablet stop,不能访问。 openMLDBDevops.operateTablet("stop"); openMLDBResult = 
sdkClient.execute(String.format("select * from %s",memoryTable)); @@ -136,7 +136,7 @@ public void testMoreReplica(){ } // 两个Tablet停止 // 三个Tablet停止 - public void addDataCheck(SDKClient sdkClient, NsClient nsClient,List tableNames,int originalCount,int addCount){ + public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,List tableNames,int originalCount,int addCount){ List> addDataList = new ArrayList<>(); for(int i=0;i tab sdkClient.insertList(tableName,addDataList); Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); } - nsClient.checkTableOffSet(null,null); + nsClient.checkTableOffSet(dbName,null); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index c9088c170fc..0d4622ad9a1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -75,6 +75,7 @@ public void checkTableIsAlive(String dbName,String tableName){ } public void checkTableOffSet(String dbName,String tableName){ List lines = showTable(dbName,tableName); + Assert.assertTrue(lines.size()>2,"show table lines <= 2"); Map> table1 = NsResultUtil.getTableOffset(lines); for(List values:table1.values()){ for(Long offset:values){ diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index b877976de94..9d2a39d0876 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -42,8 +42,8 @@ public void operateNs(int nsIndex,String operator){ String command = String.format("sh %s/openmldb-ns-%d/bin/start.sh %s nameserver",basePath,nsIndex+1,operator); ExecutorUtil.run(command); Tool.sleep(5*1000); - String checkStatus = operator.equals("stop")?"offline":"online"; - sdkClient.checkComponentStatus(openMLDBInfo.getNsEndpoints().get(nsIndex), checkStatus); +// String checkStatus = operator.equals("stop")?"offline":"online"; +// sdkClient.checkComponentStatus(openMLDBInfo.getNsEndpoints().get(nsIndex), checkStatus); nsClient.checkOPStatusDone(dbName,null); // if(!operator.equals("stop")) { // nsClient.checkTableIsAlive(null, null); From baa79fe12c85427cba200c85904c0c77a16a31c9 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 16:25:06 +0800 Subject: [PATCH 040/172] add ns operator --- .../high_availability/TestCluster.java | 12 ++++++++++++ .../test_common/openmldb/OpenMLDBDevops.java | 15 +++++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 885a73bd294..3d6a0138afc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -13,6 +13,7 @@ import 
org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; import org.testng.Assert; +import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import org.testng.collections.Lists; @@ -21,6 +22,17 @@ import java.util.List; public class TestCluster extends ClusterTest { + private String dbName; + private SDKClient sdkClient; + private NsClient nsClient; + private OpenMLDBDevops openMLDBDevops; + @BeforeClass + public void beforeClass(){ + dbName = "test_devops2"; + sdkClient = SDKClient.of(executor); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + } @Test public void testMoreReplica(){ String dbName = "test_devops2"; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index 9d2a39d0876..a9ac2d6eda2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -1,8 +1,13 @@ package com._4paradigm.openmldb.test_common.openmldb; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.util.Tool; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import org.testng.Assert; + +import java.util.List; +import java.util.stream.Collectors; public class OpenMLDBDevops { private OpenMLDBInfo openMLDBInfo; @@ -42,8 +47,14 @@ public void operateNs(int nsIndex,String operator){ String command = String.format("sh %s/openmldb-ns-%d/bin/start.sh %s 
nameserver",basePath,nsIndex+1,operator); ExecutorUtil.run(command); Tool.sleep(5*1000); -// String checkStatus = operator.equals("stop")?"offline":"online"; -// sdkClient.checkComponentStatus(openMLDBInfo.getNsEndpoints().get(nsIndex), checkStatus); + String nsEndpoint = openMLDBInfo.getNsEndpoints().get(nsIndex); + if(operator.equals("stop")){ + OpenMLDBResult openMLDBResult = sdkClient.execute("show components;"); + List list = openMLDBResult.getResult().stream().map(l -> l.get(0)).collect(Collectors.toList()); + Assert.assertTrue(!list.contains(nsEndpoint),"ns stop, show components failed."); + }else { + sdkClient.checkComponentStatus(nsEndpoint, "online"); + } nsClient.checkOPStatusDone(dbName,null); // if(!operator.equals("stop")) { // nsClient.checkTableIsAlive(null, null); From e240d227c485c77bddc208d02a74627f2fe68707 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 16:25:42 +0800 Subject: [PATCH 041/172] add ns operator --- .../openmldb/devops_test/high_availability/TestCluster.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 3d6a0138afc..89cdb83b1fd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -35,13 +35,9 @@ public void beforeClass(){ } @Test public void testMoreReplica(){ - String dbName = "test_devops2"; String memoryTable = "test_memory"; String ssdTable = "test_ssd"; String hddTable = "test_hdd"; - SDKClient sdkClient = SDKClient.of(executor); - NsClient nsClient = 
NsClient.of(OpenMLDBGlobalVar.mainInfo); - OpenMLDBDevops openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); // 创建磁盘表和内存表。 int dataCount = 100; sdkClient.createAndUseDB(dbName); From fa53ba62fd945997865d447a60c260f2a848a713 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 18:34:34 +0800 Subject: [PATCH 042/172] add ns operator --- .../devops_test/common/ClusterTest.java | 38 +++++++++---------- .../high_availability/TestCluster.java | 12 ++++-- .../openmldb/devops_test/tmp/TestDevops.java | 4 ++ .../test_common/openmldb/OpenMLDBDevops.java | 5 +-- 4 files changed, 32 insertions(+), 27 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index b602f5ca94e..f71afd7b4c1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -49,32 +49,32 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") -// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") -// .zk_cluster("127.0.0.1:30000") -// .zk_root_path("/openmldb") -// .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) -// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) -// 
.apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) -// .build(); -// OpenMLDBGlobalVar.env = "cluster"; - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") + .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") + .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") + .zk_cluster("127.0.0.1:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) + .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) + .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) .build(); OpenMLDBGlobalVar.env = "cluster"; +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30000") +// .zk_root_path("/openmldb") +// .nsNum(2).tabletNum(3) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) +// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { diff --git 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 89cdb83b1fd..e5ddeb2a507 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -2,10 +2,7 @@ import com._4paradigm.openmldb.devops_test.common.ClusterTest; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.openmldb.NsClient; -import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; -import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import com._4paradigm.openmldb.test_common.openmldb.*; import com._4paradigm.openmldb.test_common.util.SDKByJDBCUtil; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.qa.openmldb_deploy.util.Tool; @@ -158,4 +155,11 @@ public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,Li } nsClient.checkTableOffSet(dbName,null); } + public void resetClient(){ + OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = openMLDBClient.getExecutor(); + sdkClient = SDKClient.of(executor); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestDevops.java 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestDevops.java index b816cd05b8f..a5ff0a2ff2d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestDevops.java @@ -88,4 +88,8 @@ public void test2(){ // List lines = nsClient.runNs(dbName, "showtable"); // System.out.println("lines = " + lines); } + @Test + public void test3(){ + openMLDBDevops.operateNs(0,"stop"); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index a9ac2d6eda2..249587cce74 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -55,10 +55,7 @@ public void operateNs(int nsIndex,String operator){ }else { sdkClient.checkComponentStatus(nsEndpoint, "online"); } - nsClient.checkOPStatusDone(dbName,null); -// if(!operator.equals("stop")) { -// nsClient.checkTableIsAlive(null, null); -// } +// nsClient.checkOPStatusDone(dbName,null); } public void operateZKOne(String operator){ String command = String.format("sh %s/zookeeper-3.4.14/bin/zkServer.sh %s",basePath,operator); From 588941b66b32dde89e223b5b443c83f36fba5c3e Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 18:34:57 +0800 Subject: [PATCH 043/172] add ns operator --- .../devops_test/common/ClusterTest.java | 38 +++++++++---------- 1 file changed, 19 
insertions(+), 19 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index f71afd7b4c1..b602f5ca94e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -49,32 +49,32 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") - .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") - .zk_cluster("127.0.0.1:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) - .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) - .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) - .build(); - OpenMLDBGlobalVar.env = "cluster"; - // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() // .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") -// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30000") +// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") +// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") +// .zk_cluster("127.0.0.1:30000") // .zk_root_path("/openmldb") // .nsNum(2).tabletNum(3) -// 
.nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) +// .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) +// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) +// .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) // .build(); // OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") + .zk_root_path("/openmldb") + .nsNum(2).tabletNum(3) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { From da25cc96e59ce6b82e32b9cc030a60901324c50f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 12 Jul 2022 19:53:40 +0800 Subject: [PATCH 044/172] add ns operator --- .../test_common/openmldb/OpenMLDBDevops.java | 4 +--- .../openmldb/test_common/openmldb/SDKClient.java | 12 ++++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index 249587cce74..a9034d576db 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -49,9 +49,7 @@ public void operateNs(int nsIndex,String operator){ Tool.sleep(5*1000); String nsEndpoint = openMLDBInfo.getNsEndpoints().get(nsIndex); if(operator.equals("stop")){ - OpenMLDBResult openMLDBResult = sdkClient.execute("show components;"); - List list = openMLDBResult.getResult().stream().map(l -> l.get(0)).collect(Collectors.toList()); - Assert.assertTrue(!list.contains(nsEndpoint),"ns stop, show components failed."); + sdkClient.checkComponentNotExist(nsEndpoint); }else { sdkClient.checkComponentStatus(nsEndpoint, "online"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index a80cd948816..47c647bed12 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -16,6 +16,8 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; + @Slf4j public class SDKClient { private Statement statement; @@ -58,6 +60,16 @@ public void checkComponentStatus(String endpoint,String status){ }); Assert.assertTrue(b,"check endpoint:"+endpoint+",status:"+status+"failed."); } + public void checkComponentNotExist(String endpoint){ + String sql = "show components;"; + boolean b = WaitUtil.waitCondition(()->{ + OpenMLDBResult openMLDBResult = execute(sql); + 
List> rows = openMLDBResult.getResult(); + long count = rows.stream().filter(row -> row.get(0).equals(endpoint)).count(); + return count==0; + }); + Assert.assertTrue(b,"check endpoint not exist :"+endpoint +"failed."); + } public void createDB(String dbName){ String sql = String.format("create database %s",dbName); execute(sql); From 00e99f9bd436f4f46893f7822db07a7604aba4bd Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 08:20:55 +0800 Subject: [PATCH 045/172] add ns operator --- .../high_availability/TestCluster.java | 8 +- .../openmldb/devops_test/tmp/TestTmp.java | 160 ++++++++++++++++++ .../test_suite/test_tmp.xml | 8 +- 3 files changed, 168 insertions(+), 8 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index e5ddeb2a507..35cf8697bd7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -111,16 +111,16 @@ public void testMoreReplica(){ addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk stop 在start后 可以访问 openMLDBDevops.operateZKOne("stop"); - Tool.sleep(5000); + Tool.sleep(3000); openMLDBDevops.operateZKOne("start"); - OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); - System.out.println(openMLDBResult.getMsg()); + Tool.sleep(3000); + 
addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk restart 后可以访问 openMLDBDevops.operateZKOne("restart"); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //3个tablet stop,不能访问。 openMLDBDevops.operateTablet("stop"); - openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); Assert.assertTrue(openMLDBResult.getMsg().contains("no tablet available for sqlfail to get tablet")); // // 1个tablet启动,数据可回复,分片所在的表,可以访问。 diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java new file mode 100644 index 00000000000..9373cccf4ce --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java @@ -0,0 +1,160 @@ +package com._4paradigm.openmldb.devops_test.tmp; + +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.openmldb.*; +import com._4paradigm.qa.openmldb_deploy.util.Tool; +import org.apache.commons.lang3.RandomStringUtils; +import org.testng.Assert; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.util.ArrayList; +import java.util.List; + +public class TestTmp extends ClusterTest { + private String dbName; + private SDKClient sdkClient; + private NsClient nsClient; + private OpenMLDBDevops openMLDBDevops; + @BeforeClass + public void beforeClass(){ + dbName = "test_devops2"; + sdkClient = SDKClient.of(executor); + nsClient = 
NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + } + @Test + public void testMoreReplica(){ + String memoryTable = "test_memory"; + String ssdTable = "test_ssd"; + String hddTable = "test_hdd"; + // 创建磁盘表和内存表。 + int dataCount = 100; + sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; + sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + // 插入一定量的数据 + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + sdkClient.insertList(memoryTable,dataList); + sdkClient.insertList(ssdTable,dataList); + sdkClient.insertList(hddTable,dataList); + // 其中一个tablet stop,leader 内存表和磁盘表可以正常访问,flower 内存表和磁盘表可以正常访问。 +// openMLDBDevops.operateTablet(0,"stop"); +// String oneTabletStopMsg = "tablet1 stop table row count check failed."; +// Assert.assertEquals(sdkClient.getTableRowCount(memoryTable),dataCount,oneTabletStopMsg); +// 
Assert.assertEquals(sdkClient.getTableRowCount(ssdTable),dataCount,oneTabletStopMsg); +// Assert.assertEquals(sdkClient.getTableRowCount(hddTable),dataCount,oneTabletStopMsg); +// // tablet start,数据可以回复,要看磁盘表和内存表。 +// openMLDBDevops.operateTablet(0,"start"); +// addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); +// //创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 +// openMLDBDevops.operateTablet(0,"restart"); +// addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); +// //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 +// nsClient.makeSnapshot(dbName,memoryTable); +// nsClient.makeSnapshot(dbName,ssdTable); +// nsClient.makeSnapshot(dbName,hddTable); +// //tablet 依次restart,数据可回复,可以访问。 +// openMLDBDevops.operateTablet("restart"); +// addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); + //1个ns stop,可以正常访问。 + openMLDBDevops.operateNs(0,"stop"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); + // 1个ns start 可以访问。 + openMLDBDevops.operateNs(0,"start"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); + // 1个ns restart 可以访问。 + openMLDBDevops.operateNs(0,"restart"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); + // 单zk stop 在start后 可以访问 + openMLDBDevops.operateZKOne("stop"); + Tool.sleep(3000); + openMLDBDevops.operateZKOne("start"); + Tool.sleep(3000); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); + // 单zk restart 后可以访问 + openMLDBDevops.operateZKOne("restart"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); + //3个tablet stop,不能访问。 + openMLDBDevops.operateTablet("stop"); + OpenMLDBResult openMLDBResult = 
sdkClient.execute(String.format("select * from %s",memoryTable)); + Assert.assertTrue(openMLDBResult.getMsg().contains("no tablet available for sqlfail to get tablet")); + +// // 1个tablet启动,数据可回复,分片所在的表,可以访问。 +// openMLDBDevops.operateTablet(0,"start"); +// addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + + //2个ns stop,不能访问。 +// openMLDBDevops.operateNs(1,"stop"); +// List lines = nsClient.runNs(dbName, "showtable"); +// System.out.println(openMLDBResult.getMsg()); + + //一个 zk stop,可以正常访问 + //3个zk stop,不能正常访问。 + //一个zk start,可正常访问。 + //3个 zk start,可正常访问。 + // 一个节点(ns leader 所在服务器)重启,leader可以正常访问,flower可以正常访问。 + //一直查询某一个表,然后重启一个机器。 + } + // 两个Tablet停止 + // 三个Tablet停止 + public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,List tableNames,int originalCount,int addCount){ + List> addDataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList(c1 + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + addDataList.add(list); + } + String msg = "table add data check count failed."; + for(String tableName:tableNames){ + sdkClient.insertList(tableName,addDataList); + Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); + } + nsClient.checkTableOffSet(dbName,null); + } + public void resetClient(){ + OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = openMLDBClient.getExecutor(); + sdkClient = SDKClient.of(executor); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml index bdb85ced850..d6a4221fefd 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_tmp.xml @@ -3,10 +3,10 @@ - - - - + + + + From 6a739d26183748a0ffb18919ec5c30eba2181eed Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 08:41:38 +0800 Subject: [PATCH 046/172] add ns operator --- .../com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java | 5 ++++- .../_4paradigm/openmldb/test_common/openmldb/SDKClient.java | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java index 9373cccf4ce..ae174a576bf 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java @@ -4,6 +4,7 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.openmldb.*; import com._4paradigm.qa.openmldb_deploy.util.Tool; +import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.RandomStringUtils; import org.testng.Assert; import org.testng.annotations.BeforeClass; @@ -145,7 +146,9 @@ public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,Li } String msg = "table add data check count failed."; for(String tableName:tableNames){ - sdkClient.insertList(tableName,addDataList); + if (CollectionUtils.isNotEmpty(addDataList)) { + sdkClient.insertList(tableName,addDataList); + } Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); } nsClient.checkTableOffSet(dbName,null); diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index 47c647bed12..e8914ef1977 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -8,6 +8,7 @@ import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.openmldb.test_common.util.WaitUtil; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; import org.testng.Assert; import org.testng.collections.Lists; From c6ebc7776b197777712b19ad4aa5daf5df2d076c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 09:44:09 +0800 Subject: [PATCH 047/172] add ns operator --- .../java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java index ae174a576bf..d0da20772f0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java @@ -98,12 +98,14 @@ public void testMoreReplica(){ // addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); //1个ns stop,可以正常访问。 openMLDBDevops.operateNs(0,"stop"); + resetClient(); 
addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); // 1个ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); // 1个ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); + resetClient(); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); // 单zk stop 在start后 可以访问 openMLDBDevops.operateZKOne("stop"); From c0efd5d47b0291d9d948e65dd2fb8840e5a35021 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 10:06:00 +0800 Subject: [PATCH 048/172] add ns operator --- .../openmldb/devops_test/tmp/TestTmp.java | 3 +++ .../openmldb/test_common/bean/SQLType.java | 18 +++++++++++------- .../test_common/openmldb/SDKClient.java | 3 +++ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java index d0da20772f0..1fa877c30ed 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java @@ -11,6 +11,7 @@ import org.testng.annotations.Test; import org.testng.collections.Lists; +import java.sql.Statement; import java.util.ArrayList; import java.util.List; @@ -161,5 +162,7 @@ public void resetClient(){ sdkClient = SDKClient.of(executor); nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + sdkClient.setOnline(); + sdkClient.createAndUseDB(dbName); } } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java index 8dc87602abb..1f24d82c0ea 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java @@ -4,6 +4,7 @@ import org.testng.collections.Lists; import java.util.List; +import java.util.Locale; import java.util.Set; public enum SQLType { @@ -14,23 +15,26 @@ public enum SQLType { INSERT, CREATE, DROP, - USE + USE, + SET ; public static final Set RESULT_SET = Sets.newHashSet(SELECT, SHOW, DEPLOY); // public static final List VOID = Lists.newArrayList(CREATE,DROP,USE,INSERT); public static SQLType parseSQLType(String sql){ - if(sql.startsWith("select")){ + if(sql.toLowerCase().startsWith("select")){ return SELECT; - }else if (sql.startsWith("insert into")) { + }else if (sql.toLowerCase().startsWith("insert into")) { return INSERT; - }else if (sql.startsWith("show")) { + }else if (sql.toLowerCase().startsWith("show")) { return SHOW; - }else if (sql.startsWith("create")) { + }else if (sql.toLowerCase().startsWith("create")) { return CREATE; - }else if (sql.startsWith("drop")) { + }else if (sql.toLowerCase().startsWith("drop")) { return DROP; - }else if (sql.startsWith("use")) { + }else if (sql.toLowerCase().startsWith("use")) { return USE; + }else if (sql.toLowerCase().startsWith("set")) { + return SET; } throw new IllegalArgumentException("no match sql type,sql:"+sql); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index e8914ef1977..1e9fb856b99 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -75,6 +75,9 @@ public void createDB(String dbName){ String sql = String.format("create database %s",dbName); execute(sql); } + public void setOnline(){ + execute("SET @@execute_mode='online';"); + } public void useDB(String dbName){ String sql = String.format("use %s",dbName); execute(sql); From b7261ec8a389e842d504b8dca9649a71520cc208 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 10:54:39 +0800 Subject: [PATCH 049/172] add ns operator --- .../devops_test/high_availability/TestCluster.java | 11 +++++------ .../_4paradigm/openmldb/devops_test/tmp/TestTmp.java | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 35cf8697bd7..08d629f365c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -3,18 +3,13 @@ import com._4paradigm.openmldb.devops_test.common.ClusterTest; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.openmldb.*; -import 
com._4paradigm.openmldb.test_common.util.SDKByJDBCUtil; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.qa.openmldb_deploy.util.Tool; -import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import org.testng.collections.Lists; -import java.io.Serializable; import java.util.ArrayList; import java.util.List; @@ -102,12 +97,14 @@ public void testMoreReplica(){ addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); //1个ns stop,可以正常访问。 openMLDBDevops.operateNs(0,"stop"); + resetClient(); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); + resetClient(); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk stop 在start后 可以访问 openMLDBDevops.operateZKOne("stop"); @@ -121,7 +118,7 @@ public void testMoreReplica(){ //3个tablet stop,不能访问。 openMLDBDevops.operateTablet("stop"); OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); - Assert.assertTrue(openMLDBResult.getMsg().contains("no tablet available for sqlfail to get tablet")); + Assert.assertTrue(openMLDBResult.getMsg().contains("fail")); // // 1个tablet启动,数据可回复,分片所在的表,可以访问。 // openMLDBDevops.operateTablet(0,"start"); @@ -161,5 +158,7 @@ public void resetClient(){ sdkClient = SDKClient.of(executor); nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + sdkClient.setOnline(); + 
sdkClient.createAndUseDB(dbName); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java index 1fa877c30ed..f32e1c56e74 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestTmp.java @@ -120,7 +120,7 @@ public void testMoreReplica(){ //3个tablet stop,不能访问。 openMLDBDevops.operateTablet("stop"); OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); - Assert.assertTrue(openMLDBResult.getMsg().contains("no tablet available for sqlfail to get tablet")); + Assert.assertTrue(openMLDBResult.getMsg().contains("fail")); // // 1个tablet启动,数据可回复,分片所在的表,可以访问。 // openMLDBDevops.operateTablet(0,"start"); From 66b238f0c213db9dff821004929df578f83c7f4a Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 16:07:33 +0800 Subject: [PATCH 050/172] add ns operator --- .../test-suite/test_deploy_single_node.xml | 2 +- .../devops_test/common/ClusterTest.java | 43 +++++----- .../high_availability/TestCluster.java | 78 +++++++++++++++++++ .../openmldb/devops_test/tmp/TestTmp.java | 32 ++++---- .../test_suite/test_cluster.xml | 6 +- .../test_suite/test_single.xml | 14 ++++ .../test_common/openmldb/SDKClient.java | 6 +- 7 files changed, 139 insertions(+), 42 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml index 83747359d71..7e9061c08b0 
100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml @@ -2,7 +2,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index b602f5ca94e..fde70450f7c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -48,33 +48,38 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setOpenMLDBPath(openMLDBPath); openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + }else if(env.equalsIgnoreCase("single")){ + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(true); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(1, 1); }else{ -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") -// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") -// .zk_cluster("127.0.0.1:30000") -// .zk_root_path("/openmldb") -// .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) -// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) -// .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) -// .build(); -// OpenMLDBGlobalVar.env = "cluster"; - 
OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") + .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") + .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") + .zk_cluster("127.0.0.1:30000") .zk_root_path("/openmldb") .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) + .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) + .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) .build(); OpenMLDBGlobalVar.env = "cluster"; +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30000") +// .zk_root_path("/openmldb") +// .nsNum(2).tabletNum(3) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) +// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 08d629f365c..d58d7394151 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -138,6 +138,84 @@ public void testMoreReplica(){ } // 两个Tablet停止 // 三个Tablet停止 + + @Test + public void testSingle(){ + String memoryTable = "test_memory"; + String ssdTable = "test_ssd"; + String hddTable = "test_hdd"; + // 创建磁盘表和内存表。 + int dataCount = 100; + sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1,storage_mode=\"HDD\");"; + sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + // 插入一定量的数据 + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + 
sdkClient.insertList(memoryTable,dataList); + sdkClient.insertList(ssdTable,dataList); + sdkClient.insertList(hddTable,dataList); + // tablet stop,不能访问 + openMLDBDevops.operateTablet(0,"stop"); + OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); + Assert.assertTrue(openMLDBResult.getMsg().contains("fail")); + // tablet start,数据可以回复,要看磁盘表和内存表。 + openMLDBDevops.operateTablet(0,"start"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); + //make snapshot,在重启tablet,数据可回复。 + nsClient.makeSnapshot(dbName,memoryTable); + nsClient.makeSnapshot(dbName,ssdTable); + nsClient.makeSnapshot(dbName,hddTable); + //重启tablet,数据可回复,内存表和磁盘表可以正常访问。 + openMLDBDevops.operateTablet(0,"restart"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); + //ns stop start 可以正常访问。 + openMLDBDevops.operateNs(0,"stop"); + resetClient(); + //ns start 可以访问。 + openMLDBDevops.operateNs(0,"start"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); + //ns restart 可以访问。 + openMLDBDevops.operateNs(0,"restart"); + resetClient(); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); + } + public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,List tableNames,int originalCount,int addCount){ List> addDataList = new ArrayList<>(); for(int i=0;i - - - + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml new file mode 100644 index 00000000000..bbdc8aa901a --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index 1e9fb856b99..3ec5f4383b6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -32,9 +32,9 @@ public static SDKClient of(SqlExecutor executor){ public OpenMLDBResult execute(String sql) { OpenMLDBResult openMLDBResult = new OpenMLDBResult(); try { + openMLDBResult.setSql(sql); boolean ok = statement.execute(sql); openMLDBResult.setOk(ok); - openMLDBResult.setSql(sql); ResultChainManager.of().toOpenMLDBResult(statement,openMLDBResult); } catch (SQLException e) { openMLDBResult.setOk(false); @@ -85,9 +85,9 @@ public void useDB(String dbName){ public void createAndUseDB(String dbName){ List sqlList = new ArrayList<>(); if (!SDKUtil.dbIsExist(statement,dbName)) { - sqlList.add(String.format("create database %s", dbName)); + sqlList.add(String.format("create database %s;", dbName)); } - sqlList.add(String.format("use %s", dbName)); + sqlList.add(String.format("use %s;", dbName)); execute(sqlList); } public void insert(String tableName,List list){ From 9a759083ac1b623325530ec02f2d5c2e2f102bf6 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 16:55:43 +0800 Subject: [PATCH 051/172] add ns operator --- .../common/OpenMLDBDeploy.java | 5 +++ .../test/TmpDeploySingleNodeCluster.java | 3 +- .../devops_test/common/ClusterTest.java | 37 +++++++++++++------ 3 files changed, 32 insertions(+), 13 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java 
b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 7dac5155b51..17e6bc7e5bc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -48,6 +48,7 @@ public class OpenMLDBDeploy { private String sparkYarnJars = ""; private String offlineDataPrefix = "file:///tmp/openmldb_offline_storage/"; private String nameNodeUri = "172.27.12.215:8020"; + private int systemTableReplicaNum = 2; public static final int SLEEP_TIME = 10*1000; @@ -215,6 +216,10 @@ public int deployNS(String testPath, String ip, int index, String zk_endpoint, S "sed -i "+sedSeparator+" 's@--tablet_heartbeat_timeout=.*@--tablet_heartbeat_timeout=1000@' "+testPath+ns_name+"/conf/nameserver.flags", "echo '--request_timeout_ms=60000' >> " + testPath + ns_name + "/conf/nameserver.flags" ); + // --system_table_replica_num=2 + if(systemTableReplicaNum!=2){ + commands.add("sed -i "+sedSeparator+" 's@--system_table_replica_num=.*@--system_table_replica_num="+systemTableReplicaNum+"@' " + testPath + ns_name + "/conf/nameserver.flags"); + } if(useName){ commands.add("sed -i "+sedSeparator+" 's/--endpoint=.*/#&/' " + testPath + ns_name + "/conf/nameserver.flags"); commands.add("echo '--use_name=true' >> " + testPath + ns_name + "/conf/nameserver.flags"); diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java index 005ad2e451e..e443dbcc68d 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/test/java/com/_4paradigm/qa/openmldb_deploy/test/TmpDeploySingleNodeCluster.java @@ -14,7 +14,8 @@ public void testTmp(@Optional("tmp") String version,@Optional("") String openMLD deploy.setOpenMLDBPath(openMLDBPath); deploy.setCluster(false); deploy.setSparkMaster("local"); - OpenMLDBInfo openMLDBInfo = deploy.deployCluster(1, 2); + deploy.setSystemTableReplicaNum(1); + OpenMLDBInfo openMLDBInfo = deploy.deployCluster(1, 1); System.out.println(openMLDBInfo); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index fde70450f7c..2fbd54441b5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -54,18 +54,18 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(1, 1); }else{ - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") - .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") - .zk_cluster("127.0.0.1:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) - .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) - 
.apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) - .build(); - OpenMLDBGlobalVar.env = "cluster"; +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") +// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") +// .zk_cluster("127.0.0.1:30000") +// .zk_root_path("/openmldb") +// .nsNum(2).tabletNum(3) +// .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) +// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) +// .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() // .deployType(OpenMLDBDeployType.CLUSTER) @@ -80,6 +80,19 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi // .build(); // OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/single") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30008") + .zk_root_path("/openmldb") + .nsNum(1).tabletNum(1) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { From 85eb46d1290cdf0bf9c4be00376bb4bee792c782 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 17:45:44 +0800 Subject: [PATCH 052/172] add ns operator --- .../_4paradigm/openmldb/devops_test/common/ClusterTest.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 2fbd54441b5..dee8db870d6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -87,9 +87,9 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .zk_cluster("172.24.4.55:30008") .zk_root_path("/openmldb") .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30011")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30012")) .build(); OpenMLDBGlobalVar.env = "cluster"; From 40ab5ff3f5a531c2a90e33482243921f9a3fa820 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 18:34:34 +0800 Subject: [PATCH 053/172] add ns operator --- .../_4paradigm/openmldb/devops_test/common/ClusterTest.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index dee8db870d6..9c8d7a6863a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java 
+++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -88,8 +88,9 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .zk_root_path("/openmldb") .nsNum(1).tabletNum(1) .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30011")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30012")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) + .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) .build(); OpenMLDBGlobalVar.env = "cluster"; From 1c9ca1fb76f7113546a40c64e450185618e7a731 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 13 Jul 2022 19:01:41 +0800 Subject: [PATCH 054/172] add ns operator --- .../openmldb/java_sdk_test/util/CaseOutputUtil.java | 2 +- .../openmldb/test_common/openmldb/NsClient.java | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java index 357b30668e2..d4511f8b4ce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java @@ -136,6 +136,6 @@ public static void findAllYml(String path,List ymlAll){ public static void main(String[] args) { - fromYmlToCsv("/Users/zhaowei/code/4paradigm/rtidb/cases/integration/v1","./out_excel"); + fromYmlToCsv("/Users/zhaowei/code/4paradigm/OpenMLDB/cases/function","./out_excel"); } } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 0d4622ad9a1..8b9e5408c11 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -88,16 +88,14 @@ public void makeSnapshot(String dbName,String tableName,int pid){ String nsCommand = genNsCommand(dbName,command); List lines = CommandUtil.run(nsCommand); Assert.assertEquals(lines.get(0),"MakeSnapshot ok"); + Tool.sleep(3*1000); checkTableOffSet(dbName,tableName); + checkOPStatusDone(dbName,tableName); } public void makeSnapshot(String dbName,String tableName){ List pidList = getPid(dbName,tableName); for(Integer pid:pidList) { - String command = String.format("makesnapshot %s %d", tableName, pid); - String nsCommand = genNsCommand(dbName, command); - List lines = CommandUtil.run(nsCommand); - Assert.assertEquals(lines.get(0), "MakeSnapshot ok"); - checkTableOffSet(dbName, tableName); + makeSnapshot(dbName,tableName,pid); } } public List getPid(String dbName,String tableName){ From 4cfac348e4dfa7900e0f29f5e2fc4e6616ef3a54 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 14 Jul 2022 15:15:48 +0800 Subject: [PATCH 055/172] add ns operator --- .gitignore | 3 +- .../devops_test/common/ClusterTest.java | 42 +++++++++---------- .../high_availability/TestCluster.java | 6 ++- .../entity/FesqlDataProvider.java | 2 +- .../java_sdk_test/util/CaseOutputUtil.java | 2 +- 5 files changed, 30 insertions(+), 25 deletions(-) diff --git a/.gitignore b/.gitignore index dfd2e572e53..90858ac5636 100644 --- a/.gitignore +++ b/.gitignore @@ -98,5 +98,6 @@ python/logs 
**/scalastyle-output.xml # test -logs +logs/ +out/ diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 9c8d7a6863a..4ecaba8bebd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -65,35 +65,35 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi // .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) // .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) // .build(); -// OpenMLDBGlobalVar.env = "cluster"; - -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") -// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30000") -// .zk_root_path("/openmldb") -// .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) -// .build(); // OpenMLDBGlobalVar.env = "cluster"; OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/single") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30008") + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + 
.openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") .zk_root_path("/openmldb") - .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) - .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) + .nsNum(2).tabletNum(3) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) .build(); OpenMLDBGlobalVar.env = "cluster"; +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/home/zhaowei01/openmldb-auto-test/single") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30008") +// .zk_root_path("/openmldb") +// .nsNum(1).tabletNum(1) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) +// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) +// .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index d58d7394151..660673cc01f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -115,8 +115,12 @@ public void testMoreReplica(){ // 单zk restart 后可以访问 openMLDBDevops.operateZKOne("restart"); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + // 2个tablet stop 可以访问 + openMLDBDevops.operateTablet(0,"stop"); + openMLDBDevops.operateTablet(1,"stop"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //3个tablet stop,不能访问。 - openMLDBDevops.operateTablet("stop"); + openMLDBDevops.operateTablet(2,"stop"); OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); Assert.assertTrue(openMLDBResult.getMsg().contains("fail")); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java index ffdfaf04ab2..9c87fee9c1c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java @@ -44,7 +44,7 @@ public static FesqlDataProvider dataProviderGenerator(String caseFile) throws Fi FesqlDataProvider testDateProvider = yaml.loadAs(testDataStream, FesqlDataProvider.class); return testDateProvider; } catch (Exception e) { - logger.error("fail to load yaml: ", caseFile); + logger.error("fail to load yaml:{}", caseFile); e.printStackTrace(); FesqlDataProvider nullDataProvider = new FesqlDataProvider(); SQLCase failCase = new SQLCase(); diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java index d4511f8b4ce..13334233573 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java @@ -136,6 +136,6 @@ public static void findAllYml(String path,List ymlAll){ public static void main(String[] args) { - fromYmlToCsv("/Users/zhaowei/code/4paradigm/OpenMLDB/cases/function","./out_excel"); + fromYmlToCsv("/Users/zhaowei/code/4paradigm/OpenMLDB/cases/function","./out"); } } From 39e55b43c3cd4177894f3524ce84b8f2241117f8 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 15 Jul 2022 14:06:37 +0800 Subject: [PATCH 056/172] add ns operator --- .../qa/openmldb_deploy/bean/OpenMLDBInfo.java | 1 + .../common/OpenMLDBDeploy.java | 23 ++++++----- .../test-suite/test_deploy.xml | 2 +- .../devops_test/common/ClusterTest.java | 4 ++ .../high_availability/TestCluster.java | 16 ++++++-- .../node_expansion/TestCluster.java | 38 +++++++++++++++++++ .../test_common/openmldb/SDKClient.java | 3 +- 7 files changed, 72 insertions(+), 15 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java index 7e7c42ffc92..d37f11100c2 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java @@ -34,6 +34,7 @@ public class OpenMLDBInfo { private int port; private String basePath; private String openMLDBPath; + private String openMLDBDirectoryName; private String zk_cluster; private String zk_root_path; private int nsNum; diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 17e6bc7e5bc..48485b02fcb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -39,7 +39,7 @@ public class OpenMLDBDeploy { private String installPath; private String version; private String openMLDBUrl; - private String openMLDBName; + private String openMLDBDirectoryName; private String openMLDBPath; private boolean useName; private boolean isCluster = true; @@ -94,7 +94,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ file.mkdirs(); } int zkPort = deployZK(testPath); - downloadOpenMLDB(testPath); + String openMLDBDirectoryName = downloadOpenMLDB(testPath); String zk_point = ip+":"+zkPort; builder.zk_cluster(zk_point).zk_root_path("/openmldb"); builder.nsEndpoints(Lists.newArrayList()).nsNames(Lists.newArrayList()); @@ -102,6 +102,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ builder.apiServerEndpoints(Lists.newArrayList()).apiServerNames(Lists.newArrayList()); 
builder.taskManagerEndpoints(Lists.newArrayList()); builder.openMLDBPath(testPath+"/openmldb-ns-1/bin/openmldb"); + builder.openMLDBDirectoryName(openMLDBDirectoryName); OpenMLDBInfo fedbInfo = builder.build(); for(int i=1;i<=tablet;i++) { int tablet_port ; @@ -150,7 +151,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ return fedbInfo; } - private void downloadOpenMLDB(String testPath){ + private String downloadOpenMLDB(String testPath){ try { String command; if(openMLDBUrl.startsWith("http")) { @@ -168,13 +169,15 @@ private void downloadOpenMLDB(String testPath){ command = "ls " + testPath + " | grep openmldb | grep -v .tar.gz"; result = ExecutorUtil.run(command); if (result != null && result.size() > 0) { - openMLDBName = result.get(0); - log.info("FEDB下载成功:{}", openMLDBName); + openMLDBDirectoryName = result.get(0); + log.info("FEDB下载成功:{}", openMLDBDirectoryName); + return openMLDBDirectoryName; }else{ throw new RuntimeException("FEDB下载失败"); } }catch (Exception e){ e.printStackTrace(); + throw new RuntimeException(e); } } public int deployZK(String testPath){ @@ -207,7 +210,7 @@ public int deployNS(String testPath, String ip, int index, String zk_endpoint, S int port = LinuxUtil.getNoUsedPort(); String ns_name = "/openmldb-ns-"+index; List commands = Lists.newArrayList( - "cp -r " + testPath + "/" + openMLDBName + " " + testPath + ns_name, + "cp -r " + testPath + "/" + openMLDBDirectoryName + " " + testPath + ns_name, "sed -i "+sedSeparator+" 's#--zk_cluster=.*#--zk_cluster=" + zk_endpoint + "#' " + testPath + ns_name + "/conf/nameserver.flags", "sed -i "+sedSeparator+" 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+ns_name+"/conf/nameserver.flags", "sed -i "+sedSeparator+" 's@#--zk_cluster=.*@--zk_cluster=" + zk_endpoint + "@' " + testPath + ns_name + "/conf/nameserver.flags", @@ -259,7 +262,7 @@ public int deployTablet(String testPath, String ip, int index, String zk_endpoin int port = LinuxUtil.getNoUsedPort(); 
String tablet_name = "/openmldb-tablet-"+index; List commands = Lists.newArrayList( - "cp -r "+testPath+"/"+ openMLDBName +" "+testPath+tablet_name, + "cp -r "+testPath+"/"+ openMLDBDirectoryName +" "+testPath+tablet_name, "sed -i "+sedSeparator+" 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+tablet_name+"/conf/tablet.flags", "sed -i "+sedSeparator+" 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+tablet_name+"/conf/tablet.flags", "sed -i "+sedSeparator+" 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+tablet_name+"/conf/tablet.flags", @@ -311,7 +314,7 @@ public int deployApiserver(String testPath, String ip, int index, String zk_endp int port = LinuxUtil.getNoUsedPort(); String apiserver_name = "/openmldb-apiserver-"+index; List commands = Lists.newArrayList( - "cp -r "+testPath+"/"+ openMLDBName +" "+testPath+apiserver_name, + "cp -r "+testPath+"/"+ openMLDBDirectoryName +" "+testPath+apiserver_name, "sed -i "+sedSeparator+" 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+testPath+apiserver_name+"/conf/apiserver.flags", "sed -i "+sedSeparator+" 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+testPath+apiserver_name+"/conf/apiserver.flags", "sed -i "+sedSeparator+" 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+testPath+apiserver_name+"/conf/apiserver.flags", @@ -372,7 +375,7 @@ public int deployTaskManager(String testPath, String ip, int index, String zk_en String sparkHome = deploySpark(testPath); int port = LinuxUtil.getNoUsedPort(); String task_manager_name = "/openmldb-task_manager-"+index; - ExecutorUtil.run("cp -r " + testPath + "/" + openMLDBName + " " + testPath + task_manager_name); + ExecutorUtil.run("cp -r " + testPath + "/" + openMLDBDirectoryName + " " + testPath + task_manager_name); if(batchJobJarPath==null) { String batchJobName = ExecutorUtil.run("ls " + testPath + task_manager_name + "/taskmanager/lib | grep openmldb-batchjob").get(0); batchJobJarPath = testPath + task_manager_name + 
"/taskmanager/lib/" + batchJobName; @@ -413,7 +416,7 @@ public OpenMLDBInfo deployStandalone(String testPath, String ip){ String apiServerEndpoint = ip+":"+apiServerPort; String standaloneName = "/openmldb-standalone"; List commands = Lists.newArrayList( - "cp -r " + testPath + "/" + openMLDBName + " " + testPath + standaloneName, + "cp -r " + testPath + "/" + openMLDBDirectoryName + " " + testPath + standaloneName, "sed -i "+sedSeparator+" 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", "sed -i "+sedSeparator+" 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_nameserver.flags", "sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint=" + nsEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_nameserver.flags", diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index f6a768515a5..662be931c10 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 4ecaba8bebd..938c36e8acd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -39,10 +39,14 @@ @Slf4j public class ClusterTest { protected static SqlExecutor executor; + protected String version; + 
protected String openMLDBPath; @BeforeTest() @Parameters({"env","version","openMLDBPath"}) public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String openMLDBPath) throws Exception { OpenMLDBGlobalVar.env = env; + this.version = version; + this.openMLDBPath = openMLDBPath; if(env.equalsIgnoreCase("cluster")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; openMLDBDeploy.setOpenMLDBPath(openMLDBPath); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 660673cc01f..3db3da9ab5f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -4,6 +4,7 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.openmldb.*; import com._4paradigm.qa.openmldb_deploy.util.Tool; +import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.RandomStringUtils; import org.testng.Assert; import org.testng.annotations.BeforeClass; @@ -210,14 +211,21 @@ public void testSingle(){ addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); //ns stop start 可以正常访问。 openMLDBDevops.operateNs(0,"stop"); - resetClient(); +// resetClient(); //ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); //ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); - resetClient(); +// resetClient(); 
addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); + // stop tablet ns 后 在重启 ns tablet 可以访问 + openMLDBDevops.operateTablet(0,"stop"); + openMLDBDevops.operateNs(0,"stop"); +// resetClient(); + openMLDBDevops.operateNs(0,"start"); + openMLDBDevops.operateTablet(0,"start"); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); } public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,List tableNames,int originalCount,int addCount){ @@ -229,7 +237,9 @@ public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,Li } String msg = "table add data check count failed."; for(String tableName:tableNames){ - sdkClient.insertList(tableName,addDataList); + if (CollectionUtils.isNotEmpty(addDataList)) { + sdkClient.insertList(tableName,addDataList); + } Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); } nsClient.checkTableOffSet(dbName,null); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java new file mode 100644 index 00000000000..debb6256ead --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java @@ -0,0 +1,38 @@ +package com._4paradigm.openmldb.devops_test.node_expansion; + +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import 
com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com._4paradigm.test_tool.command_tool.common.LinuxUtil; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +public class TestCluster extends ClusterTest { + private String dbName; + private SDKClient sdkClient; + private NsClient nsClient; + private OpenMLDBDevops openMLDBDevops; + @BeforeClass + public void beforeClass(){ + dbName = "test_devops1"; + sdkClient = SDKClient.of(executor); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + OpenMLDBDeploy deploy = new OpenMLDBDeploy(version); + deploy.setOpenMLDBPath(openMLDBPath); + deploy.setOpenMLDBDirectoryName(OpenMLDBGlobalVar.mainInfo.getOpenMLDBDirectoryName()); + String zk_cluster = OpenMLDBGlobalVar.mainInfo.getZk_cluster(); + String basePath = OpenMLDBGlobalVar.mainInfo.getBasePath(); + String ip = LinuxUtil.hostnameI(); + int port = deploy.deployTablet(basePath, ip, 4, zk_cluster, null); + String tabletEndpoint = ip+":"+port; + + } + @Test + public void testAddTablet(){ + + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index 3ec5f4383b6..54ce616c402 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -30,9 +30,10 @@ public static SDKClient of(SqlExecutor executor){ return new SDKClient(executor); } public OpenMLDBResult execute(String sql) { + log.info("execute sql:{}",sql); OpenMLDBResult openMLDBResult = new OpenMLDBResult(); + openMLDBResult.setSql(sql); 
try { - openMLDBResult.setSql(sql); boolean ok = statement.execute(sql); openMLDBResult.setOk(ok); ResultChainManager.of().toOpenMLDBResult(statement,openMLDBResult); From f5e01b226cda7c2a2f474832e2249ed973de3da9 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 15 Jul 2022 14:39:03 +0800 Subject: [PATCH 057/172] add ns operator --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 3df7998d397..2efb1274aa2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -19,7 +19,7 @@ single_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldb standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz -tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz +tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-snapshot-linux.tar.gz tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz From 0d51e4b063ef84c3e420077e98702ce500cf396e Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 15 Jul 2022 16:02:16 +0800 Subject: [PATCH 058/172] add ns operator --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 2efb1274aa2..fcc5a2ce51a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties 
+++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -13,7 +13,7 @@ tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz -single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz +single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5-SNAPSHOT-pr2174.tar.gz single_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz single_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz From 7b5991d8ae02ff5688e152d6a549cfb7f8d5298c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 15 Jul 2022 16:46:31 +0800 Subject: [PATCH 059/172] add ns operator --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 +- .../openmldb-deploy/test-suite/test_deploy.xml | 2 +- .../openmldb-deploy/test-suite/test_deploy_single_node.xml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index fcc5a2ce51a..2efb1274aa2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -13,7 +13,7 @@ tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz -single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5-SNAPSHOT-pr2174.tar.gz +single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz 
single_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz single_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 662be931c10..f6a768515a5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml index 7e9061c08b0..83747359d71 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_single_node.xml @@ -2,7 +2,7 @@ - + From 4f91f40992e5a9197029c35c44299ff7ebd68b9a Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 15 Jul 2022 17:09:31 +0800 Subject: [PATCH 060/172] add ns operator --- .../devops_test/common/ClusterTest.java | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 938c36e8acd..38137278e81 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -71,33 +71,33 @@ public void 
beforeTest(@Optional("qa") String env,@Optional("main") String versi // .build(); // OpenMLDBGlobalVar.env = "cluster"; - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) - .build(); - OpenMLDBGlobalVar.env = "cluster"; - // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() // .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/single") -// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30008") +// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30000") // .zk_root_path("/openmldb") -// .nsNum(1).tabletNum(1) -// .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) -// .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) +// .nsNum(2).tabletNum(3) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) +// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) // .build(); // OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/single") + 
.openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30008") + .zk_root_path("/openmldb") + .nsNum(1).tabletNum(1) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) + .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { From 737fa755f01eac168d2d9e4f0672b49eedd880a2 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 18 Jul 2022 14:31:22 +0800 Subject: [PATCH 061/172] add ns operator --- .../high_availability/TestCluster.java | 2 +- .../node_expansion/TestCluster.java | 176 +++++++++++++++++- .../test_common/openmldb/NsClient.java | 36 +++- 3 files changed, 208 insertions(+), 6 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 3db3da9ab5f..1d40892126e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -219,7 +219,7 @@ public void testSingle(){ openMLDBDevops.operateNs(0,"restart"); // resetClient(); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); - // stop tablet ns 后 在重启 ns tablet 可以访问 + // stop tablet ns 后 在启动 ns tablet 可以访问 openMLDBDevops.operateTablet(0,"stop"); openMLDBDevops.operateNs(0,"stop"); // resetClient(); diff --git 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java index debb6256ead..cd3883916f8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java @@ -1,14 +1,22 @@ package com._4paradigm.openmldb.devops_test.node_expansion; import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.openmldb.NsClient; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.openmldb.SDKClient; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import com._4paradigm.test_tool.command_tool.common.LinuxUtil; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.util.ArrayList; +import java.util.List; public class TestCluster extends ClusterTest { private String dbName; @@ -21,6 +29,59 @@ public void beforeClass(){ sdkClient = SDKClient.of(executor); nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + } + @Test + public void testAddTablet(){ + String memoryTable = "test_memory"; + String ssdTable = "test_ssd"; + String hddTable = "test_hdd"; + // 创建磁盘表和内存表。 + int dataCount = 100; + 
sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; + sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + // 插入一定量的数据 + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + sdkClient.insertList(memoryTable,dataList); + sdkClient.insertList(ssdTable,dataList); + sdkClient.insertList(hddTable,dataList); + // 增加一个tablet,数据可以正常访问。 OpenMLDBDeploy deploy = new OpenMLDBDeploy(version); deploy.setOpenMLDBPath(openMLDBPath); deploy.setOpenMLDBDirectoryName(OpenMLDBGlobalVar.mainInfo.getOpenMLDBDirectoryName()); @@ -28,11 +89,118 @@ public void beforeClass(){ String basePath = OpenMLDBGlobalVar.mainInfo.getBasePath(); String ip = LinuxUtil.hostnameI(); int port = deploy.deployTablet(basePath, ip, 4, zk_cluster, null); - String tabletEndpoint = ip+":"+port; - + String addTabletEndpoint = ip+":"+port; + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); + // 可以创建四个副本的表,可以成功。 + String memoryTable4 = 
"test_memory4"; + String ssdTable4 = "test_ssd4"; + String hddTable4 = "test_hdd4"; + // 创建磁盘表和内存表。 + sdkClient.createAndUseDB(dbName); + String memoryTableDDL4 = "create table "+memoryTable4+"(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=4);"; + String ssdTableDDL4 = "create table "+ssdTable4+"(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=4,storage_mode=\"SSD\");"; + String hddTableDDL4 = "create table "+hddTable4+"(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=4,storage_mode=\"HDD\");"; + sdkClient.execute(Lists.newArrayList(memoryTableDDL4,ssdTableDDL4,hddTableDDL4)); + // 插入一定量的数据 + sdkClient.insertList(memoryTable4,dataList); + sdkClient.insertList(ssdTable4,dataList); + sdkClient.insertList(hddTable4,dataList); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable4,ssdTable4,hddTable4),dataCount,0); + // 创建表制定分片到新的tablet上,可以成功。 + String memoryTable5 = "test_memory5"; + String ssdTable5 = "test_ssd5"; + String hddTable5 = "test_hdd5"; + String memoryTableDDL5 = "create table "+memoryTable5+"(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1,distribution = [ ('"+addTabletEndpoint+"',[])]);"; + String ssdTableDDL5 = "create table "+ssdTable5+"(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 
bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1,storage_mode=\"SSD\",distribution = [ ('"+addTabletEndpoint+"',[])]);"; + String hddTableDDL5 = "create table "+hddTable5+"(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1,storage_mode=\"HDD\",distribution = [ ('"+addTabletEndpoint+"',[])]);"; + OpenMLDBResult memory5Result = sdkClient.execute(memoryTableDDL5); + String addTabletMsg = "create table to new tablet failed."; + Assert.assertTrue(memory5Result.isOk(),addTabletMsg); + OpenMLDBResult ssd5Result = sdkClient.execute(ssdTableDDL5); + Assert.assertTrue(ssd5Result.isOk(),addTabletMsg); + OpenMLDBResult hdd5Result = sdkClient.execute(hddTableDDL5); + Assert.assertTrue(hdd5Result.isOk(),addTabletMsg); + // 副本迁移,迁移后,原来的数据删除,新的tablet上增加数据。 + List tabletEndpoints = OpenMLDBGlobalVar.mainInfo.getTabletEndpoints(); + nsClient.migrate(dbName,memoryTable,tabletEndpoints.get(0),0,addTabletEndpoint); + nsClient.migrate(dbName,memoryTable,tabletEndpoints.get(1),1,addTabletEndpoint); + addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); } - @Test - public void testAddTablet(){ + public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,List tableNames,int originalCount,int addCount){ + List> addDataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList(c1 + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + addDataList.add(list); + } + String msg = "table add data check count failed."; + for(String tableName:tableNames){ + if (CollectionUtils.isNotEmpty(addDataList)) { + sdkClient.insertList(tableName,addDataList); + } + 
Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); + } + nsClient.checkTableOffSet(dbName,null); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 8b9e5408c11..4e84715a572 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -121,6 +121,40 @@ public Map> getPid(String dbName){ return map; } - + public void migrate(String dbName,String srcEndpoint,String tableName,int pid,String desEndpoint){ + List srcEndPoint = getTableEndPoint(dbName, tableName, pid); + Assert.assertTrue(srcEndPoint.contains(srcEndpoint)); + String command = String.format("migrate %s %s %s %s",srcEndpoint,tableName,pid,desEndpoint); + String nsCommand = genNsCommand(dbName,command); + List lines = CommandUtil.run(nsCommand); + Assert.assertEquals(lines.get(0),"partition migrate ok"); + Tool.sleep(3*1000); + checkOPStatusDone(dbName,tableName); + List desEndPoint = getTableEndPoint(dbName, tableName, pid); + Assert.assertTrue(desEndPoint.contains(desEndPoint),"migrate check endpoint failed."); + checkTableOffSet(dbName,tableName); + } + public List getTableEndPoint(String dbName,String tableName,int pid){ + Map> tableEndPointMap = getTableEndPoint(dbName, tableName); + return tableEndPointMap.get(pid); + } + public Map> getTableEndPoint(String dbName,String tableName){ + Map> map = new HashMap<>(); + List lines = showTable(dbName,tableName); + Assert.assertTrue(lines.size()>2,"show table lines <= 2"); + for(int i=2;i values = map.get(pid); + if(values == null){ + values = new 
ArrayList<>(); + } + values.add(endpoint); + map.put(pid,values); + } + return map; + } } From 8dce78d9a49fafedcbbc0ff15831e42c0d43b1cc Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 18 Jul 2022 18:50:41 +0800 Subject: [PATCH 062/172] add ns operator --- .../devops_test/common/ClusterTest.java | 44 ++++++++++--------- .../test_suite/test_node_expansion.xml | 13 ++++++ 2 files changed, 36 insertions(+), 21 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 38137278e81..3bc34e19fc3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -60,6 +60,7 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi }else{ // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() // .deployType(OpenMLDBDeployType.CLUSTER) +// .openMLDBDirectoryName("openmldb-0.5.2-darwin") // .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") // .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") // .zk_cluster("127.0.0.1:30000") @@ -69,35 +70,36 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi // .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) // .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) // .build(); -// OpenMLDBGlobalVar.env = "cluster"; - -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// 
.deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") -// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30000") -// .zk_root_path("/openmldb") -// .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) -// .build(); // OpenMLDBGlobalVar.env = "cluster"; OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .openMLDBDirectoryName("openmldb-0.5.2-linux") .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/single") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30008") + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") .zk_root_path("/openmldb") - .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) - .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) + .nsNum(2).tabletNum(3) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) .build(); OpenMLDBGlobalVar.env = "cluster"; +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/home/zhaowei01/openmldb-auto-test/single") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30008") +// .zk_root_path("/openmldb") 
+// .nsNum(1).tabletNum(1) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) +// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) +// .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml new file mode 100644 index 00000000000..d9c9d9e5edb --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file From 09160f7ea9dd4caf9b3dd8fdafab36c9b0befd3f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 18 Jul 2022 19:48:30 +0800 Subject: [PATCH 063/172] add ns operator --- .../node_expansion/TestCluster.java | 6 +++++ .../test_common/bean/OpenMLDBResult.java | 1 + .../test_common/openmldb/SDKClient.java | 19 ++++++++++++-- .../openmldb/test_common/util/ResultUtil.java | 26 ++++++++++++++++--- 4 files changed, 46 insertions(+), 6 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java index cd3883916f8..05d22d20fc6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java @@ -7,6 +7,7 @@ import 
com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.openmldb.SDKClient; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com._4paradigm.qa.openmldb_deploy.util.Tool; import com._4paradigm.test_tool.command_tool.common.LinuxUtil; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.RandomStringUtils; @@ -90,6 +91,7 @@ public void testAddTablet(){ String ip = LinuxUtil.hostnameI(); int port = deploy.deployTablet(basePath, ip, 4, zk_cluster, null); String addTabletEndpoint = ip+":"+port; + sdkClient.checkComponentStatus(addTabletEndpoint, "online"); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,0); // 可以创建四个副本的表,可以成功。 String memoryTable4 = "test_memory4"; @@ -135,6 +137,7 @@ public void testAddTablet(){ sdkClient.insertList(memoryTable4,dataList); sdkClient.insertList(ssdTable4,dataList); sdkClient.insertList(hddTable4,dataList); + Tool.sleep(5*1000); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable4,ssdTable4,hddTable4),dataCount,0); // 创建表制定分片到新的tablet上,可以成功。 String memoryTable5 = "test_memory5"; @@ -176,10 +179,13 @@ public void testAddTablet(){ OpenMLDBResult memory5Result = sdkClient.execute(memoryTableDDL5); String addTabletMsg = "create table to new tablet failed."; Assert.assertTrue(memory5Result.isOk(),addTabletMsg); + Assert.assertTrue(sdkClient.tableIsExist(memoryTable5),addTabletMsg); OpenMLDBResult ssd5Result = sdkClient.execute(ssdTableDDL5); Assert.assertTrue(ssd5Result.isOk(),addTabletMsg); + Assert.assertTrue(sdkClient.tableIsExist(ssdTable5),addTabletMsg); OpenMLDBResult hdd5Result = sdkClient.execute(hddTableDDL5); Assert.assertTrue(hdd5Result.isOk(),addTabletMsg); + Assert.assertTrue(sdkClient.tableIsExist(hddTable5),addTabletMsg); // 副本迁移,迁移后,原来的数据删除,新的tablet上增加数据。 List tabletEndpoints = OpenMLDBGlobalVar.mainInfo.getTabletEndpoints(); 
nsClient.migrate(dbName,memoryTable,tabletEndpoints.get(0),0,addTabletEndpoint); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java index 13ea595adce..eb029175cb6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java @@ -32,6 +32,7 @@ public class OpenMLDBResult { private String dbName; private List tableNames; private String sql; + private boolean haveResult; private boolean ok; private int count; private String msg = ""; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index 54ce616c402..6fb0e2a4bed 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -1,9 +1,11 @@ package com._4paradigm.openmldb.test_common.openmldb; +import com._4paradigm.openmldb.jdbc.SQLResultSet; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.bean.SQLType; import com._4paradigm.openmldb.test_common.chain.result.ResultChainManager; +import com._4paradigm.openmldb.test_common.util.ResultUtil; import com._4paradigm.openmldb.test_common.util.SDKUtil; 
import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.openmldb.test_common.util.WaitUtil; @@ -35,8 +37,11 @@ public OpenMLDBResult execute(String sql) { openMLDBResult.setSql(sql); try { boolean ok = statement.execute(sql); - openMLDBResult.setOk(ok); - ResultChainManager.of().toOpenMLDBResult(statement,openMLDBResult); + openMLDBResult.setHaveResult(ok); + if(ok){ + ResultUtil.parseResultSet(statement,openMLDBResult); + } +// ResultChainManager.of().toOpenMLDBResult(statement,openMLDBResult); } catch (SQLException e) { openMLDBResult.setOk(false); openMLDBResult.setMsg(e.getMessage()); @@ -76,6 +81,16 @@ public void createDB(String dbName){ String sql = String.format("create database %s",dbName); execute(sql); } + public List showTables(){ + String sql = String.format("show tables;"); + OpenMLDBResult openMLDBResult = execute(sql); + List tableNames = openMLDBResult.getResult().stream().map(l -> String.valueOf(l.get(0))).collect(Collectors.toList()); + return tableNames; + } + public boolean tableIsExist(String tableName){ + List tableNames = showTables(); + return tableNames.contains(tableName); + } public void setOnline(){ execute("SET @@execute_mode='online';"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java index 621c43191db..79fbfbda406 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java @@ -7,10 +7,7 @@ import com.google.common.base.Joiner; import lombok.extern.slf4j.Slf4j; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.sql.Statement; 
-import java.sql.Types; +import java.sql.*; import java.text.ParseException; import java.util.ArrayList; import java.util.Iterator; @@ -18,6 +15,27 @@ @Slf4j public class ResultUtil { + public static void parseResultSet(Statement statement,OpenMLDBResult openMLDBResult){ + try { + ResultSet resultSet = statement.getResultSet(); + if (resultSet == null) { + openMLDBResult.setOk(false); + openMLDBResult.setMsg("executeSQL fail, result is null"); + } else if (resultSet instanceof SQLResultSet){ + SQLResultSet rs = (SQLResultSet)resultSet; + ResultUtil.setSchema(rs.getMetaData(),openMLDBResult); + List> result = ResultUtil.toList(rs); + openMLDBResult.setCount(result.size()); + openMLDBResult.setResult(result); + openMLDBResult.setMsg("success"); + openMLDBResult.setOk(true); + } + } catch (SQLException e) { + e.printStackTrace(); + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); + } + } public static OpenmldbDeployment parseDeployment(List lines){ OpenmldbDeployment deployment = new OpenmldbDeployment(); List inColumns = new ArrayList<>(); From a758cc3dce43fa05cb1ae835fa4c34fd89482270 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 08:38:38 +0800 Subject: [PATCH 064/172] add ns operator --- .../_4paradigm/openmldb/test_common/openmldb/SDKClient.java | 2 ++ .../com/_4paradigm/openmldb/test_common/util/ResultUtil.java | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index 6fb0e2a4bed..64c9204da22 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -38,6 +38,8 @@ public OpenMLDBResult execute(String sql) { try { boolean ok = statement.execute(sql); openMLDBResult.setHaveResult(ok); + openMLDBResult.setMsg("success"); + openMLDBResult.setOk(true); if(ok){ ResultUtil.parseResultSet(statement,openMLDBResult); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java index 79fbfbda406..76295d1b77e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java @@ -27,8 +27,8 @@ public static void parseResultSet(Statement statement,OpenMLDBResult openMLDBRes List> result = ResultUtil.toList(rs); openMLDBResult.setCount(result.size()); openMLDBResult.setResult(result); - openMLDBResult.setMsg("success"); - openMLDBResult.setOk(true); +// openMLDBResult.setMsg("success"); +// openMLDBResult.setOk(true); } } catch (SQLException e) { e.printStackTrace(); From 88dcd9939e75e016dac60a8237d716799cea0f36 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 08:57:37 +0800 Subject: [PATCH 065/172] add ns operator --- .../openmldb/devops_test/node_expansion/TestCluster.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java index 
05d22d20fc6..d52db00b2f5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java @@ -188,8 +188,8 @@ public void testAddTablet(){ Assert.assertTrue(sdkClient.tableIsExist(hddTable5),addTabletMsg); // 副本迁移,迁移后,原来的数据删除,新的tablet上增加数据。 List tabletEndpoints = OpenMLDBGlobalVar.mainInfo.getTabletEndpoints(); - nsClient.migrate(dbName,memoryTable,tabletEndpoints.get(0),0,addTabletEndpoint); - nsClient.migrate(dbName,memoryTable,tabletEndpoints.get(1),1,addTabletEndpoint); + nsClient.migrate(dbName,tabletEndpoints.get(0),memoryTable,0,addTabletEndpoint); + nsClient.migrate(dbName,tabletEndpoints.get(1),memoryTable,1,addTabletEndpoint); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); } From 1a9f5cc1ee9b9bf950afe14fad9cb76ac200a0ab Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 09:17:13 +0800 Subject: [PATCH 066/172] add ns operator --- .../devops_test/node_expansion/TestCluster.java | 1 + .../openmldb/test_common/openmldb/NsClient.java | 15 +++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java index d52db00b2f5..7345cd3d8e4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java @@ -187,6 +187,7 @@ 
public void testAddTablet(){ Assert.assertTrue(hdd5Result.isOk(),addTabletMsg); Assert.assertTrue(sdkClient.tableIsExist(hddTable5),addTabletMsg); // 副本迁移,迁移后,原来的数据删除,新的tablet上增加数据。 + nsClient.confset("auto_failover","false"); List tabletEndpoints = OpenMLDBGlobalVar.mainInfo.getTabletEndpoints(); nsClient.migrate(dbName,tabletEndpoints.get(0),memoryTable,0,addTabletEndpoint); nsClient.migrate(dbName,tabletEndpoints.get(1),memoryTable,1,addTabletEndpoint); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 4e84715a572..7a3fa2dbf47 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -59,8 +59,7 @@ public void checkOPStatusDone(String dbName,String tableName){ } public List showTable(String dbName,String tableName){ String command = StringUtils.isNotEmpty(tableName) ?"showtable "+tableName:"showtable"; - String nsCommand = genNsCommand(dbName,command); - List lines = CommandUtil.run(nsCommand); + List lines = runNs(dbName,command); return lines; } @@ -85,8 +84,7 @@ public void checkTableOffSet(String dbName,String tableName){ } public void makeSnapshot(String dbName,String tableName,int pid){ String command = String.format("makesnapshot %s %d",tableName,pid); - String nsCommand = genNsCommand(dbName,command); - List lines = CommandUtil.run(nsCommand); + List lines = runNs(dbName,command); Assert.assertEquals(lines.get(0),"MakeSnapshot ok"); Tool.sleep(3*1000); checkTableOffSet(dbName,tableName); @@ -121,12 +119,17 @@ public Map> getPid(String dbName){ return map; } + public void confset(String 
key,String value){ + String command = String.format("confset %s %s",key,value); + List lines = runNs(null,command); + Assert.assertTrue(lines.get(0).contains("ok")); + } + public void migrate(String dbName,String srcEndpoint,String tableName,int pid,String desEndpoint){ List srcEndPoint = getTableEndPoint(dbName, tableName, pid); Assert.assertTrue(srcEndPoint.contains(srcEndpoint)); String command = String.format("migrate %s %s %s %s",srcEndpoint,tableName,pid,desEndpoint); - String nsCommand = genNsCommand(dbName,command); - List lines = CommandUtil.run(nsCommand); + List lines = runNs(dbName,command); Assert.assertEquals(lines.get(0),"partition migrate ok"); Tool.sleep(3*1000); checkOPStatusDone(dbName,tableName); From 808063614dbff37a2a473de6ba1a4aed2d932e51 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 10:21:00 +0800 Subject: [PATCH 067/172] add ns operator --- .../openmldb/devops_test/node_expansion/TestCluster.java | 4 ++-- .../openmldb-sdk-test/shell/stop-openmldb.sh | 1 + .../_4paradigm/openmldb/test_common/openmldb/NsClient.java | 7 ++++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java index 7345cd3d8e4..7182c9824c6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java @@ -189,8 +189,8 @@ public void testAddTablet(){ // 副本迁移,迁移后,原来的数据删除,新的tablet上增加数据。 nsClient.confset("auto_failover","false"); List tabletEndpoints = OpenMLDBGlobalVar.mainInfo.getTabletEndpoints(); - 
nsClient.migrate(dbName,tabletEndpoints.get(0),memoryTable,0,addTabletEndpoint); - nsClient.migrate(dbName,tabletEndpoints.get(1),memoryTable,1,addTabletEndpoint); + nsClient.migrate(dbName,tabletEndpoints.get(1),memoryTable,0,addTabletEndpoint); + nsClient.migrate(dbName,tabletEndpoints.get(2),memoryTable,1,addTabletEndpoint); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh index ca9cb98c2db..996aa02cc30 100755 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh @@ -19,6 +19,7 @@ sh openmldb-ns-2/bin/start.sh stop nameserver sh openmldb-tablet-1/bin/start.sh stop tablet sh openmldb-tablet-2/bin/start.sh stop tablet sh openmldb-tablet-3/bin/start.sh stop tablet +sh openmldb-tablet-4/bin/start.sh stop tablet sh openmldb-apiserver-1/bin/start.sh stop apiserver sh openmldb-task_manager-1/bin/start.sh stop taskmanager sh zookeeper-3.4.14/bin/zkServer.sh stop diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 7a3fa2dbf47..09e3bdcd983 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -150,11 +150,16 @@ public Map> getTableEndPoint(String dbName,String tableName String[] infos = line.split("\\s+"); int pid = Integer.parseInt(infos[2]); String 
endpoint = infos[3]; + String role = infos[4]; List values = map.get(pid); if(values == null){ values = new ArrayList<>(); } - values.add(endpoint); + if(role.equals("leader")){ + values.add(0,endpoint); + }else { + values.add(endpoint); + } map.put(pid,values); } return map; From f48563240d6b347fef2578e143facff8cb4628b0 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 10:34:52 +0800 Subject: [PATCH 068/172] add ns operator --- .../openmldb/test_common/openmldb/NsClient.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 09e3bdcd983..08190200fde 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -126,15 +126,15 @@ public void confset(String key,String value){ } public void migrate(String dbName,String srcEndpoint,String tableName,int pid,String desEndpoint){ - List srcEndPoint = getTableEndPoint(dbName, tableName, pid); - Assert.assertTrue(srcEndPoint.contains(srcEndpoint)); + List srcEndpointList = getTableEndPoint(dbName, tableName, pid); + Assert.assertTrue(srcEndpointList.contains(srcEndpoint)); String command = String.format("migrate %s %s %s %s",srcEndpoint,tableName,pid,desEndpoint); List lines = runNs(dbName,command); Assert.assertEquals(lines.get(0),"partition migrate ok"); Tool.sleep(3*1000); checkOPStatusDone(dbName,tableName); - List desEndPoint = getTableEndPoint(dbName, tableName, pid); - Assert.assertTrue(desEndPoint.contains(desEndPoint),"migrate check endpoint failed."); + List desEndpointList = 
getTableEndPoint(dbName, tableName, pid); + Assert.assertTrue(desEndpointList.contains(desEndpoint),"migrate check endpoint failed."); checkTableOffSet(dbName,tableName); } public List getTableEndPoint(String dbName,String tableName,int pid){ From ba9d1f4115db5cce10b59bcf489ae65e78958bbe Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 11:21:49 +0800 Subject: [PATCH 069/172] add ns operator --- .../devops_test/node_expansion/TestCluster.java | 6 +++--- .../openmldb/test_common/openmldb/NsClient.java | 15 ++++++++++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java index 7182c9824c6..80948fd17e1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/node_expansion/TestCluster.java @@ -188,9 +188,9 @@ public void testAddTablet(){ Assert.assertTrue(sdkClient.tableIsExist(hddTable5),addTabletMsg); // 副本迁移,迁移后,原来的数据删除,新的tablet上增加数据。 nsClient.confset("auto_failover","false"); - List tabletEndpoints = OpenMLDBGlobalVar.mainInfo.getTabletEndpoints(); - nsClient.migrate(dbName,tabletEndpoints.get(1),memoryTable,0,addTabletEndpoint); - nsClient.migrate(dbName,tabletEndpoints.get(2),memoryTable,1,addTabletEndpoint); + nsClient.migrate(dbName,memoryTable,addTabletEndpoint); + nsClient.migrate(dbName,ssdTable,addTabletEndpoint); + nsClient.migrate(dbName,hddTable,addTabletEndpoint); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 08190200fde..8b492c3db09 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -124,10 +124,19 @@ public void confset(String key,String value){ List lines = runNs(null,command); Assert.assertTrue(lines.get(0).contains("ok")); } - + public void migrate(String dbName,String tableName,String desEndpoint){ + Map> tableEndPointMap = getTableEndPoint(dbName, tableName); + for(int pid:tableEndPointMap.keySet()){ + List srcEndpointList = tableEndPointMap.get(pid); + if(srcEndpointList.size()<=1){ + throw new IllegalStateException("only have leader not migrate"); + } + int index = new Random().nextInt(srcEndpointList.size()-1)+1; + String srcEndpoint = srcEndpointList.get(index); + migrate(dbName,srcEndpoint,tableName,pid,desEndpoint); + } + } public void migrate(String dbName,String srcEndpoint,String tableName,int pid,String desEndpoint){ - List srcEndpointList = getTableEndPoint(dbName, tableName, pid); - Assert.assertTrue(srcEndpointList.contains(srcEndpoint)); String command = String.format("migrate %s %s %s %s",srcEndpoint,tableName,pid,desEndpoint); List lines = runNs(dbName,command); Assert.assertEquals(lines.get(0),"partition migrate ok"); From 6aaee07a2d0021e7c471cabd48e29e9d29bacb03 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 11:49:43 +0800 Subject: [PATCH 070/172] add ns operator --- .../devops_test/common/ClusterTest.java | 44 +++++++++---------- .../high_availability/TestCluster.java | 1 + 2 files changed, 23 insertions(+), 22 
deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 3bc34e19fc3..b4abcf6600c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -72,34 +72,34 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi // .build(); // OpenMLDBGlobalVar.env = "cluster"; - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .openMLDBDirectoryName("openmldb-0.5.2-linux") - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) - .build(); - OpenMLDBGlobalVar.env = "cluster"; - // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .openMLDBDirectoryName("openmldb-0.5.2-linux") // .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/single") -// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30008") +// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30000") // .zk_root_path("/openmldb") -// 
.nsNum(1).tabletNum(1) -// .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) -// .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) +// .nsNum(2).tabletNum(3) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) +// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) // .build(); // OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/single") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30008") + .zk_root_path("/openmldb") + .nsNum(1).tabletNum(1) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) + .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 1d40892126e..91a5917dd73 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -224,6 +224,7 @@ public void 
testSingle(){ openMLDBDevops.operateNs(0,"stop"); // resetClient(); openMLDBDevops.operateNs(0,"start"); + Tool.sleep(10*1000); openMLDBDevops.operateTablet(0,"start"); addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); } From 22bce46379e70f7285b0858884338dca5d2bed4a Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 19 Jul 2022 16:39:20 +0800 Subject: [PATCH 071/172] add ns operator --- .../openmldb-test-java/openmldb-test-common/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index ff94cacead2..e4f6dac8601 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.5.0 - 0.5.0 + 0.5.2 + 0.5.2-macos From c156bab2e8794ca7efd1d1ec6a65fa61541c830c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 20 Jul 2022 09:38:06 +0800 Subject: [PATCH 072/172] add ns operator --- cases/function/window/test_current_row.yaml | 61 ++++++++++++++ cases/function/window/test_current_time.yaml | 83 +++++++++++++++++++ .../src/main/resources/deploy.properties | 2 +- .../test-suite/test_deploy.xml | 2 +- .../cluster/v230/WindowTest.java | 4 +- .../openmldb/test_common/model/CaseFile.java | 1 + .../openmldb/test_common/model/SQLCase.java | 1 + 7 files changed, 149 insertions(+), 5 deletions(-) create mode 100644 cases/function/window/test_current_row.yaml create mode 100644 cases/function/window/test_current_time.yaml diff --git a/cases/function/window/test_current_row.yaml b/cases/function/window/test_current_row.yaml new file mode 100644 index 00000000000..c43863d1cd1 --- /dev/null +++ b/cases/function/window/test_current_row.yaml @@ -0,0 +1,61 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - id: 0 + desc: rows-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,0 ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,0 ] + - id: 1 + desc: rows_range-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + 
columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,0 ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,0 ] diff --git a/cases/function/window/test_current_time.yaml b/cases/function/window/test_current_time.yaml new file mode 100644 index 00000000000..998824ced03 --- /dev/null +++ b/cases/function/window/test_current_time.yaml @@ -0,0 +1,83 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - id: 0 + desc: ts列的值为0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,0,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,30 ] + - [ "aa",21,31 ] + - [ "aa",22,32 ] + - [ "aa",23,33 ] + - [ "bb",24,34 ] + - id: 1 + desc: ts列的值为-1 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ] + - [ 
"aa",21,31,1.2,2.2,-1,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,-1,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,30 ] + - [ "aa",21,31 ] + - [ "aa",22,32 ] + - [ "aa",23,33 ] + - [ "bb",24,34 ] +# - id: 2 +# desc: ts列的值为1 +# inputs: +# - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] +# indexs: [ "index1:c1:c7" ] +# rows: +# - [ "aa",20,30,1.1,2.1,1,"2020-05-01" ] +# - [ "aa",21,31,1.2,2.2,1,"2020-05-02" ] +# - [ "aa",22,32,1.3,2.3,1,"2020-05-03" ] +# - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] +# - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] +# sql: | +# SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); +# expect: +# order: c3 +# columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] +# rows: +# - [ "aa",20,30 ] +# - [ "aa",21,31 ] +# - [ "aa",22,32 ] +# - [ "aa",23,33 ] +# - [ "bb",24,34 ] diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 2efb1274aa2..af6f6362192 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -9,7 +9,7 @@ main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz 
spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz -tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz +tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-snapshot-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index f6a768515a5..9603d18f40a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java index 5f46d870ba8..8fcd76616fb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java @@ -44,9 +44,7 @@ public void testWindowBatch(SQLCase testCase) throws Exception { } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/", - "function/cluster/", - "function/test_index_optimized.yaml"}) + @Yaml(filePaths = {"function/window/test_current_time.yaml"}) public void testWindowRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java index 39a908c09a0..41f83df837b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java @@ -32,6 +32,7 @@ @Data public class CaseFile { private String db; + private String version; private List debugs; private List cases; // ANSISQL HybridSQL SQLITE3 MYSQL diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index f687d331459..5840e48aa43 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -32,6 +32,7 @@ public class SQLCase implements Serializable{ private String desc; private String mode; private String db; + private String version; private String sql; private List> dataProvider; private List sqls; From 5815700c6dc3ba58e58a4f16a1caabc87bae2deb Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 21 Jul 2022 10:55:57 +0800 Subject: [PATCH 073/172] add ns operator --- cases/function/window/test_current_row.yaml | 659 +++++++++++++++++- .../test-suite/test_deploy.xml | 2 +- .../openmldb-sdk-test/pom.xml | 12 - .../cluster/v230/WindowTest.java | 2 +- .../openmldb-test-common/pom.xml | 4 +- .../openmldb-test-java/pom.xml | 13 + 6 files 
changed, 672 insertions(+), 20 deletions(-) diff --git a/cases/function/window/test_current_row.yaml b/cases/function/window/test_current_row.yaml index c43863d1cd1..8c449c5aef7 100644 --- a/cases/function/window/test_current_row.yaml +++ b/cases/function/window/test_current_row.yaml @@ -32,11 +32,11 @@ cases: order: c3 columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] rows: - - [ "aa",20,0 ] + - [ "aa",20,null ] - [ "aa",21,30 ] - [ "aa",22,61 ] - [ "aa",23,63 ] - - [ "bb",24,0 ] + - [ "bb",24,null ] - id: 1 desc: rows_range-current_row inputs: @@ -54,8 +54,659 @@ cases: order: c3 columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] rows: - - [ "aa",20,0 ] + - [ "aa",20,null ] - [ "aa",21,30 ] - [ "aa",22,61 ] - [ "aa",23,63 ] - - [ "bb",24,0 ] + - [ "bb",24,null ] + - id: 2 + desc: rows-current_row-有和当前行ts一致的数据 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 3 + desc: rows_range-current_row-有和当前行ts一致的数据 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ] + - [ 
"aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 4 + desc: rows-纯历史窗口-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 5 + desc: rows_range-纯历史窗口-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 
int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 6 + desc: rows-current_row-ts=0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 7 + desc: rows_range-current_row-ts=0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 8 + desc: rows-current_row-ts=-1 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ] + - [ 
"aa",21,31,1.2,2.2,-1,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 9 + desc: rows_range-current_row-ts=-1 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 10 + desc: rows-current_row-ts=负数和0 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS 
BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 11 + desc: rows_range-current_row-ts=负数和0 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 12 + desc: rows-open-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 13 + desc: rows_range-open-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 
float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "aa",24,34,1.5,2.5,1590738993000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "aa",24,32 ] + - id: 14 + desc: rows_range-current_row-maxsize小于窗口 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 15 + desc: rows_range-current_row-maxsize大于窗口 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 
"bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW MAXSIZE 3 EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 16 + desc: rows-current_row-current_time + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 17 + desc: rows_range-current_row-current_time + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 
int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,61 ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 18 + desc: window union rows-current_row-instance_not_in_window + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,63] + - [5,"ee",21,null] + - id: 19 + desc: window union rows_range-current_row-instance_not_in_window + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + 
expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,32] + - [5,"ee",21,null] + - id: 20 + desc: window union rows-current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,62] + - [5,"ee",21,null] + - id: 21 + desc: window union rows_range-current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum 
bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,62] + - [5,"ee",21,null] + - id: 22 + desc: rows窗口包含open/maxsize/instance_not_in_window/current_time/current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,67] + - [5,"ee",21,null] + - id: 23 + desc: rows_range窗口包含open/maxsize/instance_not_in_window/current_time/current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER 
BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING MAXSIZE 1 EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,32] + - [4,"dd",20,35] + - [5,"ee",21,null] + - id: 24 + desc: rows-lag-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ "aa",23,31 ] + - [ "bb",24,null ] + - id: 25 + desc: rows_range-lag-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ "aa",23,31 ] + - [ "bb",24,null ] + - id: 26 + desc: rows-at-current_row + inputs: + - 
columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ "aa",23,31 ] + - [ "bb",24,null ] + - id: 27 + desc: rows_range-at-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ "aa",23,31 ] + - [ "bb",24,null ] + - id: 28 + desc: 两个窗口,一个rows,一个rows_range,current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 
"aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c5) OVER w2 as w2_c5_count FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 rows_range BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint","w2_c5_count bigint" ] + rows: + - [ "aa",20,null,0 ] + - [ "aa",21,30,1 ] + - [ "aa",22,61,2 ] + - [ "aa",23,63,2 ] + - [ "bb",24,null,0 ] + - id: 29 + desc: current_row小写 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW exclude current_row); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 30 + desc: maxsize位置错误 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 
ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW MAXSIZE 2); + expect: + success: false diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 9603d18f40a..f6a768515a5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml index 05a26c31e0d..ad10dcb34ea 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml @@ -19,18 +19,6 @@ test_suite/test_tmp.xml 1.8.9 - - - s01.oss.sonatype.org-snapshot - https://s01.oss.sonatype.org/content/repositories/snapshots - - false - - - true - - - diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java index 8fcd76616fb..ef5482aca85 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java @@ -44,7 +44,7 @@ public void testWindowBatch(SQLCase testCase) throws Exception { } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/test_current_time.yaml"}) + @Yaml(filePaths = {"function/window/test_current_row.yaml"}) public void testWindowRequestMode(SQLCase testCase) throws 
Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 53f3e9284c4..90429813d43 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.5.2 - 0.5.2-macos + 0.5.0-SNAPSHOT + 0.5.0-macos-SNAPSHOT diff --git a/test/integration-test/openmldb-test-java/pom.xml b/test/integration-test/openmldb-test-java/pom.xml index 60a96a427fa..568e3f8b0f5 100644 --- a/test/integration-test/openmldb-test-java/pom.xml +++ b/test/integration-test/openmldb-test-java/pom.xml @@ -22,6 +22,19 @@ 8 + + + s01.oss.sonatype.org-snapshot + https://s01.oss.sonatype.org/content/repositories/snapshots + + false + + + true + + + + From 8856c62368992b624be4b0dae3bb1632affe8397 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 21 Jul 2022 13:51:01 +0800 Subject: [PATCH 074/172] add ns operator --- .../openmldb-test-java/openmldb-test-common/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 90429813d43..61a092722ba 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.5.0-SNAPSHOT - 0.5.0-macos-SNAPSHOT + 0.5.0 + 0.5.0 From be09dfe4da1ea6994facc45cbba58a17cbabe17b Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 21 Jul 2022 14:19:11 +0800 Subject: [PATCH 075/172] add ns operator --- .../openmldb-deploy/test-suite/test_deploy.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml 
b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index f6a768515a5..6f6c59753c1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + From 8d98f7d1b31b35ea324f51b701db068e541f1472 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 21 Jul 2022 16:38:32 +0800 Subject: [PATCH 076/172] add ns operator --- cases/function/window/test_current_row.yaml | 46 ++++++++++++++++++- cases/function/window/test_current_time.yaml | 23 ++++++++++ .../src/main/resources/deploy.properties | 2 +- .../test-suite/test_deploy.xml | 2 +- 4 files changed, 70 insertions(+), 3 deletions(-) diff --git a/cases/function/window/test_current_row.yaml b/cases/function/window/test_current_row.yaml index 8c449c5aef7..5ad35d1c960 100644 --- a/cases/function/window/test_current_row.yaml +++ b/cases/function/window/test_current_row.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: [] +debugs: ["rows_range-纯历史窗口-current_row-ts=0"] cases: - id: 0 desc: rows-current_row @@ -710,3 +710,47 @@ cases: SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW MAXSIZE 2); expect: success: false + - id: 31 + desc: rows-纯历史窗口-current_row-ts=0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 32 + desc: rows_range-纯历史窗口-current_row-ts=0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,2000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,61 ] + - [ "aa",23,93 ] + - [ "bb",24,null ] \ No newline at end of file diff --git 
a/cases/function/window/test_current_time.yaml b/cases/function/window/test_current_time.yaml index 998824ced03..528113cf3e5 100644 --- a/cases/function/window/test_current_time.yaml +++ b/cases/function/window/test_current_time.yaml @@ -38,7 +38,30 @@ cases: - [ "aa",23,33 ] - [ "bb",24,34 ] - id: 1 + desc: ts列的值为0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 2 desc: ts列的值为-1 + tags: ["TODO","ts为负数有问题,带支持后再验证"] inputs: - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] indexs: [ "index1:c1:c7" ] diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index af6f6362192..a9c31e13b4a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -9,7 +9,7 @@ main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz 
spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz -tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-snapshot-linux.tar.gz +tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 6f6c59753c1..f6a768515a5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + From d6931d233492791f0a4c4fd06b2ac54368c49946 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sat, 23 Jul 2022 09:20:32 +0800 Subject: [PATCH 077/172] Compatible with disk table --- cases/function/ddl/test_options.yaml | 62 ++++++++++++++----- .../{window => tmp}/test_current_row.yaml | 0 .../{window => tmp}/test_current_time.yaml | 0 .../java_sdk_test/common/BaseTest.java | 12 ++-- .../{FedbConfig.java => OpenMLDBConfig.java} | 21 +++---- .../{FedbTest.java => OpenMLDBTest.java} | 4 +- ...Config.java => OpenMLDBVersionConfig.java} | 2 +- .../entity/FesqlDataProvider.java | 59 ------------------ ...derList.java => OpenMLDBCaseFileList.java} | 37 +++++------ .../java_sdk_test/executor/BaseExecutor.java | 2 +- .../executor/BaseSQLExecutor.java | 2 +- .../executor/ExecutorFactory.java | 11 ++-- .../executor/RequestQuerySQLExecutor.java | 4 +- .../executor/StoredProcedureSQLExecutor.java | 2 +- .../report/AddAttachmentListener.java | 6 +- .../java_sdk_test/util/CaseOutputUtil.java | 6 +- .../src/main/resources/fesql.properties | 26 -------- .../src/main/resources/run_case.properties | 4 ++ .../auto_gen_case/AutoGenCaseTest.java | 10 
+-- .../java_sdk_test/cluster/v030/DMLTest.java | 4 +- .../cluster/v030/MultiDBTest.java | 4 +- .../cluster/v030/SchemaTest.java | 4 +- .../cluster/v040/DeploymentTest.java | 5 +- .../cluster/v040/ExpressTest.java | 4 +- .../cluster/v040/FunctionTest.java | 4 +- .../java_sdk_test/cluster/v040/OutInTest.java | 5 +- .../cluster/v050/DiskTableTest.java | 4 +- .../cluster/v050/LongWindowTest.java | 4 +- .../cluster/v230/BatchRequestTest.java | 4 +- .../java_sdk_test/cluster/v230/DDLTest.java | 4 +- .../java_sdk_test/cluster/v230/DMLTest.java | 4 +- .../cluster/v230/ExpressTest.java | 4 +- .../cluster/v230/FZCaseTest.java | 4 +- .../cluster/v230/FunctionTest.java | 4 +- .../cluster/v230/LastJoinTest.java | 16 ++--- .../cluster/v230/ParameterQueryTest.java | 4 +- .../cluster/v230/SelectTest.java | 4 +- .../cluster/v230/WindowTest.java | 10 +-- .../diff_test/DiffResultTest.java | 8 +-- .../java_sdk_test/diff_test/MysqlTest.java | 12 ++-- .../java_sdk_test/diff_test/Sqlite3Test.java | 12 ++-- .../entity/FesqlDataProviderTest.java | 12 ++-- .../standalone/v030/DDLTest.java | 1 - .../standalone/v030/ExpressTest.java | 1 - .../standalone/v030/FunctionTest.java | 1 - .../standalone/v030/LastJoinTest.java | 1 - .../standalone/v030/MultiDBTest.java | 2 - .../standalone/v030/OutInTest.java | 1 - .../standalone/v030/SelectTest.java | 1 - .../standalone/v030/WindowTest.java | 1 - .../java_sdk_test/temp/DebugTest.java | 4 +- .../java_sdk_test/temp/TestProcedure.java | 4 +- .../java_sdk_test/ut/UniqueExpectTest.java | 8 +-- .../openmldb-sdk-test/test_suite/test_tmp.xml | 18 +++--- .../openmldb-test-common/pom.xml | 4 +- .../openmldb/test_common/model/CaseFile.java | 39 ++++++++++++ .../openmldb/test_common/model/SQLCase.java | 19 ++++++ .../openmldb/OpenMLDBGlobalVar.java | 1 + 58 files changed, 254 insertions(+), 262 deletions(-) rename cases/function/{window => tmp}/test_current_row.yaml (100%) rename cases/function/{window => tmp}/test_current_time.yaml (100%) rename 
test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/{FedbConfig.java => OpenMLDBConfig.java} (82%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/{FedbTest.java => OpenMLDBTest.java} (98%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/{FedbVersionConfig.java => OpenMLDBVersionConfig.java} (96%) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/{FesqlDataProviderList.java => OpenMLDBCaseFileList.java} (55%) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fesql.properties create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties diff --git a/cases/function/ddl/test_options.yaml b/cases/function/ddl/test_options.yaml index 7355a83961a..a6a642ac98b 100644 --- a/cases/function/ddl/test_options.yaml +++ b/cases/function/ddl/test_options.yaml @@ -55,7 +55,7 @@ cases: name: t3 success: true options: - partitionNum: 1 + partitionNum: 8 replicaNum: 1 - id: 3 @@ -66,14 +66,14 @@ cases: create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) options ( partitionnum = 1, - distribution = [ ('{tb_endpoint_0}',[])] + distribution = [ ('{tb_endpoint_0}',['{tb_endpoint_1}','{tb_endpoint_2}'])] ); expect: name: t3 success: true options: partitionNum: 1 - replicaNum: 1 + replicaNum: 3 - id: 4 desc: 创建表时没有distribution @@ -109,7 +109,8 @@ cases: success: false - id: 6 - desc: partitionnum=0 + desc: partitionnum=0,指定distribution + tags: ["TODO","bug修复后验证"] mode: standalone-unsupport inputs: 
- name: t3 @@ -121,7 +122,11 @@ cases: distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] ); expect: - success: false + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 3 - id: 7 desc: partitionnum=10 @@ -288,7 +293,7 @@ cases: success: true options: partitionNum: 1 - replicaNum: 1 + replicaNum: 3 - id: 18 desc: 只有replicanum @@ -303,11 +308,11 @@ cases: name: t3 success: true options: - partitionNum: 1 + partitionNum: 8 replicaNum: 1 - id: 19 - desc: 只有distribution + desc: 没有replicaNum,distribution的个数和tablet数量不一致 inputs: - name: t3 sql: | @@ -316,11 +321,7 @@ cases: distribution = [ ('{tb_endpoint_0}', [])] ); expect: - name: t3 - success: true - options: - partitionNum: 1 - replicaNum: 1 + success: false - id: 20 desc: distribution指定的tablet不存在 @@ -379,8 +380,39 @@ cases: options: partitionNum: 1 replicaNum: 3 - - + - + id: 23 + tags: ["TODO","bug修复后验证"] + desc: partitionnum=0,没有指定distribution + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 0, + replicanum = 3 + ); + expect: + success: false + - + id: 24 + desc: 没有partitionnum和replicanum,指定distribution + tags: ["TODO","bug修复后验证"] + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 3 diff --git a/cases/function/window/test_current_row.yaml b/cases/function/tmp/test_current_row.yaml similarity index 100% rename from cases/function/window/test_current_row.yaml rename to cases/function/tmp/test_current_row.yaml diff --git a/cases/function/window/test_current_time.yaml b/cases/function/tmp/test_current_time.yaml similarity index 100% rename from 
cases/function/window/test_current_time.yaml rename to cases/function/tmp/test_current_time.yaml diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java index f4ddc844d1b..da0050fb919 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java @@ -16,10 +16,10 @@ package com._4paradigm.openmldb.java_sdk_test.common; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlDataProvider; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlDataProviderList; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.common.ReportLog; +import com._4paradigm.openmldb.test_common.model.CaseFile; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.provider.Yaml; @@ -44,8 +44,8 @@ public class BaseTest implements ITest { private int testNum = 0; public static String CaseNameFormat(SQLCase sqlCase) { - return String.format("%s_%s_%s", - OpenMLDBGlobalVar.env, sqlCase.getId(), sqlCase.getDesc()); + return String.format("%s_%s_%s_%s", + OpenMLDBGlobalVar.env,sqlCase.getCaseFileName(), sqlCase.getId(), sqlCase.getDesc()); } @DataProvider(name = "getCase") @@ -54,7 +54,7 @@ public Object[] getCaseByYaml(Method method) throws FileNotFoundException { if(casePaths==null||casePaths.length==0){ throw new RuntimeException("please add @Yaml"); } - FesqlDataProviderList dp = 
FesqlDataProviderList.dataProviderGenerator(casePaths); + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList.dataProviderGenerator(casePaths); Object[] caseArray = dp.getCases().toArray(); logger.info("caseArray.length:{}",caseArray.length); return caseArray; @@ -68,7 +68,7 @@ public void BeforeMethod(Method method, Object[] testData) { testData[0], "fail to run fesql test with null SQLCase: check yaml case"); if (testData[0] instanceof SQLCase) { SQLCase sqlCase = (SQLCase) testData[0]; - Assert.assertNotEquals(FesqlDataProvider.FAIL_SQL_CASE, + Assert.assertNotEquals(CaseFile.FAIL_SQL_CASE, sqlCase.getDesc(), "fail to run fesql test with FAIL DATA PROVIDER SQLCase: check yaml case"); testName.set(String.format("[%d]%s.%s", testNum, method.getName(), CaseNameFormat(sqlCase))); } else { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java similarity index 82% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbConfig.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java index 3d39ffd65ba..8d54b2676a8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java @@ -32,12 +32,8 @@ * @date 2020/6/11 11:34 AM */ @Slf4j -public class FedbConfig { - // public static final String ZK_CLUSTER; - // public static final String ZK_ROOT_PATH; +public class OpenMLDBConfig { public static final List VERSIONS; - // public static final FEDBInfo 
mainInfo; - public static final String BASE_PATH; public static boolean INIT_VERSION_ENV = true; public static final List FESQL_CASE_LEVELS; public static final String FESQL_CASE_PATH; @@ -46,13 +42,11 @@ public class FedbConfig { public static final String FESQL_CASE_DESC; public static final String YAML_CASE_BASE_DIR; public static final boolean ADD_REPORT_LOG; - public static final String ZK_URL; - public static final Properties CONFIG = Tool.getProperties("fesql.properties"); + + public static final Properties CONFIG = Tool.getProperties("run_case.properties"); static { - // ZK_CLUSTER = CONFIG.getProperty(FedbGlobalVar.env + "_zk_cluster"); - // ZK_ROOT_PATH = CONFIG.getProperty(FedbGlobalVar.env + "_zk_root_path"); String levelStr = System.getProperty("caseLevel"); levelStr = StringUtils.isEmpty(levelStr) ? "0" : levelStr; FESQL_CASE_LEVELS = Arrays.stream(levelStr.split(",")).map(Integer::parseInt).collect(Collectors.toList()); @@ -78,10 +72,6 @@ public class FedbConfig { log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); } - BASE_PATH = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_base_path"); - // String tb_endpoint_0 = CONFIG.getProperty(FedbGlobalVar.env + "_tb_endpoint_0"); - // String tb_endpoint_1 = CONFIG.getProperty(FedbGlobalVar.env + "_tb_endpoint_1"); - // String tb_endpoint_2 = CONFIG.getProperty(FedbGlobalVar.env + "_tb_endpoint_2"); String versionStr = System.getProperty("fedbVersion"); if (StringUtils.isEmpty(versionStr)) { versionStr = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_versions"); @@ -92,7 +82,6 @@ public class FedbConfig { VERSIONS = Lists.newArrayList(); } log.info("HybridSEConfig: versions: {}", VERSIONS); - ZK_URL = CONFIG.getProperty("zk_url"); String reportLogStr = System.getProperty("reportLog"); if(StringUtils.isNotEmpty(reportLogStr)){ ADD_REPORT_LOG = Boolean.parseBoolean(reportLogStr); @@ -103,6 +92,10 @@ public class FedbConfig { if (StringUtils.isNotEmpty(init_env)) { INIT_VERSION_ENV = 
Boolean.parseBoolean(init_env); } + String tableStorageMode = CONFIG.getProperty("table_storage_mode"); + if(StringUtils.isNotEmpty(tableStorageMode)){ + OpenMLDBGlobalVar.tableStorageMode = tableStorageMode; + } } public static boolean isCluster() { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java similarity index 98% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java index 27fa4ae42a4..77569143566 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -37,7 +37,7 @@ * @date 2020/6/11 2:02 PM */ @Slf4j -public class FedbTest extends BaseTest { +public class OpenMLDBTest extends BaseTest { protected static SqlExecutor executor; @BeforeTest() @@ -77,8 +77,6 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi OpenMLDBClient fesqlClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); executor = fesqlClient.getExecutor(); log.info("executor:{}",executor); - //todo - Statement statement = executor.getStatement(); statement.execute("SET @@execute_mode='online';"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbVersionConfig.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBVersionConfig.java similarity index 96% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbVersionConfig.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBVersionConfig.java index a8d8e992003..3dbb8db8bb6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/FedbVersionConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBVersionConfig.java @@ -27,7 +27,7 @@ * @date 2020/6/11 11:34 AM */ @Slf4j -public class FedbVersionConfig { +public class OpenMLDBVersionConfig { public static final Properties CONFIG = Tool.getProperties("fesql_version.properties"); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java deleted file mode 100644 index 9c87fee9c1c..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProvider.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com._4paradigm.openmldb.java_sdk_test.entity; - - -import com._4paradigm.openmldb.test_common.model.CaseFile; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import lombok.Data; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.collections.Lists; -import org.yaml.snakeyaml.Yaml; - -import java.io.FileInputStream; -import java.io.FileNotFoundException; - -/** - * @author zhaowei - * @date 2020/6/11 3:19 PM - */ -@Data -public class FesqlDataProvider extends CaseFile { - private static Logger logger = LoggerFactory.getLogger(FesqlDataProvider.class); - public static final String FAIL_SQL_CASE= "FailSQLCase"; - - public static FesqlDataProvider dataProviderGenerator(String caseFile) throws FileNotFoundException { - try { - Yaml yaml = new Yaml(); - FileInputStream testDataStream = new FileInputStream(caseFile); - FesqlDataProvider testDateProvider = yaml.loadAs(testDataStream, FesqlDataProvider.class); - return testDateProvider; - } catch (Exception e) { - logger.error("fail to load yaml:{}", caseFile); - e.printStackTrace(); - FesqlDataProvider nullDataProvider = new FesqlDataProvider(); - SQLCase failCase = new SQLCase(); - failCase.setDesc(FAIL_SQL_CASE); - nullDataProvider.setCases(Lists.newArrayList(failCase)); - return nullDataProvider; - } - } - - -} - diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderList.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBCaseFileList.java similarity index 55% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderList.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBCaseFileList.java index 0fe1a9f94c3..4707852dd60 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderList.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBCaseFileList.java @@ -18,7 +18,8 @@ import com._4paradigm.openmldb.java_sdk_test.common.BaseTest; -import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; +import com._4paradigm.openmldb.test_common.model.CaseFile; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.util.Tool; import org.apache.commons.lang3.StringUtils; @@ -28,24 +29,24 @@ import java.util.ArrayList; import java.util.List; -public class FesqlDataProviderList { - private List dataProviderList = new ArrayList(); +public class OpenMLDBCaseFileList { + private List dataProviderList = new ArrayList(); public List getCases() { List cases = new ArrayList(); - for (FesqlDataProvider dataProvider : dataProviderList) { - for (SQLCase sqlCase : dataProvider.getCases(FedbConfig.FESQL_CASE_LEVELS)) { - if (!StringUtils.isEmpty(FedbConfig.FESQL_CASE_NAME) && - !FedbConfig.FESQL_CASE_NAME.equals(BaseTest.CaseNameFormat(sqlCase))) { + for (CaseFile dataProvider : dataProviderList) { + for (SQLCase sqlCase : dataProvider.getCases(OpenMLDBConfig.FESQL_CASE_LEVELS)) { + if 
(!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_NAME) && + !OpenMLDBConfig.FESQL_CASE_NAME.equals(BaseTest.CaseNameFormat(sqlCase))) { continue; } - if (!StringUtils.isEmpty(FedbConfig.FESQL_CASE_ID) - && !FedbConfig.FESQL_CASE_ID.equals(sqlCase.getId())) { + if (!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_ID) + && !OpenMLDBConfig.FESQL_CASE_ID.equals(sqlCase.getId())) { continue; } - if (!StringUtils.isEmpty(FedbConfig.FESQL_CASE_DESC) - && !FedbConfig.FESQL_CASE_DESC.equals(sqlCase.getDesc())) { + if (!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_DESC) + && !OpenMLDBConfig.FESQL_CASE_DESC.equals(sqlCase.getDesc())) { continue; } cases.add(sqlCase); @@ -54,25 +55,25 @@ public List getCases() { return cases; } - public static FesqlDataProviderList dataProviderGenerator(String[] caseFiles) throws FileNotFoundException { + public static OpenMLDBCaseFileList dataProviderGenerator(String[] caseFiles) throws FileNotFoundException { - FesqlDataProviderList fesqlDataProviderList = new FesqlDataProviderList(); + OpenMLDBCaseFileList fesqlDataProviderList = new OpenMLDBCaseFileList(); for (String caseFile : caseFiles) { - if (!StringUtils.isEmpty(FedbConfig.FESQL_CASE_PATH) - && !FedbConfig.FESQL_CASE_PATH.equals(caseFile)) { + if (!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_PATH) + && !OpenMLDBConfig.FESQL_CASE_PATH.equals(caseFile)) { continue; } - String casePath = Tool.getCasePath(FedbConfig.YAML_CASE_BASE_DIR, caseFile); + String casePath = Tool.getCasePath(OpenMLDBConfig.YAML_CASE_BASE_DIR, caseFile); File file = new File(casePath); if (!file.exists()) { continue; } if (file.isFile()) { - fesqlDataProviderList.dataProviderList.add(FesqlDataProvider.dataProviderGenerator(casePath)); + fesqlDataProviderList.dataProviderList.add(CaseFile.parseCaseFile(casePath)); } else { File[] files = file.listFiles(f -> f.getName().endsWith(".yaml")); for (File f : files) { - 
fesqlDataProviderList.dataProviderList.add(FesqlDataProvider.dataProviderGenerator(f.getAbsolutePath())); + fesqlDataProviderList.dataProviderList.add(CaseFile.parseCaseFile(f.getAbsolutePath())); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java index 0a7844f9403..6ef47384552 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java @@ -44,7 +44,7 @@ public abstract class BaseExecutor implements IExecutor{ public void run() { String className = Thread.currentThread().getStackTrace()[2].getClassName(); String methodName = Thread.currentThread().getStackTrace()[2].getMethodName(); - System.out.println(className+"."+methodName+":"+fesqlCase.getDesc() + " Begin!"); + System.out.println(className+"."+methodName+":"+fesqlCase.getCaseFileName()+":"+fesqlCase.getDesc() + " Begin!"); logger.info(className+"."+methodName+":"+fesqlCase.getDesc() + " Begin!"); boolean verify = false; try { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index 01871b43532..8cadffc888c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -133,7 +133,7 @@ public void tearDown(String version,SqlExecutor executor) { if(table.isDrop()) { String drop = "drop table " + table.getName() + ";"; String tableDBName = table.getDb().isEmpty() ? dbName : table.getDb(); -// FesqlUtil.ddl(executor, tableDBName, drop); + SDKUtil.ddl(executor, tableDBName, drop); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java index 670a8727ebb..084241a5a1b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java @@ -16,6 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.executor; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -106,11 +107,11 @@ private static BaseSQLExecutor getDDLExecutor(SqlExecutor sqlExecutor, SQLCase f return executor; } private static BaseSQLExecutor getFeBatchQueryExecutor(SqlExecutor sqlExecutor, SQLCase fesqlCase, SQLCaseType type) { -// if (FedbConfig.isCluster()) { -// log.info("cluster unsupport batch query mode"); -// reportLog.info("cluster unsupport batch query mode"); -// return new NullExecutor(sqlExecutor, fesqlCase, type); -// } + if (OpenMLDBConfig.isCluster()) { + log.info("cluster unsupport batch query mode"); + reportLog.info("cluster unsupport batch query mode"); + 
return new NullExecutor(sqlExecutor, fesqlCase, type); + } BaseSQLExecutor executor = null; executor = new BatchSQLExecutor(sqlExecutor, fesqlCase, type); return executor; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index f5dc7dcc51e..4ab6e19bd30 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; @@ -149,7 +149,7 @@ public boolean verify() { logger.info("skip case in rtidb request mode: {}", fesqlCase.getDesc()); return false; } - if (FedbConfig.isCluster() && + if (OpenMLDBConfig.isCluster() && null != fesqlCase.getMode() && fesqlCase.getMode().contains("cluster-unsupport")) { logger.info("cluster-unsupport, skip case in cluster request mode: {}", fesqlCase.getDesc()); return false; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 35bdf046de3..f8bf184e5e3 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -112,7 +112,7 @@ public void tearDown(String version,SqlExecutor executor) { } for (String spName : spNames) { String drop = "drop procedure " + spName + ";"; -// FesqlUtil.ddl(executor, dbName, drop); + SDKUtil.ddl(executor, dbName, drop); } super.tearDown(version,executor); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/report/AddAttachmentListener.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/report/AddAttachmentListener.java index 37274717159..7201f4d3436 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/report/AddAttachmentListener.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/report/AddAttachmentListener.java @@ -16,16 +16,14 @@ package com._4paradigm.openmldb.java_sdk_test.report; -import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; import com._4paradigm.openmldb.test_common.common.ReportLog; import io.qameta.allure.Attachment; -import org.apache.commons.collections4.CollectionUtils; import org.testng.IHookCallBack; import org.testng.IHookable; import org.testng.ITestResult; import org.yaml.snakeyaml.Yaml; -import java.util.Arrays; import java.util.List; /** @@ -53,7 +51,7 @@ public String addCase(Object obj){ @Override public void run(IHookCallBack callBack, ITestResult testResult) { callBack.runTestMethod(testResult); - 
if(FedbConfig.ADD_REPORT_LOG&&testResult.getThrowable()!=null) { + if(OpenMLDBConfig.ADD_REPORT_LOG&&testResult.getThrowable()!=null) { Object[] parameters = testResult.getParameters(); if(parameters!=null&¶meters.length>0) { Object parameter = testResult.getParameters()[0]; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java index 13334233573..a5542147b8d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/util/CaseOutputUtil.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.util; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlDataProvider; +import com._4paradigm.openmldb.test_common.model.CaseFile; import com._4paradigm.openmldb.test_common.model.SQLCase; import org.apache.poi.hssf.usermodel.HSSFWorkbook; import org.apache.poi.ss.usermodel.*; @@ -111,9 +111,9 @@ public static void genExcel(String dPath,List casePath,String outPath){ } public static List getCase(String path){ - FesqlDataProvider dp = null; + CaseFile dp = null; try { - dp = FesqlDataProvider.dataProviderGenerator(path); + dp = CaseFile.parseCaseFile(path); } catch (FileNotFoundException e) { e.printStackTrace(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fesql.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fesql.properties deleted file mode 100644 index e93f94b4eb8..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/fesql.properties +++ /dev/null @@ -1,26 +0,0 @@ - -#配置zk地址, 
和集群启动配置中的zk_cluster保持一致 -qa_zk_cluster=172.27.128.37:10000 -#配置集群的zk根路径, 和集群启动配置中的zk_root_path保持一致 -qa_zk_root_path=/fedb -qa_tb_endpoint_0=172.27.128.37:10003 -qa_tb_endpoint_1=172.27.128.37:10004 -qa_tb_endpoint_2=172.27.128.37:10005 -qa_versions=2021-02-06 -qa_init_version_env=false - -#配置zk地址, 和集群启动配置中的zk_cluster保持一致 -#standalone_zk_cluster=127.0.0.1:6181 -#配置集群的zk根路径, 和集群启动配置中的zk_root_path保持一致 -#standalone_zk_root_path=/onebox -#standalone_versions=2021-02-06 - -#配置zk地址, 和集群启动配置中的zk_cluster保持一致 -#cluster_zk_cluster=127.0.0.1:6181 -#配置集群的zk根路径, 和集群启动配置中的zk_root_path保持一致 -#cluster_zk_root_path=/cluster - -# github -#zk_url=https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz -# gitlab -#zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties new file mode 100644 index 00000000000..53d08e05d1b --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -0,0 +1,4 @@ +# memory/ssd/hdd +table_storage_mode=ssd + + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java index e1a5e20fda8..d056c980a84 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java @@ -16,10 +16,10 @@ package com._4paradigm.openmldb.java_sdk_test.auto_gen_case; 
+import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; -import com._4paradigm.openmldb.java_sdk_test.common.FedbConfig; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -42,15 +42,15 @@ */ @Slf4j @Feature("AutoCase") -public class AutoGenCaseTest extends FedbTest { +public class AutoGenCaseTest extends OpenMLDBTest { private Map executorMap = new HashMap<>(); private Map fedbInfoMap = new HashMap<>(); @BeforeClass public void beforeClass(){ - if(FedbConfig.INIT_VERSION_ENV) { - FedbConfig.VERSIONS.forEach(version -> { + if(OpenMLDBConfig.INIT_VERSION_ENV) { + OpenMLDBConfig.VERSIONS.forEach(version -> { OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); openMLDBDeploy.setCluster("cluster".equals(OpenMLDBGlobalVar.env)); OpenMLDBInfo fedbInfo = openMLDBDeploy.deployCluster(2, 3); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java index 312b88e4239..c8240fc74c0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import 
com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -33,7 +33,7 @@ */ @Slf4j @Feature("DML") -public class DMLTest extends FedbTest { +public class DMLTest extends OpenMLDBTest { @Test(dataProvider = "getCase") @Yaml(filePaths = "function/dml/multi_insert.yaml") @Story("multi-insert") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/MultiDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/MultiDBTest.java index f33902ebdfd..f3c61e859d7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/MultiDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/MultiDBTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -33,7 +33,7 @@ */ @Slf4j @Feature("MultiDBTest") -public class MultiDBTest extends FedbTest { +public class MultiDBTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java index 8bf027d1803..af413747a41 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java @@ -1,7 +1,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v030; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.test_common.util.TypeUtil; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.Column; @@ -18,7 +18,7 @@ @Slf4j @Feature("SchemaTest") -public class SchemaTest extends FedbTest { +public class SchemaTest extends OpenMLDBTest { @Story("schema-sdk") // @Test public void testHaveIndexAndOption() throws SQLException { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/DeploymentTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/DeploymentTest.java index 9befe7cfc69..f61f774527a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/DeploymentTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/DeploymentTest.java @@ -16,8 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v040; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; -import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; +import 
com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -33,7 +32,7 @@ */ @Slf4j @Feature("deploy") -public class DeploymentTest extends FedbTest { +public class DeploymentTest extends OpenMLDBTest { @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = "function/deploy/test_create_deploy.yaml") @Story("create") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/ExpressTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/ExpressTest.java index 7d3a7774286..30da353292c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/ExpressTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/ExpressTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v040; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,7 +32,7 @@ */ @Slf4j @Feature("Express") -public class ExpressTest extends FedbTest { +public class ExpressTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java index 3cda7d7ef96..79b752477e6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v040; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,7 +32,7 @@ */ @Slf4j @Feature("Function") -public class FunctionTest extends FedbTest { +public class FunctionTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java index 889400f75c8..33222be727a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/OutInTest.java @@ -1,7 +1,6 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v040; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; -import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import 
com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -13,7 +12,7 @@ @Slf4j @Feature("Out-In") -public class OutInTest extends FedbTest { +public class OutInTest extends OpenMLDBTest { // @Test(dataProvider = "getCase") // @Yaml(filePaths = "function/out_in/test_out_in.yaml") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java index 3eb64ea9d95..ce976b51f04 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java @@ -1,6 +1,6 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v050; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -10,7 +10,7 @@ import org.testng.annotations.Test; @Slf4j -public class DiskTableTest extends FedbTest { +public class DiskTableTest extends OpenMLDBTest { @Test(dataProvider = "getCase") @Yaml(filePaths = "function/disk_table/disk_table.yaml") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java 
index e5b97430abb..7f6426141cc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java @@ -1,6 +1,6 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v050; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -10,7 +10,7 @@ import org.testng.annotations.Test; @Slf4j -public class LongWindowTest extends FedbTest{ +public class LongWindowTest extends OpenMLDBTest { @Test(dataProvider = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/BatchRequestTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/BatchRequestTest.java index 2309ce05ef6..1452ef8a56a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/BatchRequestTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/BatchRequestTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -26,7 +26,7 @@ import 
org.testng.annotations.Test; @Feature("BatchTest") -public class BatchRequestTest extends FedbTest { +public class BatchRequestTest extends OpenMLDBTest { @Story("BatchRequest") @Test(dataProvider = "getCase") @Yaml(filePaths = "function/test_batch_request.yaml") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DDLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DDLTest.java index b734d196e4a..ffd3a28a351 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DDLTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,7 +32,7 @@ */ @Slf4j @Feature("DDL") -public class DDLTest extends FedbTest { +public class DDLTest extends OpenMLDBTest { @Test(dataProvider = "getCase") @Yaml(filePaths = "function/ddl/test_create.yaml") @Story("create") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DMLTest.java index 35b7fd9275e..80a6c907ba0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DMLTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DMLTest.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -33,7 +33,7 @@ */ @Slf4j @Feature("DML") -public class DMLTest extends FedbTest { +public class DMLTest extends OpenMLDBTest { @Test(dataProvider = "getCase") @Yaml(filePaths = {"function/dml/test_insert.yaml"}) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java index 7f0a7f72251..ba8a926af53 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,7 +32,7 @@ */ @Slf4j @Feature("Express") -public class ExpressTest extends FedbTest { +public class ExpressTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FZCaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FZCaseTest.java index 5e4115e67dc..0d34e1f744f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FZCaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FZCaseTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,7 +32,7 @@ */ @Slf4j @Feature("FZCase") -public class FZCaseTest extends FedbTest { +public class FZCaseTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase", enabled = false) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java index a4d2756fd22..c2eea0d85c9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import 
com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,7 +32,7 @@ */ @Slf4j @Feature("Function") -public class FunctionTest extends FedbTest { +public class FunctionTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java index 76f980404c5..02dde710286 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,29 +32,29 @@ */ @Slf4j @Feature("Lastjoin") -public class LastJoinTest extends FedbTest { +public class LastJoinTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/join/test_lastjoin_simple.yaml"}) + @Yaml(filePaths = {"function/join/"}) public void testLastJoin(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = 
{"function/join/test_lastjoin_simple.yaml"}) + @Yaml(filePaths = {"function/join/"}) public void testLastJoinRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") - // @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/join/test_lastjoin_simple.yaml"}) + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/join/"}) public void testLastJoinRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kRequestWithSp).run(); } @Story("requestWithSpAysn") - // @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/join/test_lastjoin_simple.yaml"}) + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/join/"}) public void testLastJoinRequestModeWithSpAsync(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ParameterQueryTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ParameterQueryTest.java index 6388768ff52..3b28fd9e9e4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ParameterQueryTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ParameterQueryTest.java @@ -15,7 +15,7 @@ */ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import 
com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -28,7 +28,7 @@ @Slf4j @Feature("ParameterQueryTest") -public class ParameterQueryTest extends FedbTest { +public class ParameterQueryTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") @Yaml(filePaths = {"query/parameterized_query.yaml"}) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/SelectTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/SelectTest.java index 2636d774be8..28266fad214 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/SelectTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/SelectTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -33,7 +33,7 @@ */ @Slf4j @Feature("SelectTest") -public class SelectTest extends FedbTest { +public class SelectTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java index ef5482aca85..90d2d2ca187 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.cluster.v230; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -32,7 +32,7 @@ */ @Slf4j @Feature("Window") -public class WindowTest extends FedbTest { +public class WindowTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") @@ -44,7 +44,9 @@ public void testWindowBatch(SQLCase testCase) throws Exception { } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/test_current_row.yaml"}) + @Yaml(filePaths = {"function/window/", + "function/cluster/", + "function/test_index_optimized.yaml"}) public void testWindowRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @@ -89,7 +91,7 @@ public void testWindowRequestModeWithSp2(SQLCase testCase) throws Exception { //暂时不支持 @Story("requestWithSp") - @Test(dataProvider = "getCase") +// @Test(dataProvider = "getCase") @Yaml(filePaths = {"function/window/test_window_union_cluster.yaml"}) public void testWindowCLI(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kClusterCLI).run(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java index e7a3492dc4c..9d181d3eaba 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java @@ -15,8 +15,8 @@ */ package com._4paradigm.openmldb.java_sdk_test.diff_test; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlDataProviderList; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -34,10 +34,10 @@ */ @Slf4j @Feature("diff sql result") -public class DiffResultTest extends FedbTest { +public class DiffResultTest extends OpenMLDBTest { @DataProvider() public Object[] getCreateData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{"/integration/v1/test_create.yaml"}); return dp.getCases().toArray(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java index 32d3a1386e8..3515ec78641 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.diff_test; import com._4paradigm.openmldb.java_sdk_test.common.JDBCTest; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlDataProviderList; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -38,7 +38,7 @@ public class MysqlTest extends JDBCTest { @DataProvider() public Object[] getCreateData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{"/integration/v1/test_create.yaml"}); return dp.getCases().toArray(); } @@ -51,7 +51,7 @@ public void testCreate(SQLCase testCase){ @DataProvider() public Object[] getInsertData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{"/integration/v1/test_insert.yaml"}); return dp.getCases().toArray(); } @@ -64,7 +64,7 @@ public void testInsert(SQLCase testCase){ @DataProvider() public Object[] getSelectData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{ "/integration/v1/select/test_select_sample.yaml", "/integration/v1/select/test_sub_select.yaml" @@ -80,7 +80,7 @@ public void testSelect(SQLCase testCase){ @DataProvider() public Object[] getFunctionData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{ "/integration/v1/function/", }); @@ 
-95,7 +95,7 @@ public void testFunction(SQLCase testCase){ @DataProvider() public Object[] getExpressionData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{ "/integration/v1/expression/", }); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java index 4915eee79f1..06a29d9cafd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java @@ -17,7 +17,7 @@ import com._4paradigm.openmldb.java_sdk_test.common.JDBCTest; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlDataProviderList; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -39,7 +39,7 @@ public class Sqlite3Test extends JDBCTest { @DataProvider() public Object[] getCreateData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{"/integration/v1/test_create.yaml"}); return dp.getCases().toArray(); } @@ -52,7 +52,7 @@ public void testCreate(SQLCase testCase){ @DataProvider() public Object[] getInsertData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new 
String[]{"/integration/v1/test_insert.yaml"}); return dp.getCases().toArray(); } @@ -65,7 +65,7 @@ public void testInsert(SQLCase testCase){ @DataProvider() public Object[] getSelectData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{ "/integration/v1/select/test_select_sample.yaml", "/integration/v1/select/test_sub_select.yaml" @@ -81,7 +81,7 @@ public void testSelect(SQLCase testCase){ @DataProvider() public Object[] getFunctionData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{ "/integration/v1/function/", }); @@ -96,7 +96,7 @@ public void testFunction(SQLCase testCase){ @DataProvider() public Object[] getExpressionData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{ "/integration/v1/expression/", }); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java index 229b62c8270..642e7920bb4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/entity/FesqlDataProviderTest.java @@ -17,8 +17,8 @@ package com._4paradigm.openmldb.java_sdk_test.entity; +import com._4paradigm.openmldb.test_common.model.CaseFile; import com._4paradigm.openmldb.test_common.util.DataUtil; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import 
com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import org.testng.Assert; @@ -33,7 +33,7 @@ public class FesqlDataProviderTest { @Test public void getDataProviderTest() throws FileNotFoundException { - FesqlDataProvider provider = FesqlDataProvider.dataProviderGenerator("/yaml/rtidb_demo.yaml"); + CaseFile provider = CaseFile.parseCaseFile("/yaml/rtidb_demo.yaml"); Assert.assertNotNull(provider); Assert.assertEquals(3, provider.getCases().size()); @@ -43,7 +43,7 @@ public void getDataProviderTest() throws FileNotFoundException { @Test public void getInsertTest() throws FileNotFoundException { - FesqlDataProvider provider = FesqlDataProvider.dataProviderGenerator("/yaml/rtidb_demo2.yaml"); + CaseFile provider = CaseFile.parseCaseFile("/yaml/rtidb_demo2.yaml"); Assert.assertNotNull(provider); Assert.assertEquals(1, provider.getCases().size()); SQLCase sqlCase = provider.getCases().get(0); @@ -57,7 +57,7 @@ public void getInsertTest() throws FileNotFoundException { @Test public void getInserstTest() throws FileNotFoundException { - FesqlDataProvider provider = FesqlDataProvider.dataProviderGenerator("/yaml/rtidb_demo2.yaml"); + CaseFile provider = CaseFile.parseCaseFile("/yaml/rtidb_demo2.yaml"); Assert.assertNotNull(provider); Assert.assertEquals(1, provider.getCases().size()); SQLCase sqlCase = provider.getCases().get(0); @@ -74,7 +74,7 @@ public void getInserstTest() throws FileNotFoundException { @Test public void getCreateTest() throws FileNotFoundException { - FesqlDataProvider provider = FesqlDataProvider.dataProviderGenerator("/yaml/rtidb_demo2.yaml"); + CaseFile provider = CaseFile.parseCaseFile("/yaml/rtidb_demo2.yaml"); Assert.assertNotNull(provider); Assert.assertEquals(1, provider.getCases().size()); SQLCase sqlCase = provider.getCases().get(0); @@ -92,7 +92,7 @@ public void getCreateTest() throws FileNotFoundException { @Test public void converRowsTest() throws ParseException, 
FileNotFoundException { - FesqlDataProvider provider = FesqlDataProvider.dataProviderGenerator("/yaml/rtidb_demo.yaml"); + CaseFile provider = CaseFile.parseCaseFile("/yaml/rtidb_demo.yaml"); Assert.assertNotNull(provider); Assert.assertEquals(3, provider.getCases().size()); SQLCase sqlCase = provider.getCases().get(0); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java index ac9cac4d656..eaa3fccf98f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DDLTest.java @@ -16,7 +16,6 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java index 7300ef87839..4ebad5a198a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/ExpressTest.java @@ -16,7 +16,6 @@ package 
com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java index f2f3f7b57fe..aa3b9d6a5e1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/FunctionTest.java @@ -16,7 +16,6 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java index 40257abaea0..d4b26b531fe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/LastJoinTest.java @@ -16,7 +16,6 @@ package 
com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java index dece5e6f276..7b2698034fb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/MultiDBTest.java @@ -16,14 +16,12 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.openmldb.test_common.provider.Yaml; import io.qameta.allure.Feature; -import io.qameta.allure.Step; import io.qameta.allure.Story; import lombok.extern.slf4j.Slf4j; import org.testng.annotations.Test; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java index f267573ba3a..f5cc9f9b1a1 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/OutInTest.java @@ -1,6 +1,5 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java index ca1a2b15ad1..fef64b4c428 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/SelectTest.java @@ -16,7 +16,6 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java index 9d56b6378e8..ac9bc341d5d 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/WindowTest.java @@ -16,7 +16,6 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java index 5220e458adb..45cdb5a3142 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/DebugTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.temp; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -34,7 +34,7 @@ */ @Slf4j @Feature("DebugTest") -public class DebugTest extends FedbTest { +public class DebugTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java index affe4c20734..f5d7f6bd0de 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestProcedure.java @@ -1,7 +1,7 @@ package com._4paradigm.openmldb.java_sdk_test.temp; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -public class TestProcedure extends FedbTest { +public class TestProcedure extends OpenMLDBTest { } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java index 99e7a89c23b..62a5b20d24e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java @@ -16,8 +16,8 @@ package com._4paradigm.openmldb.java_sdk_test.ut; -import com._4paradigm.openmldb.java_sdk_test.common.FedbTest; -import com._4paradigm.openmldb.java_sdk_test.entity.FesqlDataProviderList; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; +import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -36,11 +36,11 @@ */ @Slf4j @Feature("UT") -public class UniqueExpectTest extends FedbTest { 
+public class UniqueExpectTest extends OpenMLDBTest { @DataProvider() public Object[] getData() throws FileNotFoundException { - FesqlDataProviderList dp = FesqlDataProviderList + OpenMLDBCaseFileList dp = OpenMLDBCaseFileList .dataProviderGenerator(new String[]{"/integration/ut_case/test_unique_expect.yaml"}); return dp.getCases().toArray(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml index 1a98665cb2c..b4af16ea5a5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml @@ -4,15 +4,15 @@ - - - - - - - - - + + + + + + + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 61a092722ba..90429813d43 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.5.0 - 0.5.0 + 0.5.0-SNAPSHOT + 0.5.0-macos-SNAPSHOT diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java index 41f83df837b..d910956af36 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java @@ -16,13 +16,19 @@ package com._4paradigm.openmldb.test_common.model; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com.google.common.collect.Lists; 
import lombok.Data; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; import org.apache.commons.lang3.SerializationUtils; import org.apache.commons.lang3.StringUtils; +import org.yaml.snakeyaml.Yaml; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -30,14 +36,40 @@ import java.util.stream.Collectors; @Data +@Slf4j public class CaseFile { private String db; private String version; private List debugs; private List cases; + + private String filePath; + private String fileName; // ANSISQL HybridSQL SQLITE3 MYSQL private List sqlDialect = Lists.newArrayList("ANSISQL"); + public static final String FAIL_SQL_CASE= "FailSQLCase"; + + public static CaseFile parseCaseFile(String caseFilePath) throws FileNotFoundException { + try { + Yaml yaml = new Yaml(); + File file = new File(caseFilePath); + FileInputStream testDataStream = new FileInputStream(file); + CaseFile caseFile = yaml.loadAs(testDataStream, CaseFile.class); + caseFile.setFilePath(file.getAbsolutePath()); + caseFile.setFileName(file.getName()); + return caseFile; + } catch (Exception e) { + log.error("fail to load yaml:{}", caseFilePath); + e.printStackTrace(); + CaseFile nullCaseFile = new CaseFile(); + SQLCase failCase = new SQLCase(); + failCase.setDesc(FAIL_SQL_CASE); + nullCaseFile.setCases(org.testng.collections.Lists.newArrayList(failCase)); + return nullCaseFile; + } + } + public List getCases(List levels) { if(!CollectionUtils.isEmpty(debugs)){ return getCases(); @@ -54,7 +86,14 @@ public List getCases() { } List testCaseList = new ArrayList<>(); List debugs = getDebugs(); + cases = cases.stream().filter(c->c.isSupportDiskTable()).peek(c->c.setStorage(OpenMLDBGlobalVar.tableStorageMode)).collect(Collectors.toList()); for (SQLCase tmpCase : cases) { + tmpCase.setCaseFileName(fileName); + 
// TODO 排除 create case +// List inputs = tmpCase.getInputs(); +// if(CollectionUtils.isNotEmpty(inputs)) { +// inputs.forEach(t -> t.setStorage(OpenMLDBGlobalVar.tableStorageMode)); +// } if (null == tmpCase.getDb()) { tmpCase.setDb(getDb()); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index 5840e48aa43..9f6877ae3d3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -16,6 +16,7 @@ package com._4paradigm.openmldb.test_common.model; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import lombok.Data; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.RandomStringUtils; @@ -27,6 +28,7 @@ @Data public class SQLCase implements Serializable{ + private String caseFileName; private int level = 0; private String id; private String desc; @@ -84,6 +86,23 @@ public static String genAutoName() { return "auto_" + RandomStringUtils.randomAlphabetic(8); } + public boolean isSupportDiskTable(){ + if(CollectionUtils.isEmpty(inputs)){ + return false; + } + for(InputDesc input:inputs){ + if (CollectionUtils.isNotEmpty(input.getColumns())&&CollectionUtils.isNotEmpty(input.getIndexs())&& StringUtils.isEmpty(input.getCreate())) { + return true; + } + } + return false; + } + public void setStorage(String storageMode){ + if(CollectionUtils.isNotEmpty(inputs)) { + inputs.forEach(t -> t.setStorage(storageMode)); + } + } + public String getProcedure(String sql) { return buildCreateSpSQLFromColumnsIndexs(spName, sql, inputs.get(0).getColumns()); } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java index e8ffe6fef94..767f6009b45 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java @@ -30,4 +30,5 @@ public class OpenMLDBGlobalVar { public static String openMLDBPath; public static OpenMLDBInfo mainInfo; public static String dbName = "test_zw"; + public static String tableStorageMode = "memory"; } From a8d38e1e35a0dd3380ca1169fa9cc4b538fc8d8a Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 25 Jul 2022 13:33:50 +0800 Subject: [PATCH 078/172] support create index by sdk --- .github/workflows/integration-test-src.yml | 5 +- cases/function/ddl/test_create.yaml | 1 + cases/function/ddl/test_create_index.yaml | 1 + cases/function/ddl/test_create_no_index.yaml | 1 + cases/function/ddl/test_options.yaml | 1 + cases/function/ddl/test_ttl.yaml | 1 + .../java_sdk_test/common/OpenMLDBTest.java | 8 +-- .../java_sdk_test/common/StandaloneTest.java | 6 +- .../src/main/resources/run_case.properties | 2 +- .../cluster/{v230 => sql_test}/DDLTest.java | 42 ++++++----- .../{test_all.xml => test_cluster.xml} | 8 +-- .../test_suite/test_standalone.xml | 2 +- .../test_common/bean/OpenMLDBResult.java | 2 +- ...OpenMLDBSchema.java => OpenMLDBTable.java} | 3 +- .../openmldb/test_common/bean/SQLType.java | 19 ++--- .../chain/result/AbstractResultHandler.java | 14 ++-- .../chain/result/DescResultParser.java | 71 +++++++++++++++++++ .../chain/result/ResultChainManager.java | 26 ------- .../chain/result/ResultParserManager.java | 24 +++++++ 
.../chain/result/ResultSetHandler.java | 42 ----------- .../openmldb/test_common/model/CaseFile.java | 10 ++- .../openmldb/test_common/model/SQLCase.java | 2 + .../test_common/openmldb/SDKClient.java | 6 -- .../test_common/util/CommandResultUtil.java | 6 +- .../openmldb/test_common/util/SDKUtil.java | 41 +++++------ .../openmldb/test_common/util/SchemaUtil.java | 6 +- 26 files changed, 198 insertions(+), 152 deletions(-) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/DDLTest.java (90%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/{test_all.xml => test_cluster.xml} (74%) rename test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/{OpenMLDBSchema.java => OpenMLDBTable.java} (92%) create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/DescResultParser.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultParserManager.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index e0d5ed692c0..7e19e9fc2d4 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -75,7 +75,7 @@ jobs: make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux - name: test - run: source /root/.bashrc && bash 
test/steps/openmldb-sdk-test-java.sh -b SRC -c test_all.xml -d cluster -l "0" + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_cluster.xml -d cluster -l "0" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 @@ -100,7 +100,7 @@ jobs: make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_all.xml -d cluster -l "1,2,3,4,5" + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_cluster.xml -d cluster -l "1,2,3,4,5" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 @@ -180,6 +180,7 @@ jobs: name: python-sdk-standalone-0-src-${{ github.sha }} path: | python/report/allure-results + # - name: allure-report # uses: simple-elf/allure-report-action@master # if: always() diff --git a/cases/function/ddl/test_create.yaml b/cases/function/ddl/test_create.yaml index ee98e8a6c2d..7319230b3ac 100644 --- a/cases/function/ddl/test_create.yaml +++ b/cases/function/ddl/test_create.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/ddl/test_create_index.yaml b/cases/function/ddl/test_create_index.yaml index 561a238ee4d..5549a5db039 100644 --- a/cases/function/ddl/test_create_index.yaml +++ b/cases/function/ddl/test_create_index.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/ddl/test_create_no_index.yaml b/cases/function/ddl/test_create_no_index.yaml index 6d8a8b40a9d..66f859ea7a5 100644 --- a/cases/function/ddl/test_create_no_index.yaml +++ b/cases/function/ddl/test_create_no_index.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/ddl/test_options.yaml b/cases/function/ddl/test_options.yaml index a6a642ac98b..1c8ed43ad7d 100644 --- 
a/cases/function/ddl/test_options.yaml +++ b/cases/function/ddl/test_options.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/ddl/test_ttl.yaml b/cases/function/ddl/test_ttl.yaml index 7fb6582f47e..ecd5c9232c9 100644 --- a/cases/function/ddl/test_ttl.yaml +++ b/cases/function/ddl/test_ttl.yaml @@ -1,5 +1,6 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java index 77569143566..c502f83a7b3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -41,17 +41,17 @@ public class OpenMLDBTest extends BaseTest { protected static SqlExecutor executor; @BeforeTest() - @Parameters({"env","version","fedbPath"}) - public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String fedbPath) throws Exception { + @Parameters({"env","version","openMLDBPath"}) + public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String openMLDBPath) throws Exception { OpenMLDBGlobalVar.env = env; if(env.equalsIgnoreCase("cluster")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; - openMLDBDeploy.setOpenMLDBPath(fedbPath); + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); openMLDBDeploy.setCluster(true); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else if(env.equalsIgnoreCase("standalone")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); - openMLDBDeploy.setOpenMLDBPath(fedbPath); + 
openMLDBDeploy.setOpenMLDBPath(openMLDBPath); openMLDBDeploy.setCluster(false); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index 174ca23e677..ac76d7369d6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -39,12 +39,12 @@ public class StandaloneTest extends BaseTest { protected static SqlExecutor executor; @BeforeTest() - @Parameters({"env","version","fedbPath"}) - public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String fedbPath) throws Exception { + @Parameters({"env","version","openMLDBPath"}) + public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String openMLDBPath) throws Exception { OpenMLDBGlobalVar.env = env; if(env.equalsIgnoreCase("standalone")){ OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); - openMLDBDeploy.setOpenMLDBPath(fedbPath); + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployStandalone(); }else{ OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index 53d08e05d1b..e3252014c95 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,4 +1,4 @@ # memory/ssd/hdd -table_storage_mode=ssd +#table_storage_mode=ssd diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DDLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java similarity index 90% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DDLTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java index ffd3a28a351..c5b9ecd3991 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; @@ -40,39 +40,43 @@ public void testCreate(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } + @Yaml(filePaths = "function/ddl/test_create.yaml")//7 表名为非保留关键字 没过 + @Story("create") + @Test(dataProvider = "getCase",enabled = false) + public void testCreateByCli(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } + @Test(dataProvider = "getCase") @Yaml(filePaths = "function/ddl/test_ttl.yaml") @Story("ttl") public void testTTL(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } - - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/ddl/test_options.yaml") - @Story("options") - public void testOptions(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); - } - @Test(dataProvider = "getCase",enabled = false) + @Yaml(filePaths = "function/ddl/test_ttl.yaml") + @Story("ttl") + public void testTTLByCli(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } + @Test(dataProvider = "getCase") @Yaml(filePaths = "function/ddl/test_create_index.yaml") @Story("create_index") public void testCreateIndex(SQLCase testCase){ - ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } - @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/ddl/test_create.yaml")//7 表名为非保留关键字 没过 - @Story("create") - public void testCreateByCli(SQLCase testCase){ + @Yaml(filePaths = "function/ddl/test_create_index.yaml") + @Story("create_index") + public void testCreateIndexByCli(SQLCase testCase){ ExecutorFactory.build(testCase, 
SQLCaseType.kClusterCLI).run(); } - @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/ddl/test_ttl.yaml") - @Story("ttl") - public void testTTLByCli(SQLCase testCase){ - ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_options.yaml") + @Story("options") + public void testOptions(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase",enabled = false) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_all.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster.xml similarity index 74% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_all.xml rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster.xml index 9709ef4e8a0..b2c7ed735dd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_all.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster.xml @@ -1,10 +1,10 @@ - - - + + + - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_standalone.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_standalone.xml index b3212c71f7b..1673b440fce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_standalone.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_standalone.xml @@ -4,7 +4,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java index eb029175cb6..022a1b80a80 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java @@ -39,7 +39,7 @@ public class OpenMLDBResult { private List> result; private List columnNames; private List columnTypes; - private OpenMLDBSchema schema; + private OpenMLDBTable schema; private OpenmldbDeployment deployment; private List deployments; private Integer deploymentCount; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBSchema.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBTable.java similarity index 92% rename from test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBSchema.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBTable.java index 9ae135f5dcb..941ba3b172b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBSchema.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBTable.java @@ -20,7 +20,8 @@ import java.util.List; @Data -public class OpenMLDBSchema { +public class OpenMLDBTable { private List columns; private List indexs; + private String storageMode; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java index 1f24d82c0ea..ce2898b03c7 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/SQLType.java @@ -16,25 +16,28 @@ public enum SQLType { CREATE, DROP, USE, - SET + SET, + DESC ; public static final Set RESULT_SET = Sets.newHashSet(SELECT, SHOW, DEPLOY); // public static final List VOID = Lists.newArrayList(CREATE,DROP,USE,INSERT); public static SQLType parseSQLType(String sql){ - if(sql.toLowerCase().startsWith("select")){ + if(sql.toLowerCase().startsWith("select ")){ return SELECT; - }else if (sql.toLowerCase().startsWith("insert into")) { + }else if (sql.toLowerCase().startsWith("insert into ")) { return INSERT; - }else if (sql.toLowerCase().startsWith("show")) { + }else if (sql.toLowerCase().startsWith("show ")) { return SHOW; - }else if (sql.toLowerCase().startsWith("create")) { + }else if (sql.toLowerCase().startsWith("create ")) { return CREATE; - }else if (sql.toLowerCase().startsWith("drop")) { + }else if (sql.toLowerCase().startsWith("drop ")) { return DROP; - }else if (sql.toLowerCase().startsWith("use")) { + }else if (sql.toLowerCase().startsWith("use ")) { return USE; - }else if (sql.toLowerCase().startsWith("set")) { + }else if (sql.toLowerCase().startsWith("set ")) { return SET; + }else if (sql.toLowerCase().startsWith("desc ")) { + return DESC; } throw new IllegalArgumentException("no match sql type,sql:"+sql); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java index 0e69158f0e7..115584a7c88 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/AbstractResultHandler.java @@ -29,15 +29,19 @@ public abstract class AbstractResultHandler { public abstract boolean preHandle(SQLType sqlType); - public abstract void onHandle(Statement statement, OpenMLDBResult openMLDBResult); + public abstract void onHandle(OpenMLDBResult openMLDBResult); - public void doHandle(Statement statement, OpenMLDBResult openMLDBResult){ - SQLType sqlType = SQLType.parseSQLType(openMLDBResult.getSql()); + public void doHandle(OpenMLDBResult openMLDBResult){ + String sql = openMLDBResult.getSql(); + SQLType sqlType = SQLType.parseSQLType(sql); if(preHandle(sqlType)){ - onHandle(statement,openMLDBResult); + onHandle(openMLDBResult); + return; } if(nextHandler!=null){ - nextHandler.doHandle(statement,openMLDBResult); + nextHandler.doHandle(openMLDBResult); + return; } + throw new IllegalArgumentException("result parse failed,not support sql type,sql:"+sql); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/DescResultParser.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/DescResultParser.java new file mode 100644 index 00000000000..2177bcca724 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/DescResultParser.java @@ -0,0 +1,71 @@ +package com._4paradigm.openmldb.test_common.chain.result; + +import com._4paradigm.openmldb.jdbc.SQLResultSet; +import com._4paradigm.openmldb.test_common.bean.*; +import com._4paradigm.openmldb.test_common.util.ResultUtil; +import org.testng.collections.Lists; + 
+import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +public class DescResultParser extends AbstractResultHandler { + + @Override + public boolean preHandle(SQLType sqlType) { + return sqlType == SQLType.DESC; + } + + @Override + public void onHandle(OpenMLDBResult openMLDBResult) { + try { + List> resultList = openMLDBResult.getResult(); + List lines = resultList.stream().map(l -> String.valueOf(l.get(0))).collect(Collectors.toList()); + OpenMLDBTable table = new OpenMLDBTable(); + List columns = new ArrayList<>(); + String columnStr = lines.get(0); + String[] ss = columnStr.split("\n"); + for(String s:ss){ + s = s.trim(); + if(s.startsWith("#")||s.startsWith("-")) continue; + String[] infos = s.split("\\s+"); + OpenMLDBColumn openMLDBColumn = new OpenMLDBColumn(); + openMLDBColumn.setId(Integer.parseInt(infos[0])); + openMLDBColumn.setFieldName(infos[1]); + openMLDBColumn.setFieldType(infos[2]); + openMLDBColumn.setNullable(infos[3].equals("YES")); + columns.add(openMLDBColumn); + } + table.setColumns(columns); + String indexStr = lines.get(1); + List indices = new ArrayList<>(); + String[] indexSS = indexStr.split("\n"); + for(String s:indexSS){ + s = s.trim(); + if(s.startsWith("#")||s.startsWith("-")) continue; + String[] infos = s.split("\\s+"); + OpenMLDBIndex openMLDBIndex = new OpenMLDBIndex(); + openMLDBIndex.setId(Integer.parseInt(infos[0])); + openMLDBIndex.setIndexName(infos[1]); + openMLDBIndex.setKeys(Lists.newArrayList(infos[2].split("\\|"))); + openMLDBIndex.setTs(infos[3]); + openMLDBIndex.setTtl(infos[4]); + openMLDBIndex.setTtlType(infos[5]); + indices.add(openMLDBIndex); + } + table.setIndexs(indices); + String storageStr = lines.get(2); + table.setStorageMode(storageStr.split("\n")[2].trim()); + openMLDBResult.setSchema(table); + } catch (Exception e) { + e.printStackTrace(); + 
openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); + throw new RuntimeException(e); + } + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java deleted file mode 100644 index 2bc84bb5ebf..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultChainManager.java +++ /dev/null @@ -1,26 +0,0 @@ -package com._4paradigm.openmldb.test_common.chain.result; - -import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; - -import java.sql.Statement; - -public class ResultChainManager { - private AbstractResultHandler resultHandler; - private ResultChainManager() { - ResultSetHandler selectResultHandler = new ResultSetHandler(); - - resultHandler = selectResultHandler; - } - - private static class ClassHolder { - private static final ResultChainManager holder = new ResultChainManager(); - } - - public static ResultChainManager of() { - return ClassHolder.holder; - } - public void toOpenMLDBResult(Statement statement, OpenMLDBResult openMLDBResult){ - resultHandler.doHandle(statement,openMLDBResult); - } - -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultParserManager.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultParserManager.java new file mode 100644 index 00000000000..906f0e9b3d8 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultParserManager.java @@ -0,0 +1,24 @@ +package 
com._4paradigm.openmldb.test_common.chain.result; + +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; + +public class ResultParserManager { + private AbstractResultHandler resultHandler; + private ResultParserManager() { + DescResultParser selectResultHandler = new DescResultParser(); + + resultHandler = selectResultHandler; + } + + private static class ClassHolder { + private static final ResultParserManager holder = new ResultParserManager(); + } + + public static ResultParserManager of() { + return ClassHolder.holder; + } + public void parseResult(OpenMLDBResult openMLDBResult){ + resultHandler.doHandle(openMLDBResult); + } + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java deleted file mode 100644 index fde971c8bf0..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/chain/result/ResultSetHandler.java +++ /dev/null @@ -1,42 +0,0 @@ -package com._4paradigm.openmldb.test_common.chain.result; - -import com._4paradigm.openmldb.jdbc.SQLResultSet; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.bean.SQLType; -import com._4paradigm.openmldb.test_common.util.ResultUtil; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.List; - -public class ResultSetHandler extends AbstractResultHandler { - - @Override - public boolean preHandle(SQLType sqlType) { - return SQLType.isResultSet(sqlType); - } - - @Override - public void onHandle(Statement statement, OpenMLDBResult openMLDBResult) { - try { - ResultSet resultSet = statement.getResultSet(); - if (resultSet == null) { - openMLDBResult.setOk(false); - 
openMLDBResult.setMsg("executeSQL fail, result is null"); - } else if (resultSet instanceof SQLResultSet){ - SQLResultSet rs = (SQLResultSet)resultSet; - ResultUtil.setSchema(rs.getMetaData(),openMLDBResult); - List> result = ResultUtil.toList(rs); - openMLDBResult.setCount(result.size()); - openMLDBResult.setResult(result); - openMLDBResult.setMsg("success"); - } - } catch (SQLException e) { - e.printStackTrace(); - openMLDBResult.setOk(false); - openMLDBResult.setMsg(e.getMessage()); - throw new RuntimeException(e); - } - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java index d910956af36..1d0329a2697 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java @@ -86,17 +86,21 @@ public List getCases() { } List testCaseList = new ArrayList<>(); List debugs = getDebugs(); - cases = cases.stream().filter(c->c.isSupportDiskTable()).peek(c->c.setStorage(OpenMLDBGlobalVar.tableStorageMode)).collect(Collectors.toList()); + if (!OpenMLDBGlobalVar.tableStorageMode.equals("memory")) { + cases = cases.stream().filter(c->c.isSupportDiskTable()).peek(c->c.setStorage(OpenMLDBGlobalVar.tableStorageMode)).collect(Collectors.toList()); + } for (SQLCase tmpCase : cases) { tmpCase.setCaseFileName(fileName); - // TODO 排除 create case // List inputs = tmpCase.getInputs(); // if(CollectionUtils.isNotEmpty(inputs)) { // inputs.forEach(t -> t.setStorage(OpenMLDBGlobalVar.tableStorageMode)); // } - if (null == tmpCase.getDb()) { + if (StringUtils.isEmpty(tmpCase.getDb())) { tmpCase.setDb(getDb()); } + if 
(StringUtils.isEmpty(tmpCase.getVersion())) { + tmpCase.setVersion(this.getVersion()); + } if(CollectionUtils.isEmpty(tmpCase.getSqlDialect())){ tmpCase.setSqlDialect(sqlDialect); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index 9f6877ae3d3..90f1e54d5ff 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -56,6 +56,8 @@ public class SQLCase implements Serializable{ private Map expectProvider; private List tearDown; + private List excludes; + private String only; public static String formatSql(String sql, int idx, String name) { return sql.replaceAll("\\{" + idx + "\\}", name); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java index 64c9204da22..22e6f6cd44f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/SDKClient.java @@ -1,20 +1,14 @@ package com._4paradigm.openmldb.test_common.openmldb; -import com._4paradigm.openmldb.jdbc.SQLResultSet; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.bean.SQLType; -import 
com._4paradigm.openmldb.test_common.chain.result.ResultChainManager; import com._4paradigm.openmldb.test_common.util.ResultUtil; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.openmldb.test_common.util.WaitUtil; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.collections4.CollectionUtils; import org.testng.Assert; -import org.testng.collections.Lists; -import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java index c20d85c7576..66c51c3e5e0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java @@ -2,7 +2,7 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBSchema; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBTable; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com.google.common.base.Joiner; import org.apache.commons.collections4.CollectionUtils; @@ -32,8 +32,8 @@ private static boolean containsErrorMsg(String s){ ||tmp.contains("distribution element is not")||tmp.contains("is not currently supported") ||tmp.contains("wrong type")||tmp.contains("not a supported object type")||tmp.contains("is not"); } - public static OpenMLDBSchema parseSchema(List lines){ - OpenMLDBSchema schema = new OpenMLDBSchema(); + 
public static OpenMLDBTable parseSchema(List lines){ + OpenMLDBTable schema = new OpenMLDBTable(); List cols = new ArrayList<>(); List indexs = new ArrayList<>(); Iterator it = lines.iterator(); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 8e724ba5cc4..85daa618878 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -17,11 +17,11 @@ package com._4paradigm.openmldb.test_common.util; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBSchema; import com._4paradigm.openmldb.jdbc.CallablePreparedStatement; import com._4paradigm.openmldb.jdbc.SQLResultSet; import com._4paradigm.openmldb.sdk.QueryFuture; import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.chain.result.ResultParserManager; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; @@ -206,30 +206,29 @@ public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String de return null; } logger.info("desc:{}",descSql); - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); + openMLDBResult.setSql(descSql); ResultSet rawRs = executor.executeSQL(dbName, descSql); if (rawRs == null) { - fesqlResult.setOk(false); - fesqlResult.setMsg("executeSQL fail, result is null"); + openMLDBResult.setOk(false); + openMLDBResult.setMsg("executeSQL fail, result is 
null"); } else if (rawRs instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)rawRs; - ResultUtil.setSchema(rs.getMetaData(),fesqlResult); - fesqlResult.setOk(true); - String deployStr = ResultUtil.convertResultSetToListDeploy(rs); - List listDesc = ResultUtil.convertResultSetToListDesc(rs); - String[] strings = deployStr.split("\n"); - List stringList = Arrays.asList(strings); - OpenMLDBSchema openMLDBSchema = SchemaUtil.parseSchema(stringList); - fesqlResult.setSchema(openMLDBSchema); + ResultUtil.setSchema(rs.getMetaData(),openMLDBResult); + openMLDBResult.setOk(true); + List> result = ResultUtil.toList(rs); + openMLDBResult.setResult(result); + ResultParserManager.of().parseResult(openMLDBResult); } catch (Exception e) { - fesqlResult.setOk(false); - fesqlResult.setMsg(e.getMessage()); + e.printStackTrace(); + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); } } - - return fesqlResult; + logger.info("create index result:{}", openMLDBResult); + return openMLDBResult; } @@ -238,17 +237,19 @@ public static OpenMLDBResult createIndex(SqlExecutor executor, String sql) { return null; } logger.info("ddl sql:{}", sql); - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); boolean createOk = false; try { createOk = executor.getStatement().execute(sql); + openMLDBResult.setOk(true); Thread.sleep(10000); } catch (Exception e) { e.printStackTrace(); + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); } - fesqlResult.setOk(createOk); - logger.info("ddl result:{}", fesqlResult); - return fesqlResult; + logger.info("create index result:{}", openMLDBResult); + return openMLDBResult; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java index 
0ef5897eb28..8bfdbb9f291 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SchemaUtil.java @@ -2,7 +2,7 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBSchema; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBTable; import java.util.ArrayList; import java.util.Arrays; @@ -18,8 +18,8 @@ public static int getIndexByColumnName(List columnNames, String columnNa } return -1; } - public static OpenMLDBSchema parseSchema(List lines){ - OpenMLDBSchema schema = new OpenMLDBSchema(); + public static OpenMLDBTable parseSchemaBySDK(List lines){ + OpenMLDBTable schema = new OpenMLDBTable(); List cols = new ArrayList<>(); List indexs = new ArrayList<>(); Iterator it = lines.iterator(); From 189e8f223c97e126a3bbf66ea388c437f02c8f9e Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 09:01:48 +0800 Subject: [PATCH 079/172] modify cicd --- .github/workflows/integration-test-src.yml | 8 + .gitignore | 1 + .../function/cluster/test_cluster_batch.yaml | 1 + cases/function/cluster/test_window_row.yaml | 1 + .../cluster/test_window_row_range.yaml | 1 + .../function/cluster/window_and_lastjoin.yaml | 3 +- cases/function/dml/multi_insert.yaml | 1 + cases/function/dml/test_insert.yaml | 1 + cases/function/dml/test_insert_prepared.yaml | 1 + .../function/expression/test_arithmetic.yaml | 1 + cases/function/expression/test_condition.yaml | 1 + .../{v040 => expression}/test_like.yaml | 1 + cases/function/expression/test_logic.yaml | 1 + cases/function/expression/test_predicate.yaml | 357 +++---- cases/function/expression/test_type.yaml | 1 + cases/function/function/test_calculate.yaml | 1 + 
cases/function/function/test_date.yaml | 1 + .../{v040 => function}/test_like_match.yaml | 1 + cases/function/function/test_string.yaml | 1 + .../function/function/test_udaf_function.yaml | 1 + .../function/function/test_udf_function.yaml | 1 + .../function/join/test_lastjoin_complex.yaml | 4 +- cases/function/join/test_lastjoin_simple.yaml | 3 +- cases/function/select/test_select_sample.yaml | 1 + cases/function/select/test_sub_select.yaml | 1 + cases/function/select/test_where.yaml | 3 +- cases/function/test_index_optimized.yaml | 3 +- cases/function/window/error_window.yaml | 1 + .../{tmp => window}/test_current_row.yaml | 752 ++++++++++++++- cases/function/window/test_maxsize.yaml | 3 + cases/function/window/test_window.yaml | 75 +- .../test_window_exclude_current_time.yaml | 1 + cases/function/window/test_window_row.yaml | 2 + .../window/test_window_row_range.yaml | 7 + cases/function/window/test_window_union.yaml | 883 ++++-------------- .../window/test_window_union_cluster.yaml | 738 --------------- .../test_window_union_cluster_thousand.yaml | 1 + cases/function/window/window_attributes.yaml | 1 + cases/query/const_query.yaml | 1 + .../common/OpenMLDBDeploy.java | 1 + .../src/main/resources/deploy.properties | 3 - .../openmldb-sdk-test/pom.xml | 4 +- .../java_sdk_test/checker/ResultChecker.java | 2 + .../java_sdk_test/common/OpenMLDBConfig.java | 8 +- .../src/main/resources/run_case.properties | 2 +- .../cluster/{v230 => sql_test}/DMLTest.java | 22 +- .../{v040 => sql_test}/ExpressTest.java | 20 +- .../{v230 => sql_test}/LastJoinTest.java | 4 +- .../{v230 => sql_test}/SelectTest.java | 4 +- .../{v230 => sql_test}/WindowTest.java | 25 +- .../java_sdk_test/cluster/v030/DMLTest.java | 57 -- .../cluster/v040/FunctionTest.java | 61 -- .../cluster/v230/ExpressTest.java | 81 -- .../java_sdk_test/temp/TestVersion.java | 10 + .../openmldb-sdk-test/test_suite/test_tmp.xml | 2 +- .../openmldb/test_common/model/CaseFile.java | 6 + test/steps/modify_java_sdk_config.sh | 
18 +- test/steps/openmldb-sdk-test-java-src.sh | 94 ++ 58 files changed, 1351 insertions(+), 1939 deletions(-) rename cases/function/{v040 => expression}/test_like.yaml (99%) rename cases/function/{v040 => function}/test_like_match.yaml (99%) rename cases/function/{tmp => window}/test_current_row.yaml (57%) delete mode 100644 cases/function/window/test_window_union_cluster.yaml rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/DMLTest.java (76%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v040 => sql_test}/ExpressTest.java (84%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/LastJoinTest.java (95%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/SelectTest.java (95%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/WindowTest.java (74%) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestVersion.java create mode 100755 test/steps/openmldb-sdk-test-java-src.sh diff --git a/.github/workflows/integration-test-src.yml 
b/.github/workflows/integration-test-src.yml index 7e19e9fc2d4..fd21282464f 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -1,6 +1,12 @@ name: INTEGRATION-TEST-SRC on: + push: + branches: + - main + pull_request: + schedule: + - cron: '* 1 * * *' workflow_dispatch: inputs: EXEC_TEST_TYPE: @@ -74,6 +80,8 @@ jobs: make configure CMAKE_INSTALL_PREFIX=openmldb-linux make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al - name: test run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_cluster.xml -d cluster -l "0" - name: TEST Results diff --git a/.gitignore b/.gitignore index 0f6458d3573..9cf2b3b3878 100644 --- a/.gitignore +++ b/.gitignore @@ -98,6 +98,7 @@ java/hybridse-proto/src # test logs/ out/ +allure-results/ # python builds /python/dist/ diff --git a/cases/function/cluster/test_cluster_batch.yaml b/cases/function/cluster/test_cluster_batch.yaml index 8513817e196..329fc9d170d 100644 --- a/cases/function/cluster/test_cluster_batch.yaml +++ b/cases/function/cluster/test_cluster_batch.yaml @@ -13,6 +13,7 @@ # limitations under the License. db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/cluster/test_window_row.yaml b/cases/function/cluster/test_window_row.yaml index 5be16f45d6a..35f200af520 100644 --- a/cases/function/cluster/test_window_row.yaml +++ b/cases/function/cluster/test_window_row.yaml @@ -13,6 +13,7 @@ # limitations under the License. db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/cluster/test_window_row_range.yaml b/cases/function/cluster/test_window_row_range.yaml index eb8bf4921c3..476336fe4c0 100644 --- a/cases/function/cluster/test_window_row_range.yaml +++ b/cases/function/cluster/test_window_row_range.yaml @@ -13,6 +13,7 @@ # limitations under the License. 
db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/cluster/window_and_lastjoin.yaml b/cases/function/cluster/window_and_lastjoin.yaml index 47fadbbcfb0..c20e6e070ee 100644 --- a/cases/function/cluster/window_and_lastjoin.yaml +++ b/cases/function/cluster/window_and_lastjoin.yaml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. db: test_zw -debugs: +debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/dml/multi_insert.yaml b/cases/function/dml/multi_insert.yaml index a846b0c2014..1f606089abe 100644 --- a/cases/function/dml/multi_insert.yaml +++ b/cases/function/dml/multi_insert.yaml @@ -13,6 +13,7 @@ # limitations under the License. db: multi_insert_db debugs: [] +version: 0.5.0 cases: - id: 0 desc: 简单INSERT diff --git a/cases/function/dml/test_insert.yaml b/cases/function/dml/test_insert.yaml index fb93c8b2c0c..d1cbe6ea2ba 100644 --- a/cases/function/dml/test_insert.yaml +++ b/cases/function/dml/test_insert.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/dml/test_insert_prepared.yaml b/cases/function/dml/test_insert_prepared.yaml index b6fce126821..b67c027e51b 100644 --- a/cases/function/dml/test_insert_prepared.yaml +++ b/cases/function/dml/test_insert_prepared.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/expression/test_arithmetic.yaml b/cases/function/expression/test_arithmetic.yaml index 13627c7d732..d90c7422c60 100644 --- a/cases/function/expression/test_arithmetic.yaml +++ b/cases/function/expression/test_arithmetic.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 sqlDialect: ["HybridSQL"] cases: - id: 0 diff --git a/cases/function/expression/test_condition.yaml b/cases/function/expression/test_condition.yaml index 51c5741a0c2..54d1dd4ad4d 100644 --- a/cases/function/expression/test_condition.yaml +++ 
b/cases/function/expression/test_condition.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: SIMPLE CASE WHEN 表达式 diff --git a/cases/function/v040/test_like.yaml b/cases/function/expression/test_like.yaml similarity index 99% rename from cases/function/v040/test_like.yaml rename to cases/function/expression/test_like.yaml index 7cd6d2bfe07..d47bb57b616 100644 --- a/cases/function/v040/test_like.yaml +++ b/cases/function/expression/test_like.yaml @@ -15,6 +15,7 @@ db: test_zw debugs: [] sqlDialect: ["HybridSQL"] +version: 0.5.0 cases: - id: 0 desc: "使用_" diff --git a/cases/function/expression/test_logic.yaml b/cases/function/expression/test_logic.yaml index 238f3bb0ce5..d1ce41b7825 100644 --- a/cases/function/expression/test_logic.yaml +++ b/cases/function/expression/test_logic.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 sqlDialect: ["HybridSQL"] cases: - id: 0 diff --git a/cases/function/expression/test_predicate.yaml b/cases/function/expression/test_predicate.yaml index bcb0c3bec81..b64d27bef8b 100644 --- a/cases/function/expression/test_predicate.yaml +++ b/cases/function/expression/test_predicate.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 sqlDialect: ["HybridSQL"] cases: - id: 0 @@ -597,181 +598,181 @@ cases: - [5, 1, 1590115440000, false] - [7, 1, 1590115450000, false] - [9, 1, 1590115460000, true] -# - id: like_predicate_1 -# desc: like predicate without escape -# inputs: -# - columns: ["id int", "std_ts timestamp"] -# indexs: ["index1:id:std_ts"] -# rows: -# - [1, 1590115420000 ] -# - [2, 1590115430000 ] -# - [3, 1590115440000 ] -# - [4, 1590115450000 ] -# - [5, 1590115460000 ] -# - [6, 1590115470000 ] -# - columns: ["id int", "ts timestamp", "col2 string"] -# indexs: ["idx:id:ts"] -# rows: -# - [1, 1590115420000, John] -# - [2, 1590115430000, Mary] -# - [3, 1590115440000, mike] -# - [4, 1590115450000, Dan] -# - [5, 1590115460000, Evan_W] -# - [6, 1590115470000, M] -# dataProvider: -# - 
["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE -# - ["m%", "M_ry" ] # match pattern -# sql: | -# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]'; -# expect: -# columns: ["id int", "col2 string"] -# order: id -# expectProvider: -# 0: -# 0: -# rows: -# - [1, null] -# - [2, null] -# - [3, mike] -# - [4, null] -# - [5, null] -# - [6, null] -# 1: -# rows: -# - [1, null] -# - [2, Mary] -# - [3, null] -# - [4, null] -# - [5, null] -# - [6, null] -# 1: -# 0: -# rows: -# - [1, John] -# - [2, Mary] -# - [3, null] -# - [4, Dan] -# - [5, Evan_W] -# - [6, M] -# 1: -# rows: -# - [1, John] -# - [2, null] -# - [3, mike] -# - [4, Dan] -# - [5, Evan_W] -# - [6, M] -# 2: -# 0: -# rows: -# - [1, null] -# - [2, Mary] -# - [3, mike] -# - [4, null] -# - [5, null] -# - [6, M] -# 1: -# rows: -# - [1, null] -# - [2, Mary] -# - [3, null] -# - [4, null] -# - [5, null] -# - [6, null] -# 3: -# 0: -# rows: -# - [1, John] -# - [2, null] -# - [3, null] -# - [4, Dan] -# - [5, Evan_W] -# - [6, null] -# 1: -# rows: -# - [1, John] -# - [2, null] -# - [3, mike] -# - [4, Dan] -# - [5, Evan_W] -# - [6, M] -# - id: like_predicate_2 -# desc: like predicate with escape -# inputs: -# - columns: ["id int", "std_ts timestamp"] -# indexs: ["index1:id:std_ts"] -# rows: -# - [1, 1590115420000 ] -# - [2, 1590115430000 ] -# - [3, 1590115440000 ] -# - [4, 1590115450000 ] -# - [5, 1590115460000 ] -# - [6, 1590115470000 ] -# - columns: ["id int", "ts timestamp", "col2 string"] -# indexs: ["idx:id:ts"] -# rows: -# - [1, 1590115420000, a*_b] -# - [2, 1590115430000, a*mb] -# - [3, 1590115440000, "%a_%b"] -# - [4, 1590115450000, "Ta_sub"] -# - [5, 1590115460000, "lamrb"] -# - [6, 1590115470000, "%a*_%b"] -# dataProvider: -# - ["LIKE", "NOT ILIKE"] -# - ["%", "*", ""] # escape with % or disable -# sql: | -# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]'; -# expect: -# columns: ["id int", "col2 string"] -# order: id -# 
expectProvider: -# 0: -# 0: -# rows: -# - [1, a*_b] -# - [2, a*mb] -# - [3, null] -# - [4, null] -# - [5, null] -# - [6, null] -# 1: -# rows: -# - [1, null] -# - [2, null] -# - [3, "%a_%b"] -# - [4, Ta_sub] -# - [5, null] -# - [6, null] -# 2: -# rows: -# - [1, a*_b] -# - [2, a*mb] -# - [3, null] -# - [4, null] -# - [5, null] -# - [6, "%a*_%b"] -# 1: -# 0: -# rows: -# - [1, null] -# - [2, null] -# - [3, "%a_%b"] -# - [4, "Ta_sub"] -# - [5, "lamrb"] -# - [6, "%a*_%b"] -# 1: -# rows: -# - [1, a*_b] -# - [2, a*mb] -# - [3, null] -# - [4, null] -# - [5, "lamrb"] -# - [6, "%a*_%b"] -# 2: -# rows: -# - [1, null] -# - [2, null] -# - [3, "%a_%b"] -# - [4, "Ta_sub"] -# - [5, "lamrb"] -# - [6, null] + - id: like_predicate_1 + desc: like predicate without escape + inputs: + - columns: ["id int", "std_ts timestamp"] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1590115420000 ] + - [2, 1590115430000 ] + - [3, 1590115440000 ] + - [4, 1590115450000 ] + - [5, 1590115460000 ] + - [6, 1590115470000 ] + - columns: ["id int", "ts timestamp", "col2 string"] + indexs: ["idx:id:ts"] + rows: + - [1, 1590115420000, John] + - [2, 1590115430000, Mary] + - [3, 1590115440000, mike] + - [4, 1590115450000, Dan] + - [5, 1590115460000, Evan_W] + - [6, 1590115470000, M] + dataProvider: + - ["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE + - ["m%", "M_ry" ] # match pattern + sql: | + select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]'; + expect: + columns: ["id int", "col2 string"] + order: id + expectProvider: + 0: + 0: + rows: + - [1, null] + - [2, null] + - [3, mike] + - [4, null] + - [5, null] + - [6, null] + 1: + rows: + - [1, null] + - [2, Mary] + - [3, null] + - [4, null] + - [5, null] + - [6, null] + 1: + 0: + rows: + - [1, John] + - [2, Mary] + - [3, null] + - [4, Dan] + - [5, Evan_W] + - [6, M] + 1: + rows: + - [1, John] + - [2, null] + - [3, mike] + - [4, Dan] + - [5, Evan_W] + - [6, M] + 2: + 0: + rows: + - [1, null] + - [2, Mary] + - [3, mike] 
+ - [4, null] + - [5, null] + - [6, M] + 1: + rows: + - [1, null] + - [2, Mary] + - [3, null] + - [4, null] + - [5, null] + - [6, null] + 3: + 0: + rows: + - [1, John] + - [2, null] + - [3, null] + - [4, Dan] + - [5, Evan_W] + - [6, null] + 1: + rows: + - [1, John] + - [2, null] + - [3, mike] + - [4, Dan] + - [5, Evan_W] + - [6, M] + - id: like_predicate_2 + desc: like predicate with escape + inputs: + - columns: ["id int", "std_ts timestamp"] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1590115420000 ] + - [2, 1590115430000 ] + - [3, 1590115440000 ] + - [4, 1590115450000 ] + - [5, 1590115460000 ] + - [6, 1590115470000 ] + - columns: ["id int", "ts timestamp", "col2 string"] + indexs: ["idx:id:ts"] + rows: + - [1, 1590115420000, a*_b] + - [2, 1590115430000, a*mb] + - [3, 1590115440000, "%a_%b"] + - [4, 1590115450000, "Ta_sub"] + - [5, 1590115460000, "lamrb"] + - [6, 1590115470000, "%a*_%b"] + dataProvider: + - ["LIKE", "NOT ILIKE"] + - ["%", "*", ""] # escape with % or disable + sql: | + select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]'; + expect: + columns: ["id int", "col2 string"] + order: id + expectProvider: + 0: + 0: + rows: + - [1, a*_b] + - [2, a*mb] + - [3, null] + - [4, null] + - [5, null] + - [6, null] + 1: + rows: + - [1, null] + - [2, null] + - [3, "%a_%b"] + - [4, Ta_sub] + - [5, null] + - [6, null] + 2: + rows: + - [1, a*_b] + - [2, a*mb] + - [3, null] + - [4, null] + - [5, null] + - [6, "%a*_%b"] + 1: + 0: + rows: + - [1, null] + - [2, null] + - [3, "%a_%b"] + - [4, "Ta_sub"] + - [5, "lamrb"] + - [6, "%a*_%b"] + 1: + rows: + - [1, a*_b] + - [2, a*mb] + - [3, null] + - [4, null] + - [5, "lamrb"] + - [6, "%a*_%b"] + 2: + rows: + - [1, null] + - [2, null] + - [3, "%a_%b"] + - [4, "Ta_sub"] + - [5, "lamrb"] + - [6, null] diff --git a/cases/function/expression/test_type.yaml b/cases/function/expression/test_type.yaml index ae909e66f26..45aac74cf8b 100644 --- a/cases/function/expression/test_type.yaml +++ 
b/cases/function/expression/test_type.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 sqlDialect: ["HybridSQL"] cases: - id: 0 diff --git a/cases/function/function/test_calculate.yaml b/cases/function/function/test_calculate.yaml index a0955c3499d..7e4b5f5a3c9 100644 --- a/cases/function/function/test_calculate.yaml +++ b/cases/function/function/test_calculate.yaml @@ -15,6 +15,7 @@ db: test_zw debugs: [] sqlDialect: ["HybridSQL"] +version: 0.5.0 cases: - id: 0 desc: abs-normal diff --git a/cases/function/function/test_date.yaml b/cases/function/function/test_date.yaml index f280304c629..66e1ce9cbbd 100644 --- a/cases/function/function/test_date.yaml +++ b/cases/function/function/test_date.yaml @@ -15,6 +15,7 @@ db: test_zw debugs: [] sqlDialect: ["HybridSQL"] +version: 0.5.0 cases: - id: 0 desc: date_format-normal diff --git a/cases/function/v040/test_like_match.yaml b/cases/function/function/test_like_match.yaml similarity index 99% rename from cases/function/v040/test_like_match.yaml rename to cases/function/function/test_like_match.yaml index 760fb9d4401..5300a4f85e5 100644 --- a/cases/function/v040/test_like_match.yaml +++ b/cases/function/function/test_like_match.yaml @@ -15,6 +15,7 @@ db: test_zw debugs: [] sqlDialect: ["HybridSQL"] +version: 0.5.0 cases: - id: 0 desc: "使用_" diff --git a/cases/function/function/test_string.yaml b/cases/function/function/test_string.yaml index 393052a390e..4b9220122f0 100644 --- a/cases/function/function/test_string.yaml +++ b/cases/function/function/test_string.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: "concat_各种类型组合" diff --git a/cases/function/function/test_udaf_function.yaml b/cases/function/function/test_udaf_function.yaml index f6f5d418695..6612576fd19 100644 --- a/cases/function/function/test_udaf_function.yaml +++ b/cases/function/function/test_udaf_function.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git 
a/cases/function/function/test_udf_function.yaml b/cases/function/function/test_udf_function.yaml index 8e985742376..7165f09182a 100644 --- a/cases/function/function/test_udf_function.yaml +++ b/cases/function/function/test_udf_function.yaml @@ -15,6 +15,7 @@ db: test_zw debugs: [] sqlDialect: ["HybridSQL"] +version: 0.5.0 cases: - id: 0 desc: 默认udf null处理逻辑:返回null diff --git a/cases/function/join/test_lastjoin_complex.yaml b/cases/function/join/test_lastjoin_complex.yaml index c1e9fed4945..07b65aec95c 100644 --- a/cases/function/join/test_lastjoin_complex.yaml +++ b/cases/function/join/test_lastjoin_complex.yaml @@ -13,7 +13,8 @@ # limitations under the License. db: test_zw -debugs: ["两个子查询lastjoin,order不是主表的ts-rtidb不支持"] +debugs: [] +version: 0.5.0 cases: - id: 0 desc: lastjoin+窗口 @@ -57,6 +58,7 @@ cases: - [5,"bb",24,34,68] - id: 1 desc: lastjoin+窗口-没有匹配的列 + version: 0.6.0 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] diff --git a/cases/function/join/test_lastjoin_simple.yaml b/cases/function/join/test_lastjoin_simple.yaml index 626a5917216..b2b0b1ddacf 100644 --- a/cases/function/join/test_lastjoin_simple.yaml +++ b/cases/function/join/test_lastjoin_simple.yaml @@ -13,7 +13,8 @@ # limitations under the License. 
db: test_zw -debugs: ["Last Join 无order by, 拼表条件命中索引, 副表多条命中"] +debugs: [] +version: 0.5.0 cases: - id: 1 desc: 正常拼接 diff --git a/cases/function/select/test_select_sample.yaml b/cases/function/select/test_select_sample.yaml index 6b1bfe9892f..af8158b368c 100644 --- a/cases/function/select/test_select_sample.yaml +++ b/cases/function/select/test_select_sample.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: 查询所有列 diff --git a/cases/function/select/test_sub_select.yaml b/cases/function/select/test_sub_select.yaml index 381f7cae058..17bb3cbdded 100644 --- a/cases/function/select/test_sub_select.yaml +++ b/cases/function/select/test_sub_select.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/function/select/test_where.yaml b/cases/function/select/test_where.yaml index c2a6db12f7f..8a2f8d26387 100644 --- a/cases/function/select/test_where.yaml +++ b/cases/function/select/test_where.yaml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. sqlDialect: ["HybridSQL"] -debugs: ["Where条件未命中索引示例2"] +debugs: [] +version: 0.5.0 cases: - id: 0 desc: Where条件命中索引 diff --git a/cases/function/test_index_optimized.yaml b/cases/function/test_index_optimized.yaml index a42d66cd0a9..78e05a96131 100644 --- a/cases/function/test_index_optimized.yaml +++ b/cases/function/test_index_optimized.yaml @@ -13,7 +13,8 @@ # limitations under the License. 
db: test_zw -debugs: [ ] +debugs: [] +version: 0.5.0 cases: - id: 0 desc: window optimized one key one ts diff --git a/cases/function/window/error_window.yaml b/cases/function/window/error_window.yaml index 82b16fee5e6..9e9419bc74f 100644 --- a/cases/function/window/error_window.yaml +++ b/cases/function/window/error_window.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: no order by diff --git a/cases/function/tmp/test_current_row.yaml b/cases/function/window/test_current_row.yaml similarity index 57% rename from cases/function/tmp/test_current_row.yaml rename to cases/function/window/test_current_row.yaml index 5ad35d1c960..4442e1ef199 100644 --- a/cases/function/tmp/test_current_row.yaml +++ b/cases/function/window/test_current_row.yaml @@ -13,7 +13,8 @@ # limitations under the License. db: test_zw -debugs: ["rows_range-纯历史窗口-current_row-ts=0"] +debugs: [] +version: 0.6.0 cases: - id: 0 desc: rows-current_row @@ -753,4 +754,751 @@ cases: - [ "aa",21,null ] - [ "aa",22,61 ] - [ "aa",23,93 ] - - [ "bb",24,null ] \ No newline at end of file + - [ "bb",24,null ] + + ################################################### + # tests for window attribute 'EXCLUDE CURRENT_ROW' + # - id: 20 - 23: exclude current_row window + lag window + # - id: 24 - 30: exclude current_row window + (maxsize, exclude current_time, instance_not_in_window) + ################################################### + - id: 20 + desc: | + rows_range window union with exclude current_row. 
batch not support see 1807 + mode: batch-unsupport + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 2, 233, 200, 200 + 2, 3, 233, 21, 21 + - id: 21 + desc: | + rows_range window union with exclude current_row and exclude current_time + mode: batch-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 40 + - name: t2 + columns: + - id int + - ts 
timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 1, 233, 233, 233 + 2, 1, 233, 233, 233 + 3, 4, 233, 5, 5 + - id: 22 + desc: | + rows_range window union with exclude current_row and instance_not_in_window + mode: batch-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, 
l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + PROJECT(type=Aggregation) + REQUEST_UNION(INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 2, 233, 200, 200 + 2, 2, 233, 200, 200 + - id: 23 + desc: | + rows_range window union with exclude current_row, instance_not_in_window and exclude_current_time + mode: batch-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 40 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts 
ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 1, 233, 233, 233 + 2, 1, 233, 233, 233 + 3, 2, 233, 200, 200 + 4, 3, 233, 17, 17 + + # rows_range union window with exclude current_row, single window + - id: 24 + desc: | + rows_range union window with exclude_current_row + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, -1 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW) + +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + 
DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 2, 233, 200 + 2, 3, 233, 21 + 3, 5, 233, 5 + 4, 6, 233, 0 + - id: 25 + desc: | + rows_range union window with exclude_current_row and exclude_current_time + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW) + +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + 
RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 4, 233, 5 + 4, 6, 233, 0 + - id: 26 + desc: | + rows_range union window with exclude_current_row and instance_not_in_window + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + # instance_not_in_window not optimize main table + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` 
order by `ts` + rows_range between 3s preceding AND CURRENT ROW + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 2, 233, 200 + 2, 2, 233, 200 + 3, 3, 233, 17 + 4, 3, 233, 17 + - id: 27 + desc: | + rows_range union window with exclude_current_row, exclude current_time and instance_not_in_window + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 2, 233, 200 + 4, 3, 233, 
17 + - id: 28 + desc: | + rows_range union window with exclude_current_row, exclude current_time, instance_not_in_window and maxsize + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + MAXSIZE 2 + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 2, 233, 200 + 4, 2, 200, 17 + - id: 29 + desc: | + rows_range union window with exclude_current_row, instance_not_in_window and maxsize + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + 
indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + MAXSIZE 2 + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 2, 233, 200 + 2, 2, 233, 200 + 3, 2, 200, 17 + 4, 2, 200, 17 + - id: 30 + desc: | + rows_range union window with exclude_current_row, exclude_current_time and maxsize + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + 
PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW) + +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + MAXSIZE 2 + EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 2, 21, 5 + 4, 2, 17, 0 \ No newline at end of file diff --git a/cases/function/window/test_maxsize.yaml b/cases/function/window/test_maxsize.yaml index 0729b5535d6..28af076d27a 100644 --- a/cases/function/window/test_maxsize.yaml +++ b/cases/function/window/test_maxsize.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 @@ -140,6 +141,7 @@ cases: - id: 6 desc: 纯历史窗口-maxsize + version: 0.6.0 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] @@ -164,6 +166,7 @@ cases: - id: 7 desc: 没有数据进入maxsize的窗口 + version: 0.6.0 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] diff --git 
a/cases/function/window/test_window.yaml b/cases/function/window/test_window.yaml index 5bbfe138ab8..db24f7c493f 100644 --- a/cases/function/window/test_window.yaml +++ b/cases/function/window/test_window.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 @@ -1050,15 +1051,15 @@ cases: expect: columns: ["id int", "val1 int", "agg1 int", "agg2 int"] order: id - data: | - 1, 1, 1, NULL - 2, 2, 2, 1 - 3, 3, 3, 2 - 4, 4, 4, 3 - 5, 5, 5, 4 - 6, 4, 4, NULL - 7, 3, 3, 4 - 8, 2, 2, 3 + rows: + - [1, 1, 1, NULL] + - [2, 2, 2, 1] + - [3, 3, 3, 2] + - [4, 4, 4, 3] + - [5, 5, 5, 4] + - [6, 4, 4, NULL] + - [7, 3, 3, 4] + - [8, 2, 2, 3] - id: 34 desc: | @@ -1067,15 +1068,15 @@ cases: - columns: [ "id int","ts timestamp","group1 string","val1 int" ] indexs: [ "index1:group1:ts" ] name: t1 - data: | - 1, 1612130400000, g1, 1 - 2, 1612130401000, g1, 2 - 3, 1612130402000, g1, 3 - 4, 1612130403000, g1, 4 - 5, 1612130404000, g1, 5 - 6, 1612130404000, g2, 4 - 7, 1612130405000, g2, 3 - 8, 1612130406000, g2, 2 + rows: + - [1, 1612130400000, g1, 1] + - [2, 1612130401000, g1, 2] + - [3, 1612130402000, g1, 3] + - [4, 1612130403000, g1, 4] + - [5, 1612130404000, g1, 5] + - [6, 1612130404000, g2, 4] + - [7, 1612130405000, g2, 3] + - [8, 1612130406000, g2, 2] sql: | select `id`, @@ -1088,15 +1089,15 @@ cases: expect: columns: ["id int", "val1 int", "agg1 int", "agg2 int"] order: id - data: | - 1, 1, 1, NULL - 2, 2, 2, 1 - 3, 3, 3, 2 - 4, 4, 4, 3 - 5, 5, 5, 4 - 6, 4, 4, NULL - 7, 3, 3, 4 - 8, 2, 2, 3 + rows: + - [1, 1, 1, NULL] + - [2, 2, 2, 1] + - [3, 3, 3, 2] + - [4, 4, 4, 3] + - [5, 5, 5, 4] + - [6, 4, 4, NULL] + - [7, 3, 3, 4] + - [8, 2, 2, 3] - id: 35 desc: | @@ -1126,17 +1127,18 @@ cases: expect: columns: ["id int", "val1 int", "agg1 int", "agg2 int"] order: id - data: | - 1, 1, 1, NULL - 2, 2, 2, 1 - 3, 3, 3, 2 - 4, 4, 4, 3 - 5, 5, 5, 4 - 6, 4, 4, NULL - 7, 3, 3, 4 - 8, 2, 2, 3 + rows: + - [1, 1, 1, NULL] + - [2, 2, 2, 1] + - [3, 3, 3, 2] + - [4, 4, 4, 3] + - 
[5, 5, 5, 4] + - [6, 4, 4, NULL] + - [7, 3, 3, 4] + - [8, 2, 2, 3] - id: 36 + version: 0.6.0 desc: | correctness for window functions over window whose border is open inputs: @@ -1176,6 +1178,7 @@ cases: 3, 2, 22, 21, 22 - id: 37 + version: 0.6.0 desc: | correctness for rows_range window functions over window whose border is open inputs: diff --git a/cases/function/window/test_window_exclude_current_time.yaml b/cases/function/window/test_window_exclude_current_time.yaml index ccef8ae1e28..0765136518b 100644 --- a/cases/function/window/test_window_exclude_current_time.yaml +++ b/cases/function/window/test_window_exclude_current_time.yaml @@ -13,6 +13,7 @@ # limitations under the License. db: test_zw +version: 0.5.0 cases: - id: 0 desc: ROWS_RANGE Window OPEN PRECEDING EXCLUDE CURRENT_TIME diff --git a/cases/function/window/test_window_row.yaml b/cases/function/window/test_window_row.yaml index 93529ffe430..c4b0814f8ba 100644 --- a/cases/function/window/test_window_row.yaml +++ b/cases/function/window/test_window_row.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 @@ -847,6 +848,7 @@ cases: - id: 38 desc: rows 1-2 + version: 0.6.0 inputs: - columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] diff --git a/cases/function/window/test_window_row_range.yaml b/cases/function/window/test_window_row_range.yaml index c72734f4dc8..71681b7d41e 100644 --- a/cases/function/window/test_window_row_range.yaml +++ b/cases/function/window/test_window_row_range.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: string为partition by @@ -681,6 +682,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0 ] - id: 24-1 desc: ROWS_RANGE Pure History Window + version: 0.6.0 inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -714,6 +716,7 @@ cases: - [ "aa", 9, 1590739002000, 2.0 ] - id: 24-2 desc: ROWS_RANGE Pure History Window With MaxSize + version: 0.6.0 
inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -940,6 +943,7 @@ cases: - id: 27-3 desc: ROWS and ROWS_RANGE Pure History Window Cant' Be Merge + version: 0.6.0 inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -1371,6 +1375,7 @@ cases: - id: 46 desc: timestamp为order by-2s-1s + version: 0.6.0 inputs: - columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] @@ -1443,6 +1448,7 @@ cases: - id: 49 desc: timestamp为order by-2s-1 + version: 0.6.0 inputs: - columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] @@ -1467,6 +1473,7 @@ cases: - id: 50 desc: timestamp为order by-前后单位不一样 + version: 0.6.0 inputs: - columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] diff --git a/cases/function/window/test_window_union.yaml b/cases/function/window/test_window_union.yaml index d3fdbed82dd..6a13a30cf7a 100644 --- a/cases/function/window/test_window_union.yaml +++ b/cases/function/window/test_window_union.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: 正常union @@ -119,7 +120,7 @@ cases: - [5,"ee",21,34] - id: 5 desc: 样本表使用索引,UNION表未命中索引 - mode: rtidb-unsupport,cli-unsupport + mode: rtidb-unsupport inputs: - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] indexs: ["index1:c3:c7"] @@ -143,7 +144,7 @@ cases: - [5,"ee",21,34] - id: 6 desc: union表使用索引,样本表未命中索引 - mode: rtidb-unsupport,cli-unsupport + mode: rtidb-unsupport inputs: - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] indexs: ["index1:c1:c7"] @@ -950,750 +951,180 @@ cases: data: | 1, 3, 233, 33, 200, 233, 33 2, 3, 233, 33, 200, 233, 33 - - ################################################### - # tests for window attribute 'EXCLUDE CURRENT_ROW' - # - id: 20 - 23: exclude 
current_row window + lag window - # - id: 24 - 30: exclude current_row window + (maxsize, exclude current_time, instance_not_in_window) - ################################################### - - id: 20 - desc: | - rows_range window union with exclude current_row. batch not support see 1807 - mode: batch-unsupport - request_plan: | - SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) - REQUEST_JOIN(type=kJoinTypeConcat) - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(type=Partition, table=t1, index=idx) - PROJECT(type=Aggregation) - REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) - +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(type=Partition, table=t1, index=idx) + - id: 18 + desc: 主表ts都大于副表的 inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738995000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - 
[2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - lag(val, 1) over w as l1 - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding and 0s preceding EXCLUDE CURRENT_ROW); + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - - l1 int order: id - data: | - 1, 2, 233, 200, 200 - 2, 3, 233, 21, 21 - - id: 21 - desc: | - rows_range window union with exclude current_row and exclude current_time - mode: batch-unsupport + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 19 + desc: 主表ts都小于副表的 inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 40 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - request_plan: | - SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) - REQUEST_JOIN(type=kJoinTypeConcat) - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(type=Partition, table=t1, index=idx) - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) - +-UNION(partition_keys=(), 
orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(type=Partition, table=t1, index=idx) + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738991000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738992000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738993000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738994000,"2020-05-03"] sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - lag(val, 1) over w as l1 - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding and 0s preceding - EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - - l1 int order: id - data: | - 1, 1, 233, 233, 233 - 2, 1, 233, 233, 233 - 3, 4, 233, 5, 5 - - id: 22 - desc: | - rows_range window union with exclude current_row and instance_not_in_window - mode: batch-unsupport + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,63] + - [5,"ee",21,34] + - id: 20 + desc: 主表副表ts有交集 inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - request_plan: | - 
SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) - REQUEST_JOIN(type=kJoinTypeConcat) - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) - PROJECT(type=Aggregation) - REQUEST_UNION(INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - lag(val, 1) over w as l1 - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding and 0s preceding - EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - - l1 int order: id - data: | - 1, 2, 233, 200, 200 - 2, 2, 233, 200, 
200 - - id: 23 - desc: | - rows_range window union with exclude current_row, instance_not_in_window and exclude_current_time - mode: batch-unsupport + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 21 + desc: 主表和副表分片在同一节点上 inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 40 - 4, 102, 111, 0 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - request_plan: | - SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) - REQUEST_JOIN(type=kJoinTypeConcat) - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + replicaNum: 3 + partitionNum: 1 + distribution: + - leader: "{tb_endpoint_1}" + followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - 
[5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + replicaNum: 3 + partitionNum: 1 + distribution: + - leader: "{tb_endpoint_1}" + followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - lag(val, 1) over w as l1 - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding and 0s preceding - EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - - l1 int order: id - data: | - 1, 1, 233, 233, 233 - 2, 1, 233, 233, 233 - 3, 2, 233, 200, 200 - 4, 3, 233, 17, 17 - - # rows_range union window with exclude current_row, single window - - id: 24 - desc: | - rows_range union window with exclude_current_row + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 21 + desc: 主表和副表分片在不同的节点上 inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 0 - 4, 102, 111, -1 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - batch_plan: | - PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW) - +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING)) - RENAME(name=t1) - 
DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(type=Partition, table=t1, index=idx) - request_plan: | - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(type=Partition, table=t1, index=idx) - sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding and 0s preceding - EXCLUDE CURRENT_ROW); - expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - order: id - data: | - 1, 2, 233, 200 - 2, 3, 233, 21 - 3, 5, 233, 5 - 4, 6, 233, 0 - - id: 25 - desc: | - rows_range union window with exclude_current_row and exclude_current_time - inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 0 - 4, 102, 111, 0 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - batch_plan: | - PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW) - +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(type=Partition, table=t1, index=idx) - request_plan: | - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 
3000 PRECEDING, 0 CURRENT), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(type=Partition, table=t1, index=idx) - sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding AND CURRENT ROW - EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); - expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - order: id - data: | - 1, 1, 233, 233 - 2, 1, 233, 233 - 3, 4, 233, 5 - 4, 6, 233, 0 - - id: 26 - desc: | - rows_range union window with exclude_current_row and instance_not_in_window - inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 0 - 4, 102, 111, 0 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - # instance_not_in_window not optimize main table - batch_plan: | - PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) - +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(table=t1) - request_plan: | - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) - sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - from t1 
window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding AND CURRENT ROW - EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); - expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - order: id - data: | - 1, 2, 233, 200 - 2, 2, 233, 200 - 3, 3, 233, 17 - 4, 3, 233, 17 - - id: 27 - desc: | - rows_range union window with exclude_current_row, exclude current_time and instance_not_in_window - inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 0 - 4, 102, 111, 0 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - batch_plan: | - PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) - +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(table=t1) - request_plan: | - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) - sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding AND CURRENT ROW - EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); - expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - order: id - data: | - 1, 1, 233, 233 - 2, 
1, 233, 233 - 3, 2, 233, 200 - 4, 3, 233, 17 - - id: 28 - desc: | - rows_range union window with exclude_current_row, exclude current_time, instance_not_in_window and maxsize - inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 0 - 4, 102, 111, 0 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - batch_plan: | - PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) - +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(table=t1) - request_plan: | - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - 
[3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding AND CURRENT ROW - MAXSIZE 2 - EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: - - id int - - cnt int64 - - mv int - - mi int order: id - data: | - 1, 1, 233, 233 - 2, 1, 233, 233 - 3, 2, 233, 200 - 4, 2, 200, 17 - - id: 29 - desc: | - rows_range union window with exclude_current_row, instance_not_in_window and maxsize - inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 0 - 4, 102, 111, 0 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - batch_plan: | - PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) - +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(table=t1) - request_plan: | - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(table=t1) + columns: ["id int","c1 string","c3 int","w1_c4_sum 
bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 22 + desc: 两张副表,一张和主表在同一节点,另一张不在 + db: db_wzx sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding AND CURRENT ROW - MAXSIZE 2 - EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); - expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - order: id - data: | - 1, 2, 233, 200 - 2, 2, 233, 200 - 3, 2, 200, 17 - 4, 2, 200, 17 - - id: 30 - desc: | - rows_range union window with exclude_current_row, exclude_current_time and maxsize + select + c1, + min(c1) over table_1_s2_t1 as table_1_c1_9, + min(c2) over table_1_s2_t1 as table_1_c2_10, + case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11, + case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12, + case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13 + from + {0} as main + window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); inputs: - - name: t1 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 100, 111, 21 - 2, 100, 111, 5 - 3, 101, 111, 0 - 4, 102, 111, 0 - - name: t2 - columns: - - id int - - ts timestamp - - g int - - val int - indexs: - - idx:g:ts - data: | - 1, 99, 111, 233 - 1, 100, 111, 200 - 1, 101, 111, 17 - batch_plan: | - PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW) - +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - 
DATA_PROVIDER(type=Partition, table=t1, index=idx) - request_plan: | - PROJECT(type=Aggregation) - REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) - +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) - RENAME(name=t1) - DATA_PROVIDER(type=Partition, table=t2, index=idx) - DATA_PROVIDER(request=t1) - DATA_PROVIDER(type=Partition, table=t1, index=idx) - sql: | - select - id, count(val) over w as cnt, - max(val) over w as mv, - min(val) over w as mi, - from t1 window w as( - union t2 - partition by `g` order by `ts` - rows_range between 3s preceding AND CURRENT ROW - MAXSIZE 2 - EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + distribution: + - 
leader: "{tb_endpoint_0}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] expect: - columns: - - id int - - cnt int64 - - mv int - - mi int - order: id - data: | - 1, 1, 233, 233 - 2, 1, 233, 233 - 3, 2, 21, 5 - 4, 2, 17, 0 + order: c1 + columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] + rows: + - [1, 1, 2, NULL, NULL, NULL] diff --git a/cases/function/window/test_window_union_cluster.yaml b/cases/function/window/test_window_union_cluster.yaml deleted file mode 100644 index 2775ea6fa63..00000000000 --- a/cases/function/window/test_window_union_cluster.yaml +++ /dev/null @@ -1,738 +0,0 @@ -# Copyright 2021 4Paradigm -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - - -db: test_zw -debugs: [] -cases: - - id: 0 - desc: 正常union - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 1 - desc: union的表列个数不一致 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000] - - [3,"cc",20,32,1.3,2.3,1590738992000] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: false - - id: 2 - desc: 列类型不一致 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - 
[5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 string"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: false - - id: 3 - desc: 列名不一致 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: false - - id: 4 - desc: 使用列别名后schema一致 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW - w1 AS (UNION (select id, c1,c3,c4,c5,c6,c7,c9 as 
c8 from {1}) - PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 5 - desc: 样本表使用索引,UNION表未命中索引 - mode: rtidb-unsupport - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 6 - desc: union表使用索引,样本表未命中索引 - mode: rtidb-unsupport - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id 
- columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 7 - desc: 样本表union表都使用索引 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 8 - desc: union多表 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] - - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1},{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW); - 
expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,126] - - [5,"dd",20,129] - - [6,"ee",21,34] - - id: 9 - desc: 结合limit - tags: ["TODO", "@zhaowei remove limit case here"] - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW - w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) limit 2; - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [5,"ee",21,34] - - id: 10 - desc: 使用两个pk - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1|c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - [6,"ee",21,33,1.4,2.4,1590738995000,"2020-05-04"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1|c3:c7"] - rows: - - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 
int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"aa",20,96] - - [5,"ee",21,34] - - [6,"ee",21,67] - - id: 11 - desc: 样本表和union表都使用子查询 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM (select * from {0}) WINDOW w1 AS (UNION (select * from {1}) PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 12 - desc: union多表,其中一个子查询 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] - - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION (select * from {1}),{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS 
BETWEEN 3 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,126] - - [5,"dd",20,129] - - [6,"ee",21,34] - - id: 13 - desc: 样本表不进入window - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW - w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,93] - - [4,"dd",20,96] - - [5,"ee",21,34] - - - id: 14-1 - desc: WINDOW UNION 子查询, column cast 和 const cast子查询, string cast as date - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4str string","c5 float","c6 double","c7 timestamp"] - indexs: ["index1:c3:c7"] - rows: - - [2, "bb", 20, "31", 1.2, 2.2, 1590738991000] - - [3, "cc", 20, "32", 1.3, 2.3, 1590738992000] - sql: | - SELECT id, c1, c3, c8, - distinct_count(c8) OVER w1 as w1_c8_dis_cnt, - sum(c4) OVER w1 as w1_c4_sum - FROM {0} WINDOW - w1 AS (UNION (select id, c1, c3, bigint(c4str) as c4, c5, c6, c7, date("2020-10-01") as 
c8 from {1}) - PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); - expect: - order: id - columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] - rows: - - [1, "aa", 20, "2020-05-01", 2, 93] - - [4, "dd", 20, "2020-05-04", 2, 96] - - [5, "ee", 21, "2020-05-05", 1, 34] - - id: 14-2 - desc: WINDOW UNION 子查询, column cast 和 const cast子查询. cast column as partition key - inputs: - - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20.0, 30,1.1,2.1,1590738993000,"2020-05-01"] - - [4,"dd",20.1, 33,1.4,2.4,1590738994000,"2020-05-04"] - - [5,"ee",21.2, 34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000] - - [3,"cc",20,32,1.3,2.3,1590738992000] - sql: | - SELECT id, c1, c3, c8, - distinct_count(c8) OVER w1 as w1_c8_dis_cnt, - sum(c4) OVER w1 as w1_c4_sum - FROM (select id, c1, int(c3f) as c3, c4, c5, c6, c7, c8 from {0}) WINDOW - w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1}) - PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); - expect: - order: id - columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] - rows: - - [1, "aa", 20, "2020-05-01", 2, 93] - - [4, "dd", 20, "2020-05-04", 2, 96] - - [5, "ee", 21, "2020-05-05", 1, 34] - - id: 14-3 - desc: WINDOW UNION 子查询, timestamp(string) as window ts - inputs: - - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"] - indexs: ["index1:c1:c4"] - rows: - - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"] - - [4,"dd",20.1, 33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"] - - [5,"ee",21.2, 
34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2, 1590738991000] - - [3,"cc",20,32,1.3,2.3, 1590738992000] - sql: | - SELECT id, c1, c3, c7, c8, - distinct_count(c8) OVER w1 as w1_c8_dis_cnt, - sum(c4) OVER w1 as w1_c4_sum - FROM (select id, c1, int(c3f) as c3, c4, c5, c6, timestamp(c7str) as c7, c8 from {0}) WINDOW - w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1}) - PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); - expect: - order: id - columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] - rows: - - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93] - - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96] - - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34] - - id: 14-4 - desc: WINDOW UNION 子查询, cast另一种写法 cast(column as timestamp) as window ts - inputs: - - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"] - indexs: ["index1:c1:c4"] - rows: - - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"] - - [4,"dd",20.1, 33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"] - - [5,"ee",21.2, 34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2, 1590738991000] - - [3,"cc",20,32,1.3,2.3, 1590738992000] - sql: | - SELECT id, c1, c3, c7, c8, - distinct_count(c8) OVER w1 as w1_c8_dis_cnt, - sum(c4) OVER w1 as w1_c4_sum - FROM (select id, c1, cast(c3f as int) as c3, c4, c5, c6, cast(c7str as timestamp) as c7, c8 from {0}) WINDOW - w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, cast("2020-10-01" as date) as c8 from {1}) - PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT 
ROW INSTANCE_NOT_IN_WINDOW); - expect: - order: id - columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] - rows: - - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93] - - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96] - - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34] - - id: 16 - desc: 主表window 添加 INSTANCE_NOT_IN_WINDOW 没有明显错误日志 - db: db_wzx - sql: | - select - c1, - min(c1) over table_1_s2_t1 as table_1_c1_9, - min(c2) over table_1_s2_t1 as table_1_c2_10, - identity(case when lag(d1, 1) != null then distinct_count(d1) else null end) over table_1_s2_t1 as table_1_d1_11, - identity(case when lag(d2, 1) != null then distinct_count(d2) else null end) over table_1_s2_t1 as table_1_d2_12, - identity(case when lag(s1, 1) != null then distinct_count(s1) else null end) over table_1_s2_t1 as table_1_s1_13 - from - {0} as main - window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); - inputs: - - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] - rows: - - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s2:t1"] - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s1:t1"] - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 
int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s1:t1"] - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - expect: - order: c1 - columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] - rows: - - [1, 1, 2, NULL, NULL, NULL] - - - id: 16-2 - desc: 主表window 添加 INSTANCE_NOT_IN_WINDOW 没有明显错误日志 case when写法优化 - db: db_wzx - sql: | - select - c1, - min(c1) over table_1_s2_t1 as table_1_c1_9, - min(c2) over table_1_s2_t1 as table_1_c2_10, - case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11, - case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12, - case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13 - from - {0} as main - window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); - inputs: - - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] - rows: - - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s2:t1"] - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s1:t1"] - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - 
columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s1:t1"] - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - expect: - order: c1 - columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] - rows: - - [1, 1, 2, NULL, NULL, NULL] - - id: 17 - desc: 两个索引不一致的表union - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7","index2:c1:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 18 - desc: 主表ts都大于副表的 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738995000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - 
SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,93] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 19 - desc: 主表ts都小于副表的 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738991000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738992000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738993000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738994000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,63] - - [5,"ee",21,34] - - id: 20 - desc: 主表副表ts有交集 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: 
id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 21 - desc: 主表和副表分片在同一节点上 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - replicaNum: 3 - partitionNum: 1 - distribution: - - leader: "{tb_endpoint_1}" - followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - replicaNum: 3 - partitionNum: 1 - distribution: - - leader: "{tb_endpoint_1}" - followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 21 - desc: 主表和副表分片在不同的节点上 - inputs: - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - distribution: - - leader: "{tb_endpoint_1}" - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] - - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] - - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c3:c7"] - distribution: - - leader: "{tb_endpoint_0}" - rows: - - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] - 
sql: | - SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] - rows: - - [1,"aa",20,30] - - [4,"dd",20,96] - - [5,"ee",21,34] - - id: 22 - desc: 两张副表,一张和主表在同一节点,另一张不在 - db: db_wzx - sql: | - select - c1, - min(c1) over table_1_s2_t1 as table_1_c1_9, - min(c2) over table_1_s2_t1 as table_1_c2_10, - case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11, - case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12, - case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13 - from - {0} as main - window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); - inputs: - - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] - distribution: - - leader: "{tb_endpoint_1}" - rows: - - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s2:t1"] - distribution: - - leader: "{tb_endpoint_1}" - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s1:t1"] - distribution: - - leader: "{tb_endpoint_0}" - rows: - - ["1", "2", 1600946381104, "2019-07-18", 
"xx", "xx", 1, 2 , "3x","4x","kx"] - - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", - "ai string", "kn string", "ks string"] - indexs: ["index1:s1:t1"] - distribution: - - leader: "{tb_endpoint_0}" - rows: - - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] - expect: - order: c1 - columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] - rows: - - [1, 1, 2, NULL, NULL, NULL] \ No newline at end of file diff --git a/cases/function/window/test_window_union_cluster_thousand.yaml b/cases/function/window/test_window_union_cluster_thousand.yaml index 432927ea744..e8e8246dbfb 100644 --- a/cases/function/window/test_window_union_cluster_thousand.yaml +++ b/cases/function/window/test_window_union_cluster_thousand.yaml @@ -16,6 +16,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: 正常union diff --git a/cases/function/window/window_attributes.yaml b/cases/function/window/window_attributes.yaml index f1e54311993..f538096f4be 100644 --- a/cases/function/window/window_attributes.yaml +++ b/cases/function/window/window_attributes.yaml @@ -5,6 +5,7 @@ # - MAXSIZE debugs: [] +version: 0.6.0 cases: - id: 0 desc: ROWS_RANGE window with exclude_current_row diff --git a/cases/query/const_query.yaml b/cases/query/const_query.yaml index 70c8a7bf955..304f0486073 100644 --- a/cases/query/const_query.yaml +++ b/cases/query/const_query.yaml @@ -13,6 +13,7 @@ # limitations under the License. 
debugs: [] +version: 0.5.0 cases: - id: 0 desc: select const number diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 48485b02fcb..dddb7f6ca8c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -154,6 +154,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ private String downloadOpenMLDB(String testPath){ try { String command; + log.info("openMLDBUrl:{}",openMLDBUrl); if(openMLDBUrl.startsWith("http")) { command = "wget -P " + testPath + " -q " + openMLDBUrl; }else{ diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index a9c31e13b4a..a80f46f944d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -1,9 +1,6 @@ -#zk的url zk_url=https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz -#配置fedb版本以及对应的url - main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz 0.2.2=https://github.com/4paradigm/OpenMLDB/releases/download/0.2.2/openmldb-0.2.2-linux.tar.gz 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml index ad10dcb34ea..806e96fe8da 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/pom.xml @@ -114,8 +114,8 @@ ${caseLevel} - fedbVersion - ${fedbVersion} + diffVersion + ${diffVersion} reportLog diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java index ec21b35b4dd..b08367c789b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java @@ -87,6 +87,8 @@ public void check() throws ParseException { ); } else if (actual_val != null && actual_val instanceof Double) { + System.out.println("expect_val = " + expect_val); + System.out.println("actual_val = " + actual_val); Assert.assertTrue(expect_val != null && expect_val instanceof Double); Assert.assertEquals( (Double) actual_val, (Double) expect_val, 1e-4, diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java index 8d54b2676a8..7d984a35878 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java @@ -72,7 +72,7 @@ public class 
OpenMLDBConfig { log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); } - String versionStr = System.getProperty("fedbVersion"); + String versionStr = System.getProperty("diffVersion"); if (StringUtils.isEmpty(versionStr)) { versionStr = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_versions"); } @@ -96,6 +96,12 @@ public class OpenMLDBConfig { if(StringUtils.isNotEmpty(tableStorageMode)){ OpenMLDBGlobalVar.tableStorageMode = tableStorageMode; } + log.info("test tableStorageMode: {}", OpenMLDBGlobalVar.tableStorageMode); + String version = CONFIG.getProperty("version"); + if(StringUtils.isNotEmpty(version)){ + OpenMLDBGlobalVar.version = version; + } + log.info("test version: {}", OpenMLDBGlobalVar.version); } public static boolean isCluster() { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index e3252014c95..2959f96554e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,4 +1,4 @@ # memory/ssd/hdd #table_storage_mode=ssd - +version=0.5.0 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java similarity index 76% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DMLTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java index 80a6c907ba0..9b697eb46a8 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; @@ -42,6 +42,13 @@ public void testInsert(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } + @Test(dataProvider = "getCase",enabled = false) + @Yaml(filePaths = {"function/dml/test_insert.yaml"}) + @Story("insert") + public void testInsertByCli(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } + @Test(dataProvider = "getCase") @Yaml(filePaths = "function/dml/test_insert_prepared.yaml") @Story("insert-prepared") @@ -49,10 +56,17 @@ public void testInsertWithPrepared(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kInsertPrepared).run(); } + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/dml/multi_insert.yaml") + @Story("multi-insert") + public void testMultiInsert(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); + } + @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = {"function/dml/test_insert.yaml"}) - @Story("insert") - public void testInsertByCli(SQLCase testCase){ + @Yaml(filePaths = "function/dml/multi_insert.yaml") + @Story("multi-insert") + public void testMultiInsertByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/ExpressTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ExpressTest.java similarity index 84% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/ExpressTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ExpressTest.java index 30da353292c..0b4f5a1ebf8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/ExpressTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ExpressTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.cluster.v040; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; @@ -35,26 +35,34 @@ public class ExpressTest extends OpenMLDBTest { @Story("batch") - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like.yaml") + @Test(dataProvider = "getCase",enabled = false) + @Yaml(filePaths = { + "function/expression/" + }) public void testExpress(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like.yaml") + @Yaml(filePaths = { + "function/expression/" + }) public void testExpressRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like.yaml") + @Yaml(filePaths = { + "function/expression/" + }) public 
void testExpressRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like.yaml") + @Yaml(filePaths = { + "function/expression/" + }) public void testExpressRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LastJoinTest.java similarity index 95% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LastJoinTest.java index 02dde710286..128e21c2e0c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/LastJoinTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LastJoinTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; @@ -35,7 +35,7 @@ public class LastJoinTest extends OpenMLDBTest { @Story("batch") - @Test(dataProvider = "getCase") + @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = {"function/join/"}) public void testLastJoin(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/SelectTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/SelectTest.java similarity index 95% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/SelectTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/SelectTest.java index 28266fad214..9930e689ad6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/SelectTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/SelectTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; @@ -36,7 +36,7 @@ public class SelectTest extends OpenMLDBTest { @Story("batch") - @Test(dataProvider = "getCase") + @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) @Step("{testCase.desc}") public void testSelect(SQLCase testCase) throws Exception { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java similarity index 74% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java index 90d2d2ca187..1434103f167 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; @@ -67,33 +67,10 @@ public void testWindowRequestModeWithSpAsync(SQLCase testCase) throws Exception ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } - - @Story("batch") - @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/test_window_union_cluster.yaml"}) - public void testWindowBatch2(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); - } - @Story("request") @Test(dataProvider = "getCase") @Yaml(filePaths = {"function/window/test_window_union_cluster_thousand.yaml"}) public void testWindowRequestMode2(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } - - @Story("requestWithSp") - @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/test_window_union_cluster.yaml"}) - public void testWindowRequestModeWithSp2(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); - } - - //暂时不支持 - @Story("requestWithSp") -// @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/test_window_union_cluster.yaml"}) - public void testWindowCLI(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kClusterCLI).run(); - } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java deleted file mode 100644 index c8240fc74c0..00000000000 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/DMLTest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com._4paradigm.openmldb.java_sdk_test.cluster.v030; - - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.model.SQLCaseType; -import com._4paradigm.openmldb.test_common.provider.Yaml; -import io.qameta.allure.Feature; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.annotations.Test; - -/** - * @author zhaowei - * @date 2020/6/11 2:53 PM - */ -@Slf4j -@Feature("DML") -public class DMLTest extends OpenMLDBTest { - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/dml/multi_insert.yaml") - @Story("multi-insert") - public void testMultiInsert(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); - } - - @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/dml/multi_insert.yaml") - @Story("multi-insert") - public void testMultiInsertByCli(SQLCase testCase){ - ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); - } - - @Test(dataProvider = "getCase") - @Yaml(filePaths = 
"function/dml/multi_insert.yaml") - @Story("multi-insert") - public void testMultiInsertBySDK(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java deleted file mode 100644 index 79b752477e6..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v040/FunctionTest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com._4paradigm.openmldb.java_sdk_test.cluster.v040; - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.model.SQLCaseType; -import com._4paradigm.openmldb.test_common.provider.Yaml; -import io.qameta.allure.Feature; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.annotations.Test; - -/** - * @author zhaowei - * @date 2020/6/11 2:53 PM - */ -@Slf4j -@Feature("Function") -public class FunctionTest extends OpenMLDBTest { - - @Story("batch") - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like_match.yaml") - public void testFunction(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); - } - @Story("request") - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like_match.yaml") - public void testFunctionRequestMode(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); - } - @Story("requestWithSp") - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like_match.yaml") - public void testFunctionRequestModeWithSp(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); - } - @Story("requestWithSpAysn") - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/v040/test_like_match.yaml") - public void testFunctionRequestModeWithSpAysn(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java deleted file mode 100644 index ba8a926af53..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ExpressTest.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.model.SQLCaseType; -import com._4paradigm.openmldb.test_common.provider.Yaml; -import io.qameta.allure.Feature; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.annotations.Test; - -/** - * @author zhaowei - * @date 2020/6/11 2:53 PM - */ -@Slf4j -@Feature("Express") -public class ExpressTest extends OpenMLDBTest { - - @Story("batch") - @Test(dataProvider = "getCase") - @Yaml(filePaths = { - "function/expression/test_arithmetic.yaml", - "function/expression/test_condition.yaml", - "function/expression/test_logic.yaml", - "function/expression/test_type.yaml" - }) - public void testExpress(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, 
testCase, SQLCaseType.kBatch).run(); - } - @Story("request") - @Test(dataProvider = "getCase") - @Yaml(filePaths = { - "function/expression/test_arithmetic.yaml", - "function/expression/test_condition.yaml", - "function/expression/test_logic.yaml", - "function/expression/test_type.yaml" - }) - public void testExpressRequestMode(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); - } - @Story("requestWithSp") - @Test(dataProvider = "getCase") - @Yaml(filePaths = { - "function/expression/test_arithmetic.yaml", - "function/expression/test_condition.yaml", - "function/expression/test_logic.yaml", - "function/expression/test_type.yaml" - }) - public void testExpressRequestModeWithSp(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); - } - @Story("requestWithSpAysn") - @Test(dataProvider = "getCase") - @Yaml(filePaths = { - "function/expression/test_arithmetic.yaml", - "function/expression/test_condition.yaml", - "function/expression/test_logic.yaml", - "function/expression/test_type.yaml" - }) - public void testExpressRequestModeWithSpAysn(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestVersion.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestVersion.java new file mode 100644 index 00000000000..25939d13123 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestVersion.java @@ -0,0 +1,10 @@ +package com._4paradigm.openmldb.java_sdk_test.temp; + +import org.testng.annotations.Test; + +public class TestVersion { + @Test + public void testCompareTo(){ + 
System.out.println("0.5.0".compareTo("")); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml index b4af16ea5a5..6051ecf80a3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml @@ -11,7 +11,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java index 1d0329a2697..c79a5e1f767 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java @@ -86,6 +86,9 @@ public List getCases() { } List testCaseList = new ArrayList<>(); List debugs = getDebugs(); +// if(StringUtils.isNotEmpty(OpenMLDBGlobalVar.version)){ +// cases = cases.stream().filter(c->c.getVersion().compareTo(OpenMLDBGlobalVar.version)<=0).collect(Collectors.toList()); +// } if (!OpenMLDBGlobalVar.tableStorageMode.equals("memory")) { cases = cases.stream().filter(c->c.isSupportDiskTable()).peek(c->c.setStorage(OpenMLDBGlobalVar.tableStorageMode)).collect(Collectors.toList()); } @@ -113,6 +116,9 @@ public List getCases() { if (isCaseInBlackList(tmpCase)) { continue; } + if(StringUtils.isNotEmpty(OpenMLDBGlobalVar.version)&&OpenMLDBGlobalVar.version.compareTo(tmpCase.getVersion())<0){ + continue; + } addCase(tmpCase,testCaseList); } return testCaseList; diff --git a/test/steps/modify_java_sdk_config.sh b/test/steps/modify_java_sdk_config.sh index 55573c144dc..6412a9bc5de 100755 --- 
a/test/steps/modify_java_sdk_config.sh +++ b/test/steps/modify_java_sdk_config.sh @@ -17,21 +17,21 @@ CASE_XML=$1 DEPLOY_MODE=$2 -FEDB_SDK_VERSION=$3 +OPENMLDB_SDK_VERSION=$3 BUILD_MODE=$4 -FEDB_SERVER_VERSION=$4 -JAVA_NATIVE_VERSION=$5 +OPENMLDB_SERVER_VERSION=$5 +JAVA_NATIVE_VERSION=$6 echo "deploy_mode:${DEPLOY_MODE}" ROOT_DIR=$(pwd) -echo "test_version:$FEDB_SDK_VERSION" +echo "test_version:$OPENMLDB_SDK_VERSION" cd test/integration-test/openmldb-test-java/openmldb-sdk-test || exit # modify suite_xml -sed -i "s###" test_suite/"${CASE_XML}" +sed -i "s###" test_suite/"${CASE_XML}" sed -i "s###" test_suite/"${CASE_XML}" -if [[ "${BUILD_MODE}" == "SRC" ]]; then - sed -i "s###" test_suite/"${CASE_XML}" -fi +#if [[ "${BUILD_MODE}" == "SRC" ]]; then +# sed -i "s###" test_suite/"${CASE_XML}" +#fi # modify pom -sed -i "s#.*#${FEDB_SDK_VERSION}#" pom.xml +sed -i "s#.*#${OPENMLDB_SDK_VERSION}#" pom.xml sed -i "s#.*#${JAVA_NATIVE_VERSION}#" pom.xml cd "${ROOT_DIR}" || exit diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh new file mode 100755 index 00000000000..6cf0e38005b --- /dev/null +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash + +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +#bash openmldb-sdk-test-java.sh -b SRC -c test_all.xml -d cluster -l 0 +#-b SRC表示从源码进行编译,会从github上下载代码然后进行编译,PKG表示直接从github上下载压缩包部署 +#-c 执行的suite_xml,决定了跑哪些case +#-d 部署模式,有cluster和standalone两种,默认cluster +#-l 测试的case级别,有0,1,2,3,4,5六个级别,默认为0,也可以同时跑多个级别的case,例如:1,2,3,4,5 + +while getopts ":b:c:d:l:" opt +do + case $opt in + c) + echo "参数c的值:$OPTARG" + CASE_XML=$OPTARG + ;; + d) + echo "参数d的值:$OPTARG" + DEPLOY_MODE=$OPTARG + ;; + l) echo "参数l的值:$OPTARG" + CASE_LEVEL=$OPTARG + ;; + ?) echo "未知参数" + exit 1 + ;; + esac +done +if [[ "${CASE_XML}" == "" ]]; then + CASE_XML="test_all.xml" +fi +if [[ "${DEPLOY_MODE}" == "" ]]; then + DEPLOY_MODE="cluster" +fi +if [[ "${CASE_LEVEL}" == "" ]]; then + CASE_LEVEL="0" +fi + +echo "CASE_XML:${CASE_XML}" +echo "DEPLOY_MODE:${DEPLOY_MODE}" +echo "CASE_LEVEL:${CASE_LEVEL}" + +ROOT_DIR=$(pwd) +# 安装wget +yum install -y wget +yum install -y net-tools +ulimit -c unlimited +echo "ROOT_DIR:${ROOT_DIR}" +#source test/steps/read_properties.sh +#echo "OPENMLDB_SERVER_VERSION:${OPENMLDB_SERVER_VERSION}" +#echo "DIFF_VERSIONS:${DIFF_VERSIONS}" +# 从源码编译 +deployConfigPath="test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties" +OPENMLDB_SERVER_VERSION="SRC" +SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") +if [[ "${SERVER_URL}" == "" ]]; then + echo "${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz" >> ${deployConfigPath} +else + sed -i "s#${OPENMLDB_SERVER_VERSION}=.*#${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz#" ${deployConfigPath} +fi +JAVA_SDK_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') +JAVA_NATIVE_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') +sh test/steps/build-java-sdk.sh + +echo "JAVA_SDK_VERSION:${JAVA_SDK_VERSION}" +echo "JAVA_NATIVE_VERSION:${JAVA_NATIVE_VERSION}" +cat ${deployConfigPath} +# install command tool +cd test/test-tool/command-tool || exit +mvn clean install 
-Dmaven.test.skip=true +cd "${ROOT_DIR}" || exit +# modify config +sh test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "SRC" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" +# install jar +cd test/integration-test/openmldb-test-java || exit +mvn clean install -Dmaven.test.skip=true +cd "${ROOT_DIR}" || exit +# run case +cd "${ROOT_DIR}"/test/integration-test/openmldb-test-java/openmldb-sdk-test || exit +mvn clean test -DsuiteXmlFile=test_suite/"${CASE_XML}" -DcaseLevel="${CASE_LEVEL}" From 462a4bf30570ea3319dfba21a71ffc1db4abfd6f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 09:04:33 +0800 Subject: [PATCH 080/172] modify cicd --- .github/workflows/integration-test-src.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index fd21282464f..fd68959ec63 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -83,7 +83,7 @@ jobs: echo "openmldb-pkg:" ls -al - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_cluster.xml -d cluster -l "0" + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 @@ -108,7 +108,7 @@ jobs: make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_cluster.xml -d cluster -l "1,2,3,4,5" + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "1,2,3,4,5" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 From cbdea3bf0637fd62c322ef48f12a662b1207070a Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 
26 Jul 2022 10:02:57 +0800 Subject: [PATCH 081/172] support create index by sdk --- .../function/function/test_udaf_function.yaml | 106 +++++++++--------- .../test-suite/test_deploy_tmp2.xml | 2 +- .../java_sdk_test/checker/ResultChecker.java | 3 - .../{v230 => sql_test}/FunctionTest.java | 4 +- .../openmldb-sdk-test/test_suite/test_tmp.xml | 2 +- 5 files changed, 59 insertions(+), 58 deletions(-) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/FunctionTest.java (95%) diff --git a/cases/function/function/test_udaf_function.yaml b/cases/function/function/test_udaf_function.yaml index 6612576fd19..f1333331895 100644 --- a/cases/function/function/test_udaf_function.yaml +++ b/cases/function/function/test_udaf_function.yaml @@ -137,11 +137,11 @@ cases: expect: order: id columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double", "m7 double"] - data: | - 1, aa, 1, NULL, 30, 1.100000023841858,2.1, NULL - 2, aa, 2.5, 4.0, 31.5, 1.25, 2.25, 5.0 - 3, aa, 2, 2.5, 32, 1.200000007947286,2.1999999999999997,3.5 - 4, aa, 2.5, 2.5, 33, 1.25, 2.25, 3.5 + rows: + - [1, aa, 1, NULL, 30, 1.100000023841858,2.1, NULL] + - [2, aa, 2.5, 4.0, 31.5, 1.25, 2.25, 5.0] + - [3, aa, 2, 2.5, 32, 1.200000007947286,2.1999999999999997,3.5] + - [4, aa, 2.5, 2.5, 33, 1.25, 2.25, 3.5] - id: 5 desc: distinct_count @@ -360,6 +360,7 @@ cases: id: 15 desc: SUM_WHERE-normal sqlDialect: ["HybridSQL"] + version: 0.6.0 inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] @@ -395,11 +396,11 @@ cases: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] indexs: ["index1:c1:c7"] - data: | - 1, aa, 1, 1, 30, NULL,2.1, 1590738990000, 2020-05-01, a, true - 2, aa, 4, 4, NULL,1.4, 2.4, 1590738991000, 
2020-05-03, c, false - 3, aa, 3, NULL,32, 1.3, 2.3, 1590738992000, 2020-05-02, b, true - 4, aa, NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL + rows: + - [1, aa, 1, 1, 30, NULL,2.1, 1590738990000, 2020-05-01, a, true] + - [2, aa, 4, 4, NULL,1.4, 2.4, 1590738991000, 2020-05-03, c, false] + - [3, aa, 3, NULL,32, 1.3, 2.3, 1590738992000, 2020-05-02, b, true] + - [4, aa, NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL] sql: | SELECT {0}.id, c1, avg_where(c2, c2<4) OVER w1 as m2, @@ -413,11 +414,11 @@ cases: expect: order: id columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double", "m7 double"] - data: | - 1, aa, 1, 1, 30, NULL, 2.1, NULL - 2, aa, 1, 1, 30, NULL, 2.1, NULL - 3, aa, 2, 1, 31, 1.2999999523162842, 2.2, NULL - 4, aa, 3, 3, 32, 1.199999988079071, 2.3, NULL + rows: + - [1, aa, 1, 1, 30, NULL, 2.1, NULL] + - [2, aa, 1, 1, 30, NULL, 2.1, NULL] + - [3, aa, 2, 1, 31, 1.2999999523162842, 2.2, NULL] + - [4, aa, 3, 3, 32, 1.199999988079071, 2.3, NULL] - id: 17 desc: COUNT_WHERE-normal @@ -2182,6 +2183,7 @@ cases: - id: 52 desc: 多个可合并窗口上的多个聚合函数计算 sqlDialect: ["HybridSQL"] + version: 0.6.0 sql: | SELECT {0}.id, pk, col1, std_ts, distinct_count(col1) OVER w1 as a1, @@ -2237,6 +2239,7 @@ cases: - id: 53 desc: 同窗口下多类聚合函数 sqlDialect: ["HybridSQL"] + version: 0.6.0 sql: | SELECT {0}.id, pk, col1, std_ts, sum(col1 + count(col1)) OVER w as a1, @@ -2404,15 +2407,15 @@ cases: expect: columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] order: id - data: | - 1, 1, 1, NULL, NULL - 2, 2, 2, 1, NULL - 3, 3, 3, 2, NULL - 4, 4, 4, 3, 1 - 5, 5, 5, 4, 2 - 6, 4, 4, NULL, NULL - 7, 3, 3, 4, NULL - 8, 2, 2, 3, NULL + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, 1, NULL] + - [3, 3, 3, 2, NULL] + - [4, 4, 4, 3, 1] + - [5, 5, 5, 4, 2] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, 4, NULL] + - [8, 2, 2, 3, NULL] - id: 58 desc: | @@ -2443,15 +2446,15 @@ cases: expect: columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] 
order: id - data: | - 1, 1, 1, NULL, NULL - 2, 2, 2, NULL, 1 - 3, 3, 3, NULL, 2 - 4, 4, 4, 1, 3 - 5, 5, 5, 2, 4 - 6, 4, 4, NULL, NULL - 7, 3, 3, NULL, 4 - 8, 2, 2, NULL, 3 + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, NULL, 1] + - [3, 3, 3, NULL, 2] + - [4, 4, 4, 1, 3] + - [5, 5, 5, 2, 4] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, NULL, 4] + - [8, 2, 2, NULL, 3] - id: 59 desc: | @@ -2483,15 +2486,15 @@ cases: expect: columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] order: id - data: | - 1, 1, 1, NULL, NULL - 2, 2, 2, 1, NULL - 3, 3, 3, 2, NULL - 4, 4, 4, 3, 1 - 5, 5, 5, 4, 2 - 6, 4, 4, NULL, NULL - 7, 3, 3, 4, NULL - 8, 2, 2, 3, NULL + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, 1, NULL] + - [3, 3, 3, 2, NULL] + - [4, 4, 4, 3, 1] + - [5, 5, 5, 4, 2] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, 4, NULL] + - [8, 2, 2, 3, NULL] - id: 60 desc: | @@ -2522,19 +2525,20 @@ cases: expect: columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] order: id - data: | - 1, 1, 1, NULL, NULL - 2, 2, 2, NULL, 1 - 3, 3, 3, NULL, 2 - 4, 4, 4, 1, 3 - 5, 5, 5, 2, 4 - 6, 4, 4, NULL, NULL - 7, 3, 3, NULL, 4 - 8, 2, 2, NULL, 3 + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, NULL, 1] + - [3, 3, 3, NULL, 2] + - [4, 4, 4, 1, 3] + - [5, 5, 5, 2, 4] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, NULL, 4] + - [8, 2, 2, NULL, 3] - id: 61 desc: median sqlDialect: ["HybridSQL"] + version: 0.6.0 inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml index c83a419bc5e..629590aeb19 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml @@ -2,7 +2,7 @@ - + diff 
--git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java index b08367c789b..77ae4efac2d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java @@ -75,7 +75,6 @@ public void check() throws ParseException { for (int j = 0; j < actual_list.size(); ++j) { Object actual_val = actual_list.get(j); Object expect_val = expect_list.get(j); - if (actual_val != null && actual_val instanceof Float) { Assert.assertTrue(expect_val != null && expect_val instanceof Float); Assert.assertEquals( @@ -87,8 +86,6 @@ public void check() throws ParseException { ); } else if (actual_val != null && actual_val instanceof Double) { - System.out.println("expect_val = " + expect_val); - System.out.println("actual_val = " + actual_val); Assert.assertTrue(expect_val != null && expect_val instanceof Double); Assert.assertEquals( (Double) actual_val, (Double) expect_val, 1e-4, diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java similarity index 95% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java index c2eea0d85c9..88ebec258f6 100644 
--- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; @@ -35,7 +35,7 @@ public class FunctionTest extends OpenMLDBTest { @Story("batch") - @Test(dataProvider = "getCase") + @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = "function/function/") public void testFunction(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml index 6051ecf80a3..6eba18c1912 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml @@ -10,7 +10,7 @@ - + From e8681e8ced6dcf370e3ed38e57a1449113cca7be Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 10:31:55 +0800 Subject: [PATCH 082/172] modify cicd --- cases/function/function/test_udaf_function.yaml | 11 ++++++----- .../java_sdk_test/cluster/sql_test/FunctionTest.java | 1 - test/steps/openmldb-sdk-test-java-src.sh | 3 +++ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cases/function/function/test_udaf_function.yaml b/cases/function/function/test_udaf_function.yaml index f1333331895..3968be2bb46 100644 --- a/cases/function/function/test_udaf_function.yaml +++ 
b/cases/function/function/test_udaf_function.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: [] +debugs: ["AVG_WHERE-normal"] version: 0.5.0 cases: - @@ -114,6 +114,7 @@ cases: - id: 4 desc: avg + version: 0.6.0 sqlDialect: ["HybridSQL"] inputs: - @@ -397,10 +398,10 @@ cases: columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] indexs: ["index1:c1:c7"] rows: - - [1, aa, 1, 1, 30, NULL,2.1, 1590738990000, 2020-05-01, a, true] - - [2, aa, 4, 4, NULL,1.4, 2.4, 1590738991000, 2020-05-03, c, false] - - [3, aa, 3, NULL,32, 1.3, 2.3, 1590738992000, 2020-05-02, b, true] - - [4, aa, NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL] + - [1, "aa", 1, 1, 30, NULL,2.1, 1590738990000, "2020-05-01", "a", true] + - [2, "aa", 4, 4, NULL,1.4, 2.4, 1590738991000, "2020-05-03", "c", false] + - [3, "aa", 3, NULL,32, 1.3, 2.3, 1590738992000, "2020-05-02", "b", true] + - [4, "aa", NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL] sql: | SELECT {0}.id, c1, avg_where(c2, c2<4) OVER w1 as m2, diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java index 88ebec258f6..1fff9b1547a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java @@ -52,7 +52,6 @@ public void testFunctionRequestMode(SQLCase testCase) throws Exception { public void testFunctionRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, 
SQLCaseType.kRequestWithSp).run(); } -// 146-157有问题 @Story("requestWithSpAysn") @Test(dataProvider = "getCase") @Yaml(filePaths = "function/function/") diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index 6cf0e38005b..94d04dd089b 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -67,9 +67,12 @@ echo "ROOT_DIR:${ROOT_DIR}" deployConfigPath="test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties" OPENMLDB_SERVER_VERSION="SRC" SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") +echo "SERVER_URL:${SERVER_URL}" if [[ "${SERVER_URL}" == "" ]]; then + echo "AAA" echo "${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz" >> ${deployConfigPath} else + echo "BBBB" sed -i "s#${OPENMLDB_SERVER_VERSION}=.*#${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz#" ${deployConfigPath} fi JAVA_SDK_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') From 4bee139ba2b736e8d77861bdeddfe8bac8eca478 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 10:38:22 +0800 Subject: [PATCH 083/172] modify cicd --- cases/function/window/test_window_union.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cases/function/window/test_window_union.yaml b/cases/function/window/test_window_union.yaml index 3063111a6ac..efb72e9ded4 100644 --- a/cases/function/window/test_window_union.yaml +++ b/cases/function/window/test_window_union.yaml @@ -778,12 +778,12 @@ cases: - mi int - l1 int order: id - data: | - 0, 1, 19, 19, NULL - 1, 1, 18, 18, NULL - 2, 4, 233, 18, 233 - 3, 4, 233, 5, 233 - 4, 7, 233, 5, 5 + rows: + - [0, 1, 19, 19, NULL] + - [1, 1, 18, 18, NULL] + - [2, 4, 233, 18, 233] + - [3, 4, 233, 5, 233] + - [4, 7, 233, 5, 5] - id: 18-5 desc: | From 7fde21545470821551234fdde7b6282d24477893 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 
11:54:50 +0800 Subject: [PATCH 084/172] modify cicd --- cases/function/expression/test_predicate.yaml | 356 +++++++++--------- .../function/function/test_udaf_function.yaml | 3 +- .../src/main/resources/deploy.properties | 1 + test/steps/openmldb-sdk-test-java-src.sh | 6 +- 4 files changed, 186 insertions(+), 180 deletions(-) diff --git a/cases/function/expression/test_predicate.yaml b/cases/function/expression/test_predicate.yaml index b64d27bef8b..db183a878e7 100644 --- a/cases/function/expression/test_predicate.yaml +++ b/cases/function/expression/test_predicate.yaml @@ -598,181 +598,181 @@ cases: - [5, 1, 1590115440000, false] - [7, 1, 1590115450000, false] - [9, 1, 1590115460000, true] - - id: like_predicate_1 - desc: like predicate without escape - inputs: - - columns: ["id int", "std_ts timestamp"] - indexs: ["index1:id:std_ts"] - rows: - - [1, 1590115420000 ] - - [2, 1590115430000 ] - - [3, 1590115440000 ] - - [4, 1590115450000 ] - - [5, 1590115460000 ] - - [6, 1590115470000 ] - - columns: ["id int", "ts timestamp", "col2 string"] - indexs: ["idx:id:ts"] - rows: - - [1, 1590115420000, John] - - [2, 1590115430000, Mary] - - [3, 1590115440000, mike] - - [4, 1590115450000, Dan] - - [5, 1590115460000, Evan_W] - - [6, 1590115470000, M] - dataProvider: - - ["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE - - ["m%", "M_ry" ] # match pattern - sql: | - select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]'; - expect: - columns: ["id int", "col2 string"] - order: id - expectProvider: - 0: - 0: - rows: - - [1, null] - - [2, null] - - [3, mike] - - [4, null] - - [5, null] - - [6, null] - 1: - rows: - - [1, null] - - [2, Mary] - - [3, null] - - [4, null] - - [5, null] - - [6, null] - 1: - 0: - rows: - - [1, John] - - [2, Mary] - - [3, null] - - [4, Dan] - - [5, Evan_W] - - [6, M] - 1: - rows: - - [1, John] - - [2, null] - - [3, mike] - - [4, Dan] - - [5, Evan_W] - - [6, M] - 2: - 0: - rows: - - [1, null] - - [2, Mary] - - [3, 
mike] - - [4, null] - - [5, null] - - [6, M] - 1: - rows: - - [1, null] - - [2, Mary] - - [3, null] - - [4, null] - - [5, null] - - [6, null] - 3: - 0: - rows: - - [1, John] - - [2, null] - - [3, null] - - [4, Dan] - - [5, Evan_W] - - [6, null] - 1: - rows: - - [1, John] - - [2, null] - - [3, mike] - - [4, Dan] - - [5, Evan_W] - - [6, M] - - id: like_predicate_2 - desc: like predicate with escape - inputs: - - columns: ["id int", "std_ts timestamp"] - indexs: ["index1:id:std_ts"] - rows: - - [1, 1590115420000 ] - - [2, 1590115430000 ] - - [3, 1590115440000 ] - - [4, 1590115450000 ] - - [5, 1590115460000 ] - - [6, 1590115470000 ] - - columns: ["id int", "ts timestamp", "col2 string"] - indexs: ["idx:id:ts"] - rows: - - [1, 1590115420000, a*_b] - - [2, 1590115430000, a*mb] - - [3, 1590115440000, "%a_%b"] - - [4, 1590115450000, "Ta_sub"] - - [5, 1590115460000, "lamrb"] - - [6, 1590115470000, "%a*_%b"] - dataProvider: - - ["LIKE", "NOT ILIKE"] - - ["%", "*", ""] # escape with % or disable - sql: | - select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]'; - expect: - columns: ["id int", "col2 string"] - order: id - expectProvider: - 0: - 0: - rows: - - [1, a*_b] - - [2, a*mb] - - [3, null] - - [4, null] - - [5, null] - - [6, null] - 1: - rows: - - [1, null] - - [2, null] - - [3, "%a_%b"] - - [4, Ta_sub] - - [5, null] - - [6, null] - 2: - rows: - - [1, a*_b] - - [2, a*mb] - - [3, null] - - [4, null] - - [5, null] - - [6, "%a*_%b"] - 1: - 0: - rows: - - [1, null] - - [2, null] - - [3, "%a_%b"] - - [4, "Ta_sub"] - - [5, "lamrb"] - - [6, "%a*_%b"] - 1: - rows: - - [1, a*_b] - - [2, a*mb] - - [3, null] - - [4, null] - - [5, "lamrb"] - - [6, "%a*_%b"] - 2: - rows: - - [1, null] - - [2, null] - - [3, "%a_%b"] - - [4, "Ta_sub"] - - [5, "lamrb"] - - [6, null] +# - id: like_predicate_1 +# desc: like predicate without escape +# inputs: +# - columns: ["id int", "std_ts timestamp"] +# indexs: ["index1:id:std_ts"] +# rows: +# - [1, 
1590115420000 ] +# - [2, 1590115430000 ] +# - [3, 1590115440000 ] +# - [4, 1590115450000 ] +# - [5, 1590115460000 ] +# - [6, 1590115470000 ] +# - columns: ["id int", "ts timestamp", "col2 string"] +# indexs: ["idx:id:ts"] +# rows: +# - [1, 1590115420000, John] +# - [2, 1590115430000, Mary] +# - [3, 1590115440000, mike] +# - [4, 1590115450000, Dan] +# - [5, 1590115460000, Evan_W] +# - [6, 1590115470000, M] +# dataProvider: +# - ["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE +# - ["m%", "M_ry" ] # match pattern +# sql: | +# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]'; +# expect: +# columns: ["id int", "col2 string"] +# order: id +# expectProvider: +# 0: +# 0: +# rows: +# - [1, null] +# - [2, null] +# - [3, mike] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# 0: +# rows: +# - [1, John] +# - [2, Mary] +# - [3, null] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# 1: +# rows: +# - [1, John] +# - [2, null] +# - [3, mike] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# 2: +# 0: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, mike] +# - [4, null] +# - [5, null] +# - [6, M] +# 1: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 3: +# 0: +# rows: +# - [1, John] +# - [2, null] +# - [3, null] +# - [4, Dan] +# - [5, Evan_W] +# - [6, null] +# 1: +# rows: +# - [1, John] +# - [2, null] +# - [3, mike] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# - id: like_predicate_2 +# desc: like predicate with escape +# inputs: +# - columns: ["id int", "std_ts timestamp"] +# indexs: ["index1:id:std_ts"] +# rows: +# - [1, 1590115420000 ] +# - [2, 1590115430000 ] +# - [3, 1590115440000 ] +# - [4, 1590115450000 ] +# - [5, 1590115460000 ] +# - [6, 1590115470000 ] +# - columns: ["id int", "ts timestamp", "col2 string"] +# indexs: ["idx:id:ts"] +# rows: +# - [1, 1590115420000, a*_b] +# - 
[2, 1590115430000, a*mb] +# - [3, 1590115440000, "%a_%b"] +# - [4, 1590115450000, "Ta_sub"] +# - [5, 1590115460000, "lamrb"] +# - [6, 1590115470000, "%a*_%b"] +# dataProvider: +# - ["LIKE", "NOT ILIKE"] +# - ["%", "*", ""] # escape with % or disable +# sql: | +# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]'; +# expect: +# columns: ["id int", "col2 string"] +# order: id +# expectProvider: +# 0: +# 0: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, Ta_sub] +# - [5, null] +# - [6, null] +# 2: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, "%a*_%b"] +# 1: +# 0: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, "Ta_sub"] +# - [5, "lamrb"] +# - [6, "%a*_%b"] +# 1: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, "lamrb"] +# - [6, "%a*_%b"] +# 2: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, "Ta_sub"] +# - [5, "lamrb"] +# - [6, null] diff --git a/cases/function/function/test_udaf_function.yaml b/cases/function/function/test_udaf_function.yaml index 3968be2bb46..b504230162f 100644 --- a/cases/function/function/test_udaf_function.yaml +++ b/cases/function/function/test_udaf_function.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: ["AVG_WHERE-normal"] +debugs: [] version: 0.5.0 cases: - @@ -393,6 +393,7 @@ cases: id: 16 desc: AVG_WHERE-normal sqlDialect: ["HybridSQL"] + version: 0.6.0 inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties index a1f60abd7b7..81d2bb96ef1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties @@ -14,3 +14,4 @@ tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.4-linux.tar.gz standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.4-linux.tar.gz tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark.tar.gz + diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index 94d04dd089b..b3e50eb0a4b 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -70,11 +70,15 @@ SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") echo "SERVER_URL:${SERVER_URL}" if [[ "${SERVER_URL}" == "" ]]; then echo "AAA" - echo "${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz" >> ${deployConfigPath} + echo "\n${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz" >> ${deployConfigPath} + echo "CCCC" + cat ${deployConfigPath} else echo "BBBB" sed -i "s#${OPENMLDB_SERVER_VERSION}=.*#${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz#" ${deployConfigPath} fi +echo "DDDD" +cat ${deployConfigPath} JAVA_SDK_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 
's#.*\(.*\).*#\1#') JAVA_NATIVE_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') sh test/steps/build-java-sdk.sh From 0e5523dc08082a07184e9639e7655b7cd47d860f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 13:54:51 +0800 Subject: [PATCH 085/172] support create index by sdk --- cases/function/fz_ddl/test_bank.yaml | 15 ++++++ cases/function/fz_ddl/test_luoji.yaml | 1 + cases/function/fz_ddl/test_myhug.yaml | 1 + .../test_multiple_databases.yaml | 1 + .../{v040 => select}/test_groupby.yaml | 1 + cases/function/test_batch_request.yaml | 1 + cases/query/parameterized_query.yaml | 1 + .../cluster/{v230 => fz}/FZCaseTest.java | 2 +- .../{v230 => sql_test}/BatchRequestTest.java | 2 +- .../{v030 => sql_test}/MultiDBTest.java | 4 +- .../ParameterQueryTest.java | 2 +- .../cluster/v030/SchemaTest.java | 54 ------------------- .../test_suite/test_cluster.xml | 4 +- test/steps/openmldb-sdk-test-java-src.sh | 6 +-- 14 files changed, 28 insertions(+), 67 deletions(-) rename cases/function/{v040 => select}/test_groupby.yaml (99%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => fz}/FZCaseTest.java (97%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/BatchRequestTest.java (96%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v030 => sql_test}/MultiDBTest.java (95%) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v230 => sql_test}/ParameterQueryTest.java (95%) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java diff --git a/cases/function/fz_ddl/test_bank.yaml b/cases/function/fz_ddl/test_bank.yaml 
index 6d71e4d3bca..4b725afd22c 100644 --- a/cases/function/fz_ddl/test_bank.yaml +++ b/cases/function/fz_ddl/test_bank.yaml @@ -1,4 +1,19 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + db: bank +version: 0.5.0 cases: - desc: bank test id: 0 diff --git a/cases/function/fz_ddl/test_luoji.yaml b/cases/function/fz_ddl/test_luoji.yaml index c1673497e22..65b8056909f 100644 --- a/cases/function/fz_ddl/test_luoji.yaml +++ b/cases/function/fz_ddl/test_luoji.yaml @@ -13,6 +13,7 @@ # limitations under the License. db: luoji +version: 0.5.0 cases: - id: 0 desc: luoji test diff --git a/cases/function/fz_ddl/test_myhug.yaml b/cases/function/fz_ddl/test_myhug.yaml index 7ed43b3315f..02d0f971040 100644 --- a/cases/function/fz_ddl/test_myhug.yaml +++ b/cases/function/fz_ddl/test_myhug.yaml @@ -13,6 +13,7 @@ # limitations under the License. db: mybug +version: 0.5.0 cases: - id: 0 desc: mybug test diff --git a/cases/function/multiple_databases/test_multiple_databases.yaml b/cases/function/multiple_databases/test_multiple_databases.yaml index 3c0590828c8..208270b4ae5 100644 --- a/cases/function/multiple_databases/test_multiple_databases.yaml +++ b/cases/function/multiple_databases/test_multiple_databases.yaml @@ -13,6 +13,7 @@ # limitations under the License. 
debugs: [] +version: 0.5.0 cases: - id: 0 desc: Last Join tables from two databases 1 - default db is db1 diff --git a/cases/function/v040/test_groupby.yaml b/cases/function/select/test_groupby.yaml similarity index 99% rename from cases/function/v040/test_groupby.yaml rename to cases/function/select/test_groupby.yaml index 77fd25bb847..7150588bedd 100644 --- a/cases/function/v040/test_groupby.yaml +++ b/cases/function/select/test_groupby.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 sqlDialect: ["HybridSQL"] cases: - id: 0 diff --git a/cases/function/test_batch_request.yaml b/cases/function/test_batch_request.yaml index c333ac68b92..e00fb773163 100644 --- a/cases/function/test_batch_request.yaml +++ b/cases/function/test_batch_request.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 desc: batch request without common column diff --git a/cases/query/parameterized_query.yaml b/cases/query/parameterized_query.yaml index 455f31ac619..b3c58fcf710 100644 --- a/cases/query/parameterized_query.yaml +++ b/cases/query/parameterized_query.yaml @@ -13,6 +13,7 @@ # limitations under the License. 
db: testdb debugs: [] +version: 0.5.0 cases: - id: 0 desc: 带参数的Where条件命中索引 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FZCaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/fz/FZCaseTest.java similarity index 97% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FZCaseTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/fz/FZCaseTest.java index 0d34e1f744f..4e0c8af47e4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/FZCaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/fz/FZCaseTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.fz; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/BatchRequestTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/BatchRequestTest.java similarity index 96% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/BatchRequestTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/BatchRequestTest.java index 1452ef8a56a..f3203c4dbdd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/BatchRequestTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/BatchRequestTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/MultiDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/MultiDBTest.java similarity index 95% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/MultiDBTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/MultiDBTest.java index f3c61e859d7..4dcfd0df47a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/MultiDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/MultiDBTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v030; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; @@ -36,7 +36,7 @@ public class MultiDBTest extends OpenMLDBTest { @Story("batch") - @Test(dataProvider = "getCase") + @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = {"function/multiple_databases/"}) @Step("{testCase.desc}") public void testMultiDB(SQLCase testCase) throws Exception { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ParameterQueryTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ParameterQueryTest.java similarity index 95% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ParameterQueryTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ParameterQueryTest.java index 3b28fd9e9e4..e3c36c536fb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v230/ParameterQueryTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ParameterQueryTest.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.cluster.v230; +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java deleted file mode 100644 index af413747a41..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v030/SchemaTest.java +++ /dev/null @@ -1,54 +0,0 @@ -package com._4paradigm.openmldb.java_sdk_test.cluster.v030; - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com._4paradigm.openmldb.test_common.util.TypeUtil; -import com._4paradigm.openmldb.test_common.util.SDKUtil; -import com._4paradigm.openmldb.sdk.Column; -import com._4paradigm.openmldb.sdk.Schema; -import io.qameta.allure.Feature; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.Assert; -import org.testng.collections.Lists; - -import java.sql.SQLException; -import java.util.List; -import java.util.stream.Collectors; - -@Slf4j -@Feature("SchemaTest") -public class SchemaTest extends OpenMLDBTest { - @Story("schema-sdk") - // @Test - public void testHaveIndexAndOption() throws SQLException { - boolean dbOk = executor.createDB(OpenMLDBGlobalVar.dbName); - log.info("create db:{},{}", OpenMLDBGlobalVar.dbName, dbOk); - String tableName = "test_schema1"; - String createSql = "create table "+tableName+"(\n" + - "c1 string,\n" + - "c2 int not null,\n" + - "c3 bigint,\n" + - "c4 smallint,\n" + - "c5 float,\n" + - "c6 double not null,\n" + - "c7 
timestamp not null,\n" + - "c8 date,\n" + - "c9 bool not null,\n" + - "index(key=(c1),ts=c7,ttl=10,ttl_type=latest))options(partitionnum=8,replicanum=3);"; - SDKUtil.sql(executor, OpenMLDBGlobalVar.dbName,createSql); - Schema tableSchema = executor.getTableSchema(OpenMLDBGlobalVar.dbName, tableName); - List columnList = tableSchema.getColumnList(); - List actualList = columnList.stream() - .map(column -> String.format("%s %s %s", - column.getColumnName(), - TypeUtil.fromJDBCTypeToString(column.getSqlType()), - column.isNotNull() ? "not null" : "").trim()) - .collect(Collectors.toList()); - List expectList = Lists.newArrayList("c1 string","c2 int not null","c3 bigint","c4 smallint", - "c5 float","c6 double not null","c7 timestamp not null","c8 date","c9 bool not null"); - Assert.assertEquals(actualList,expectList); - String deleteSql = "drop table "+tableName+";"; - SDKUtil.sql(executor, OpenMLDBGlobalVar.dbName,deleteSql); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster.xml index b2c7ed735dd..fb886e5f1f9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster.xml @@ -6,9 +6,7 @@ - - - + diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index b3e50eb0a4b..b5d641c1dc1 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -69,15 +69,11 @@ OPENMLDB_SERVER_VERSION="SRC" SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") echo "SERVER_URL:${SERVER_URL}" if [[ "${SERVER_URL}" == "" ]]; then - echo "AAA" - echo "\n${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz" >> ${deployConfigPath} - echo "CCCC" + echo -e 
"\n${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz" >> ${deployConfigPath} cat ${deployConfigPath} else - echo "BBBB" sed -i "s#${OPENMLDB_SERVER_VERSION}=.*#${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz#" ${deployConfigPath} fi -echo "DDDD" cat ${deployConfigPath} JAVA_SDK_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') JAVA_NATIVE_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') From 862397524c6662fd79eebb89a5a8660b28ed91fb Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 15:51:54 +0800 Subject: [PATCH 086/172] modify cicd --- cases/function/disk_table/disk_table.yaml | 63 +++++++++---------- .../{select => v040}/test_groupby.yaml | 0 .../conf/OpenMLDBDeployConfig.java | 5 +- .../cluster/v050/DiskTableTest.java | 31 ++++++--- .../openmldb/test_common/model/SQLCase.java | 6 +- .../openmldb/test_common/model/Table.java | 4 +- .../openmldb/test_common/util/DataUtil.java | 15 +++++ .../openmldb/test_common/util/SQLUtil.java | 8 +-- test/steps/modify_java_sdk_config.sh | 2 + test/steps/openmldb-sdk-test-java-src.sh | 2 + 10 files changed, 85 insertions(+), 51 deletions(-) rename cases/function/{select => v040}/test_groupby.yaml (100%) diff --git a/cases/function/disk_table/disk_table.yaml b/cases/function/disk_table/disk_table.yaml index 809d4ccd535..fbe6cff3686 100644 --- a/cases/function/disk_table/disk_table.yaml +++ b/cases/function/disk_table/disk_table.yaml @@ -13,7 +13,8 @@ # limitations under the License. 
db: test_zw -debugs: ['创建磁盘表,有两个索引,分别为latest和absolute,insert=10 ,where条件'] +debugs: ["创建磁盘表,ttl_type=latest,ttl=4,insert 10"] +version: 0.5.0 cases: - id: 0 @@ -57,7 +58,6 @@ cases: - id: 2 desc: ssd和内存表,join - mode: cluster-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -70,6 +70,7 @@ cases: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] + storage: memory rows: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] @@ -84,7 +85,6 @@ cases: - id: 3 desc: hdd和内存表,join - mode: cluster-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -97,6 +97,7 @@ cases: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] + storage: memory rows: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] @@ -111,11 +112,11 @@ cases: - id: 4 desc: 内存表和ssd,join - mode: cluster-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] + storage: memory rows: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] @@ -138,11 +139,11 @@ cases: - id: 5 desc: 内存表和hdd,join - mode: cluster-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] + storage: memory rows: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] @@ -165,7 +166,6 @@ cases: - id: 6 desc: hdd和ssd,join - mode: cluster-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -193,11 +193,11 @@ cases: - id: 7 desc: hdd和ssd,join - mode: cluster-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] + storage: memory rows: - ["aa", 2, 3, 1590738989000] - ["bb", 21, 31, 1590738990000] @@ -238,6 +238,7 @@ cases: - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] indexs: ["index1:c3:c7"] + 
storage: memory rows: - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] @@ -262,6 +263,7 @@ cases: - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] indexs: ["index1:c3:c7"] + storage: memory rows: - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] @@ -279,6 +281,7 @@ cases: inputs: - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] indexs: ["index1:c3:c7"] + storage: memory rows: - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] @@ -303,6 +306,7 @@ cases: inputs: - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] indexs: ["index1:c3:c7"] + storage: memory rows: - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] @@ -329,17 +333,15 @@ cases: columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4"] storage: SSD -# rows: -# - ["aa", 2, 3, 1590738989000] - sqls: - - insert into {0} values("aa", 2, 3, 1590738989000) - - insert into {0} values("aa", 2, 3, 1590738989000) - - select * from {0}; + rows: + - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] + sql: select * from {0}; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: - ["aa", 2, 3, 1590738989000] - - id: 12 + - id: 13 desc: HDD 插入索引和ts 一样的数据 inputs: - @@ -354,20 +356,25 @@ cases: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] rows: - ["aa", 2, 3, 1590738989000] - - id: 13 + - id: 14 desc: storage_mode=其他字符 - inputs: - - - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] - indexs: ["index1:c1:c4"] - storage: hdp +# inputs: +# - +# columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] +# indexs: 
["index1:c1:c4"] +# storage: hdp # rows: # - ["aa", 2, 3, 1590738989000] - sql: select * from {0}; + sql: | + create table auto_MDYewbTv( + c1 string, + c2 int, + c3 bigint, + c4 timestamp, + index(key=(c1),ts=c4))options(partitionnum=1,replicanum=1,storage_mode="hdp"); expect: success: false - - id: 14 desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 inputs: @@ -475,15 +482,3 @@ cases: - ["bb", 2, 5] - ["bb", 2, 9] - ["bb", 2, 11] - -# - id: 16 -# desc: 数据过期类型长时间 -# inputs: -# - -# columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] -# indexs: ["index1:c1:c4:10000h"] -# storage: hdp -# rows: -# - ["aa", 2, 3, 1590738989000] -# sql: select * from {0}; -# expect: \ No newline at end of file diff --git a/cases/function/select/test_groupby.yaml b/cases/function/v040/test_groupby.yaml similarity index 100% rename from cases/function/select/test_groupby.yaml rename to cases/function/v040/test_groupby.yaml diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java index 0b4f228db29..9355952a619 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/conf/OpenMLDBDeployConfig.java @@ -40,7 +40,10 @@ public class OpenMLDBDeployConfig { } public static String getUrl(String version){ - return CONFIG.getProperty(version, DeployUtil.getOpenMLDBUrl(version)); + System.out.println("CONFIG = " + CONFIG); + String openMLDBPkgUrl = CONFIG.getProperty(version, DeployUtil.getOpenMLDBUrl(version)); + System.out.println("openMLDBPkgUrl = " + openMLDBPkgUrl); + return openMLDBPkgUrl; } public static String getZKUrl(String version){ return 
CONFIG.getProperty(version+"_zk_url", ZK_URL); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java index ce976b51f04..7bf539e5b75 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java @@ -5,32 +5,45 @@ import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Feature; import io.qameta.allure.Story; import lombok.extern.slf4j.Slf4j; import org.testng.annotations.Test; @Slf4j +@Feature("Disk-Table") public class DiskTableTest extends OpenMLDBTest { - @Test(dataProvider = "getCase") + @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = "function/disk_table/disk_table.yaml") - @Story("Disk-Table") + @Story("batch") public void testDiskTable(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } - //all pass @Test(dataProvider = "getCase") @Yaml(filePaths = "function/disk_table/disk_table.yaml") - @Story("Disk-Table") - public void testDiskTable2(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + @Story("request") + public void testDiskTableRequestMode(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); } - + @Story("requestWithSp") @Test(dataProvider = "getCase") @Yaml(filePaths = "function/disk_table/disk_table.yaml") - 
@Story("Disk-Table") + public void testDiskTableRequestModeWithSp(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); + } + @Story("requestWithSpAysn") + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + public void testDiskTableRequestModeWithSpAysn(SQLCase testCase) throws Exception { + ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); + } + + @Test(dataProvider = "getCase",enabled = false) + @Yaml(filePaths = "function/disk_table/disk_table.yaml") + @Story("CLI") public void testDiskTable3(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index 90f1e54d5ff..5bd5c64537c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -101,7 +101,11 @@ public boolean isSupportDiskTable(){ } public void setStorage(String storageMode){ if(CollectionUtils.isNotEmpty(inputs)) { - inputs.forEach(t -> t.setStorage(storageMode)); + inputs.forEach(t -> { + if(StringUtils.isEmpty(t.getStorage())){ + t.setStorage(storageMode); + } + }); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java index ae50b2016c4..aac1f296a35 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java @@ -16,6 +16,7 @@ package com._4paradigm.openmldb.test_common.model; +import com._4paradigm.openmldb.test_common.util.DataUtil; import com.google.common.base.Joiner; import com.google.common.collect.Lists; import lombok.Data; @@ -116,8 +117,7 @@ public List extractInserts() { for (List row : getRows()) { List> rows = Lists.newArrayList(); rows.add(row); - inserts.add(buildInsertSQLFromRows(name, getColumns(), - rows)); + inserts.add(buildInsertSQLFromRows(name, getColumns(), rows)); } return inserts; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java index a00c064ac26..734b577c5d0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java @@ -10,6 +10,20 @@ import java.util.List; @Slf4j public class DataUtil { + + public static Object parseTime(Object data){ + String dataStr = String.valueOf(data); + if(dataStr.equals("{currentTime}")){ + return System.currentTimeMillis(); + }else if(dataStr.startsWith("{currentTime}-")){ + long t = Long.parseLong(dataStr.substring(14)); + return System.currentTimeMillis()-t; + }else if(dataStr.startsWith("{currentTime}+")){ + long t = Long.parseLong(dataStr.substring(14)); + return System.currentTimeMillis()+t; + } + return data; + } public static Object parseRules(String data){ Object obj = null; 
if(data.equals("{currentTime}")){ @@ -91,6 +105,7 @@ public static boolean setRequestData(PreparedStatement requestPs, List o requestPs.setNull(i + 1, 0); continue; } + obj = DataUtil.parseTime(obj); int columnType = metaData.getColumnType(i + 1); if (columnType == Types.BOOLEAN) { requestPs.setBoolean(i + 1, Boolean.parseBoolean(obj.toString())); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java index 0c96ecdef54..2b105f0f9a5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java @@ -75,15 +75,15 @@ public static String formatSql(String sql, List tableNames, OpenMLDBInfo return sql; } - public static String formatSql(String sql, OpenMLDBInfo fedbInfo) { + public static String formatSql(String sql, OpenMLDBInfo openMLDBInfo) { if(sql.contains("{tb_endpoint_0}")){ - sql = sql.replace("{tb_endpoint_0}", fedbInfo.getTabletEndpoints().get(0)); + sql = sql.replace("{tb_endpoint_0}", openMLDBInfo.getTabletEndpoints().get(0)); } if(sql.contains("{tb_endpoint_1}")){ - sql = sql.replace("{tb_endpoint_1}", fedbInfo.getTabletEndpoints().get(1)); + sql = sql.replace("{tb_endpoint_1}", openMLDBInfo.getTabletEndpoints().get(1)); } if(sql.contains("{tb_endpoint_2}")){ - sql = sql.replace("{tb_endpoint_2}", fedbInfo.getTabletEndpoints().get(2)); + sql = sql.replace("{tb_endpoint_2}", openMLDBInfo.getTabletEndpoints().get(2)); } return sql; } diff --git a/test/steps/modify_java_sdk_config.sh b/test/steps/modify_java_sdk_config.sh index 6412a9bc5de..92c07c735f2 100755 --- a/test/steps/modify_java_sdk_config.sh +++ 
b/test/steps/modify_java_sdk_config.sh @@ -31,6 +31,8 @@ sed -i "s####" test_suite/"${CASE_XML}" #fi +echo "test suite xml:" +cat test_suite/"${CASE_XML}" # modify pom sed -i "s#.*#${OPENMLDB_SDK_VERSION}#" pom.xml sed -i "s#.*#${JAVA_NATIVE_VERSION}#" pom.xml diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index b5d641c1dc1..e706d718abf 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -81,6 +81,7 @@ sh test/steps/build-java-sdk.sh echo "JAVA_SDK_VERSION:${JAVA_SDK_VERSION}" echo "JAVA_NATIVE_VERSION:${JAVA_NATIVE_VERSION}" +echo "deploy config:" cat ${deployConfigPath} # install command tool cd test/test-tool/command-tool || exit @@ -88,6 +89,7 @@ mvn clean install -Dmaven.test.skip=true cd "${ROOT_DIR}" || exit # modify config sh test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "SRC" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" + # install jar cd test/integration-test/openmldb-test-java || exit mvn clean install -Dmaven.test.skip=true From 9b80330cfbd271023abf8510b84151f0cc5944b7 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 17:35:49 +0800 Subject: [PATCH 087/172] deploy --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index a80f46f944d..42fea2cff3e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -10,7 +10,7 @@ tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 
tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz -single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz +single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3-linux.tar.gz single_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz single_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz From a98854b2465d0e2acf74a08a5526248f653dbe9c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 17:57:28 +0800 Subject: [PATCH 088/172] deploy --- .../_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index dddb7f6ca8c..5b315279d78 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -426,7 +426,7 @@ public OpenMLDBInfo deployStandalone(String testPath, String ip){ "sed -i "+sedSeparator+" 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' " + testPath + standaloneName + "/conf/standalone_tablet.flags", "sed -i "+sedSeparator+" 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+testPath+standaloneName+"/conf/standalone_tablet.flags", "sed -i "+sedSeparator+" 's#--endpoint=.*#--endpoint=" + tabletEndpoint + "#' " + testPath + standaloneName + "/conf/standalone_tablet.flags", - "echo '--hdd_root_path=./db_hdd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", + "echo -e '\n--hdd_root_path=./db_hdd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", "echo 
'--recycle_bin_hdd_root_path=./recycle_hdd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", "echo '--ssd_root_path=./db_ssd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", "echo '--recycle_bin_ssd_root_path=./recycle_ssd' >> "+testPath+standaloneName+"/conf/standalone_tablet.flags", From 92946995799b421dd75b61384b1cacb640c0471f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 18:24:48 +0800 Subject: [PATCH 089/172] modify cicd --- test/steps/openmldb-sdk-test-java-src.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index e706d718abf..681a56a97a8 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -69,7 +69,7 @@ OPENMLDB_SERVER_VERSION="SRC" SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") echo "SERVER_URL:${SERVER_URL}" if [[ "${SERVER_URL}" == "" ]]; then - echo -e "\n${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz" >> ${deployConfigPath} + echo -e "\n${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz\n" >> ${deployConfigPath} cat ${deployConfigPath} else sed -i "s#${OPENMLDB_SERVER_VERSION}=.*#${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz#" ${deployConfigPath} @@ -96,4 +96,4 @@ mvn clean install -Dmaven.test.skip=true cd "${ROOT_DIR}" || exit # run case cd "${ROOT_DIR}"/test/integration-test/openmldb-test-java/openmldb-sdk-test || exit -mvn clean test -DsuiteXmlFile=test_suite/"${CASE_XML}" -DcaseLevel="${CASE_LEVEL}" +mvn clean test -e -U -DsuiteXmlFile=test_suite/"${CASE_XML}" -DcaseLevel="${CASE_LEVEL}" From 19a498c39b85b9a69f9dffcbd89eeddd162dfe55 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 18:35:14 +0800 Subject: [PATCH 090/172] modify cicd --- .../main/resources/{deploy.properties => deploy1111.properties} | 0 test/steps/openmldb-sdk-test-java-src.sh | 1 + 2 files changed, 1 
insertion(+) rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/{deploy.properties => deploy1111.properties} (100%) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy1111.properties similarity index 100% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy1111.properties diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index 681a56a97a8..5850f6b616a 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -64,6 +64,7 @@ echo "ROOT_DIR:${ROOT_DIR}" #echo "OPENMLDB_SERVER_VERSION:${OPENMLDB_SERVER_VERSION}" #echo "DIFF_VERSIONS:${DIFF_VERSIONS}" # 从源码编译 +#deployConfigPath="test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties" deployConfigPath="test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties" OPENMLDB_SERVER_VERSION="SRC" SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") From 5537d85314d885f5a51a42f9bc700963fba5b862 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 26 Jul 2022 19:47:33 +0800 Subject: [PATCH 091/172] modify cicd --- cases/function/disk_table/disk_table.yaml | 39 ++++++++++--------- .../java_sdk_test/common/StandaloneTest.java | 8 ++-- test/steps/modify_java_sdk_config.sh | 4 ++ 3 files changed, 28 insertions(+), 23 deletions(-) diff --git a/cases/function/disk_table/disk_table.yaml b/cases/function/disk_table/disk_table.yaml index fbe6cff3686..f2617ac1171 100644 --- a/cases/function/disk_table/disk_table.yaml +++ b/cases/function/disk_table/disk_table.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: ["创建磁盘表,ttl_type=latest,ttl=4,insert 10"] +debugs: [] version: 0.5.0 cases: - @@ -336,6 +336,7 @@ cases: rows: - ["aa", 2, 3, 1590738989000] - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] sql: select * from {0}; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -375,7 +376,7 @@ cases: expect: success: false - - id: 14 + - id: 15 desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 inputs: - @@ -383,26 +384,26 @@ cases: indexs: ["index1:c1:c4:4:latest"] storage: SSD rows: - - ["bb", 2, 3, "{currentTime}-600001"] - - ["bb", 4, 5, "{currentTime}-600002"] - - ["bb", 6, 7, "{currentTime}-600003"] - - ["bb", 8, 9, "{currentTime}-600004"] - - ["bb", 10, 11, "{currentTime}-600005"] - - ["bb", 12, 13, "{currentTime}-600006"] - - ["bb", 14, 15, "{currentTime}-600007"] - - ["bb", 16, 17, "{currentTime}-600008"] - - ["bb", 18, 19, "{currentTime}-600009"] - - ["bb", 20, 21, "{currentTime}-600010"] + - ["bb", 2, 3, 1590738989000] + - ["bb", 4, 5, 1590738990000] + - ["bb", 6, 7, 1590738991000] + - ["bb", 8, 9, 1590738992000] + - ["bb", 10, 11, 1590738993000] + - ["bb", 12, 13, 1590738994000] + - ["bb", 14, 15, 1590738995000] + - ["bb", 16, 17, 1590738996000] + - ["bb", 18, 19, 1590738997000] + - ["bb", 20, 21, 1590738998000] sql: select c1,c2,c3 from {0}; expect: columns: ["c1 string","c2 int","c3 bigint"] rows: - - ["bb", 2, 3] - - ["bb", 4, 5] - - ["bb", 6, 7] - - ["bb", 8, 9] + - ["bb", 20, 21] + - ["bb", 18, 19] + - ["bb", 16, 17] + - ["bb", 14, 15] - - id: 15 + - id: 16 desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10 inputs: - @@ -428,7 +429,7 @@ cases: - ["bb", 4, 5] - ["bb", 6, 7] - - id: 16 + - id: 17 desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 inputs: - @@ -455,7 +456,7 @@ cases: - ["bb", 2, 5] - ["bb", 2, 9] - - id: 17 + - id: 18 desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 ,where条件 inputs: - diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index ac76d7369d6..c45ddb4bfab 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -52,11 +52,11 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi .basePath("/home/wangkaidong/fedb-auto-test/standalone") .openMLDBPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30016")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30017")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30018")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30013")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30014")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30015")) .host("172.24.4.55") - .port(30016) + .port(30013) .build(); } String caseEnv = System.getProperty("caseEnv"); diff --git a/test/steps/modify_java_sdk_config.sh b/test/steps/modify_java_sdk_config.sh index 92c07c735f2..acca723a9be 100755 --- a/test/steps/modify_java_sdk_config.sh +++ b/test/steps/modify_java_sdk_config.sh @@ -34,6 +34,10 @@ sed -i "s##.*#${OPENMLDB_SDK_VERSION}#" pom.xml sed -i "s#.*#${JAVA_NATIVE_VERSION}#" pom.xml +echo "pom xml:" +cat pom.xml cd "${ROOT_DIR}" || exit From ebaccfcab08f587df1b2d23d4de44f2c963c2fdc Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 27 Jul 2022 09:32:44 +0800 Subject: [PATCH 092/172] modify cicd --- .github/workflows/integration-test-src.yml | 8 ++++---- cases/function/disk_table/disk_table.yaml | 15 
+++++++------- cases/function/window/test_window.yaml | 20 +++++++++++-------- .../src/main/resources/deploy1111.properties | 17 ---------------- .../src/main/resources/run_case.properties | 3 +-- .../cluster/{v050 => disk}/DiskTableTest.java | 2 +- test/steps/modify_java_sdk_config.sh | 13 ++++++++++-- test/steps/openmldb-sdk-test-java-src.sh | 8 +++++--- 8 files changed, 42 insertions(+), 44 deletions(-) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy1111.properties rename test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/{v050 => disk}/DiskTableTest.java (96%) diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index fd68959ec63..3af33f0b5a7 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -66,7 +66,7 @@ jobs: # check_name: Java SDK Test Standalone1 SRC Report # comment_title: Java SDK Test Standalone1 SRC Report - java-sdk-test-cluster-0: + java-sdk-cluster-memory-0: if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} runs-on: ubuntu-latest container: @@ -83,7 +83,7 @@ jobs: echo "openmldb-pkg:" ls -al - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" -s "memory" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 @@ -93,7 +93,7 @@ jobs: check_name: "Java SDK Test Cluster0 SRC Report" comment_title: "Java SDK Test Cluster0 SRC Report" - java-sdk-test-cluster-1: + java-sdk-cluster-memory-1: if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} runs-on: ubuntu-latest container: @@ -108,7 +108,7 @@ jobs: make 
SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "1,2,3,4,5" + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "1,2,3,4,5" -s "memory" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 diff --git a/cases/function/disk_table/disk_table.yaml b/cases/function/disk_table/disk_table.yaml index f2617ac1171..7b3f81e902c 100644 --- a/cases/function/disk_table/disk_table.yaml +++ b/cases/function/disk_table/disk_table.yaml @@ -328,6 +328,7 @@ cases: - [5,"ee",21,34] - id: 12 desc: SSD 插入索引和ts 一样的数据 + tags: ["TODO","bug修改后验证"] inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -344,6 +345,7 @@ cases: - ["aa", 2, 3, 1590738989000] - id: 13 desc: HDD 插入索引和ts 一样的数据 + tags: ["TODO","bug修改后验证"] inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -352,6 +354,7 @@ cases: rows: - ["aa", 2, 3, 1590738989000] - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] sql: select * from {0}; expect: columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -359,13 +362,7 @@ cases: - ["aa", 2, 3, 1590738989000] - id: 14 desc: storage_mode=其他字符 -# inputs: -# - -# columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] -# indexs: ["index1:c1:c4"] -# storage: hdp -# rows: -# - ["aa", 2, 3, 1590738989000] + mode: request-unsupport sql: | create table auto_MDYewbTv( c1 string, @@ -378,6 +375,7 @@ cases: - id: 15 desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 + tags: ["TODO","bug修改后验证"] inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -405,6 +403,7 @@ cases: - id: 16 desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10 + tags: ["TODO","bug修改后验证"] inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -431,6 +430,7 @@ cases: - id: 17 
desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 + tags: ["TODO","bug修改后验证"] inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -458,6 +458,7 @@ cases: - id: 18 desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 ,where条件 + mode: request-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] diff --git a/cases/function/window/test_window.yaml b/cases/function/window/test_window.yaml index db24f7c493f..04235f433eb 100644 --- a/cases/function/window/test_window.yaml +++ b/cases/function/window/test_window.yaml @@ -97,6 +97,7 @@ cases: - id: 3 desc: 一个pk所有数据都不在窗口内 + version: 0.6.0 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] @@ -113,9 +114,9 @@ cases: order: id columns: ["id int","c1 string","w1_c4_sum bigint"] rows: - - [1,"aa",0] - - [2,"aa",0] - - [3,"aa",0] + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",null] - id: 4 desc: 窗口只要当前行 @@ -163,6 +164,7 @@ cases: - id: 6 desc: 最后一行进入窗口 + version: 0.6.0 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] @@ -179,12 +181,13 @@ cases: order: id columns: ["id int","c1 string","w1_c4_sum bigint"] rows: - - [1,"aa",0] - - [2,"aa",0] + - [1,"aa",null] + - [2,"aa",null] - [3,"aa",30] - id: 7 desc: 纯历史窗口-滑动 + version: 0.6.0 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] @@ -203,7 +206,7 @@ cases: order: id columns: ["id int","c1 string","w1_c4_sum bigint"] rows: - - [1,"aa",0] + - [1,"aa",null] - [2,"aa",30] - [3,"aa",61] - [4,"aa",63] @@ -211,6 +214,7 @@ cases: - id: 8 desc: 两个pk,一个没有进入窗口,一个滑动 + version: 0.6.0 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] @@ -229,11 +233,11 @@ cases: order: id columns: ["id int","c1 string","w1_c4_sum bigint"] rows: - - [1,"aa",0] + - [1,"aa",null] - [2,"aa",30] - [3,"aa",61] - [4,"aa",63] - - 
[5,"bb",0] + - [5,"bb",null] - id: 9 desc: 两个pk,一个全部进入窗口,一个滑动 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy1111.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy1111.properties deleted file mode 100644 index 81d2bb96ef1..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy1111.properties +++ /dev/null @@ -1,17 +0,0 @@ - -#zk的url -zk_url=https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz - -#配置fedb版本以及对应的url - -main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz -0.2.2=https://github.com/4paradigm/OpenMLDB/releases/download/0.2.2/openmldb-0.2.2-linux.tar.gz -0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz -spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz - -tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.4-linux.tar.gz -#tmp=/home/zhaowei01/tobe/openmldb_linux.tar.gz -standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.4-linux.tar.gz -tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz -tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark.tar.gz - diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index 2959f96554e..912d8e3a2da 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,4 +1,3 @@ # memory/ssd/hdd -#table_storage_mode=ssd -version=0.5.0 +table_storage_mode=ssd diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/disk/DiskTableTest.java similarity index 96% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java rename to test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/disk/DiskTableTest.java index 7bf539e5b75..0c16692df6e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/DiskTableTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/disk/DiskTableTest.java @@ -1,4 +1,4 @@ -package com._4paradigm.openmldb.java_sdk_test.cluster.v050; +package com._4paradigm.openmldb.java_sdk_test.cluster.disk; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; diff --git a/test/steps/modify_java_sdk_config.sh b/test/steps/modify_java_sdk_config.sh index acca723a9be..dfbc670e058 100755 --- a/test/steps/modify_java_sdk_config.sh +++ b/test/steps/modify_java_sdk_config.sh @@ -18,12 +18,13 @@ CASE_XML=$1 DEPLOY_MODE=$2 OPENMLDB_SDK_VERSION=$3 -BUILD_MODE=$4 +TEST_CASE_VERSION=$4 OPENMLDB_SERVER_VERSION=$5 JAVA_NATIVE_VERSION=$6 +TABLE_STORAGE_MODE=$7 echo "deploy_mode:${DEPLOY_MODE}" ROOT_DIR=$(pwd) -echo "test_version:$OPENMLDB_SDK_VERSION" +echo "test_sdk_version:$OPENMLDB_SDK_VERSION" cd test/integration-test/openmldb-test-java/openmldb-sdk-test || exit # modify suite_xml sed -i "s###" test_suite/"${CASE_XML}" @@ -33,6 +34,14 @@ sed -i "s##> src/main/resources/run_case.properties +fi +if [ -n "${TABLE_STORAGE_MODE}" 
]; then + sed -i "s#table_storage_mode=.*#table_storage_mode=${TABLE_STORAGE_MODE}#" src/main/resources/run_case.properties +fi +echo "run_case config:" +cat src/main/resources/run_case.properties # modify pom cd "${ROOT_DIR}" cd test/integration-test/openmldb-test-java/openmldb-test-common || exit diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index 5850f6b616a..535f6ea5c16 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -35,6 +35,9 @@ do l) echo "参数l的值:$OPTARG" CASE_LEVEL=$OPTARG ;; + s) echo "参数s的值:$OPTARG" + TABLE_STORAGE_MODE=$OPTARG + ;; ?) echo "未知参数" exit 1 ;; @@ -53,6 +56,7 @@ fi echo "CASE_XML:${CASE_XML}" echo "DEPLOY_MODE:${DEPLOY_MODE}" echo "CASE_LEVEL:${CASE_LEVEL}" +echo "TABLE_STORAGE_MODE:${TABLE_STORAGE_MODE}" ROOT_DIR=$(pwd) # 安装wget @@ -64,14 +68,12 @@ echo "ROOT_DIR:${ROOT_DIR}" #echo "OPENMLDB_SERVER_VERSION:${OPENMLDB_SERVER_VERSION}" #echo "DIFF_VERSIONS:${DIFF_VERSIONS}" # 从源码编译 -#deployConfigPath="test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/deploy.properties" deployConfigPath="test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties" OPENMLDB_SERVER_VERSION="SRC" SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") echo "SERVER_URL:${SERVER_URL}" if [[ "${SERVER_URL}" == "" ]]; then echo -e "\n${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz\n" >> ${deployConfigPath} - cat ${deployConfigPath} else sed -i "s#${OPENMLDB_SERVER_VERSION}=.*#${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz#" ${deployConfigPath} fi @@ -89,7 +91,7 @@ cd test/test-tool/command-tool || exit mvn clean install -Dmaven.test.skip=true cd "${ROOT_DIR}" || exit # modify config -sh test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "SRC" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" +sh 
test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" "${TABLE_STORAGE_MODE}" # install jar cd test/integration-test/openmldb-test-java || exit From 792e8baef070e0dc6cdd35d81781af22580cfebe Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 27 Jul 2022 11:52:47 +0800 Subject: [PATCH 093/172] modify cicd --- cases/function/disk_table/disk_table.yaml | 10 +++++----- .../java_sdk_test/cluster/sql_test/DDLTest.java | 16 +++++++++++++++- test/steps/openmldb-sdk-test-java-src.sh | 2 +- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/cases/function/disk_table/disk_table.yaml b/cases/function/disk_table/disk_table.yaml index 7b3f81e902c..33c0b45e0be 100644 --- a/cases/function/disk_table/disk_table.yaml +++ b/cases/function/disk_table/disk_table.yaml @@ -328,7 +328,7 @@ cases: - [5,"ee",21,34] - id: 12 desc: SSD 插入索引和ts 一样的数据 - tags: ["TODO","bug修改后验证"] + mode: request-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -345,7 +345,7 @@ cases: - ["aa", 2, 3, 1590738989000] - id: 13 desc: HDD 插入索引和ts 一样的数据 - tags: ["TODO","bug修改后验证"] + mode: request-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -375,7 +375,7 @@ cases: - id: 15 desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 - tags: ["TODO","bug修改后验证"] + mode: request-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -403,7 +403,7 @@ cases: - id: 16 desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10 - tags: ["TODO","bug修改后验证"] + mode: request-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] @@ -430,7 +430,7 @@ cases: - id: 17 desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 - tags: ["TODO","bug修改后验证"] + mode: request-unsupport inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java index c5b9ecd3991..cda4acbcaff 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java @@ -40,7 +40,7 @@ public void testCreate(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } - @Yaml(filePaths = "function/ddl/test_create.yaml")//7 表名为非保留关键字 没过 + @Yaml(filePaths = "function/ddl/test_create.yaml") @Story("create") @Test(dataProvider = "getCase",enabled = false) public void testCreateByCli(SQLCase testCase){ @@ -85,4 +85,18 @@ public void testOptions(SQLCase testCase){ public void testOptionsByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") + @Story("create_no_index") + public void testCreateNoIndex(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") + @Story("create_no_index") + public void testCreateNoIndexByCli(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } } diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh index 535f6ea5c16..10d30d1f043 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -21,7 +21,7 @@ #-d 部署模式,有cluster和standalone两种,默认cluster #-l 
测试的case级别,有0,1,2,3,4,5六个级别,默认为0,也可以同时跑多个级别的case,例如:1,2,3,4,5 -while getopts ":b:c:d:l:" opt +while getopts ":c:d:l:s:" opt do case $opt in c) From 4cb353af51df7be2e3d67e394cd418b492ca0eef Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 27 Jul 2022 14:26:27 +0800 Subject: [PATCH 094/172] modify cicd --- .github/workflows/integration-test-src.yml | 2 +- cases/function/ddl/test_create_no_index.yaml | 140 +----------------- cases/function/ddl/test_options.yaml | 1 + cases/function/ddl/test_ttl.yaml | 8 +- cases/function/join/test_lastjoin_simple.yaml | 46 +++++- cases/function/select/test_select_sample.yaml | 15 +- cases/function/select/test_sub_select.yaml | 13 ++ .../cluster/sql_test/DDLTest.java | 2 +- .../test_suite/test_cluster_disk.xml | 28 ++++ .../openmldb/test_common/model/SQLCase.java | 2 +- .../openmldb/test_common/model/Table.java | 6 +- .../openmldb/test_common/util/SDKUtil.java | 130 ++++++++-------- test/steps/openmldb-sdk-test-java-src.sh | 2 +- 13 files changed, 185 insertions(+), 210 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster_disk.xml diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index 3af33f0b5a7..f0f1ee5c4ea 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -78,7 +78,7 @@ jobs: - name: build jsdk and package run: | make configure CMAKE_INSTALL_PREFIX=openmldb-linux - make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux echo "openmldb-pkg:" ls -al diff --git a/cases/function/ddl/test_create_no_index.yaml b/cases/function/ddl/test_create_no_index.yaml index 66f859ea7a5..f29afdf4717 100644 --- a/cases/function/ddl/test_create_no_index.yaml +++ b/cases/function/ddl/test_create_no_index.yaml @@ -20,23 +20,9 @@ cases: id: 0 desc: 创建表不指定索引 inputs: - - - create: | - create table 
{0} ( - id int not null, - c1 int not null, - c2 smallint not null, - c3 float not null, - c4 double not null, - c5 bigint not null, - c6 string not null, - c7 timestamp not null, - c8 date not null, - c9 bool not null - ); - insert: | - insert into {0} values - (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true); + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] sql: desc {0}; expect: idxs: @@ -280,126 +266,14 @@ cases: ts: "c7" ttl: 100min ttlType: kAbsoluteTime - - - id: 12 - desc: 不指定索引,进行lastjoin - inputs: - - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] - rows: - - [ "aa",2,3,1590738989000 ] - - [ "bb",21,31,1590738990000 ] - - [ "dd",41,51,1590738990000 ] - - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] - rows: - - [ "aa",2,13,1590738989000 ] - - [ "bb",21,131,1590738990000 ] - - [ "cc",41,121,1590738991000 ] - sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; - expect: - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] - order: c1 - rows: - - [ "aa",2,13,1590738989000 ] - - [ "bb",21,131,1590738990000 ] - - [ "dd", 41, NULL, NULL ] - - - id: 13 - desc: 不指定索引,进行lastjoin,匹配多行 - inputs: - - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] - rows: - - [ "aa",2,3,1590738989000 ] - - [ "bb",21,31,1590738990000 ] - - [ "dd",41,51,1590738990000 ] - - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] - rows: - - [ "aa",2,13,1590738989000 ] - - [ "aa",21,131,1590738990000 ] - - [ "cc",41,121,1590738991000 ] - sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; - expect: - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] - order: c1 - rows: - - [ "aa",2,131,1590738990000 ] - - [ 
"bb",21,NULL,NULL ] - - [ "dd", 41, NULL, NULL ] - - - id: 14 - desc: 不指定索引,插入数据,可查询 - inputs: - - - create: | - create table {0} ( - id int not null, - c1 int not null, - c2 smallint not null, - c3 float not null, - c4 double not null, - c5 bigint not null, - c6 string not null, - c7 timestamp not null, - c8 date not null, - c9 bool not null - ); - insert: | - insert into {0} values - (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true); - sql: select * from {0}; - expect: - columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"] - order: id - rows: - - [1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true] - - - id: 15 - desc: 不指定索引,进行子查询操作 - inputs: - - - create: | - create table {0} ( - id int not null, - c1 int not null, - c2 smallint not null, - c3 float not null, - c4 double not null, - c5 bigint not null, - c6 string not null, - c7 timestamp not null, - c8 date not null, - c9 bool not null - ); - insert: | - insert into {0} values - (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true); - sql: select c1,c2 from (select id as c1,c1 as c2,c7 as c3 from {0}); - expect: - columns : ["c1 int","c2 int"] - order: id - rows: - - [1,1] - id: 16 desc: 创建表指定索引,没有默认索引 inputs: - - - create: | - create table {0} ( - id int not null, - c1 int not null, - c2 smallint not null, - c3 float not null, - c4 double not null, - c5 bigint not null, - c6 string not null, - c7 timestamp not null, - c8 date not null, - c9 bool not null, - index(key=(c1), ts=c5) - ); - insert: | - insert into {0} values - (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true); + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] + indexs: ["index1:c1:c5"] sql: desc {0}; expect: 
idxs: diff --git a/cases/function/ddl/test_options.yaml b/cases/function/ddl/test_options.yaml index 1c8ed43ad7d..a9fc0b44631 100644 --- a/cases/function/ddl/test_options.yaml +++ b/cases/function/ddl/test_options.yaml @@ -359,6 +359,7 @@ cases: - id: 22 desc: test-case + tags: ["TODO","disk table apiserver:Table not found"] mode: standalone-unsupport inputs: - diff --git a/cases/function/ddl/test_ttl.yaml b/cases/function/ddl/test_ttl.yaml index ecd5c9232c9..ba2456856c1 100644 --- a/cases/function/ddl/test_ttl.yaml +++ b/cases/function/ddl/test_ttl.yaml @@ -195,15 +195,15 @@ cases: indexs: ["index1:c1:c4:(10m,2):absandlat"] rows: - [1,"aa", 1, 1590738990000,1590738990000] - - [2,"aa", 2, 1590738990000,1590738990000] - - [3,"aa", 3, 1590738990000,1590738990000] + - [2,"aa", 2, 1590738990000,1590738991000] + - [3,"aa", 3, 1590738990000,1590738992000] sql: select * from {0}; expect: columns: ["id int","c1 string","c2 int","c3 timestamp","c4 timestamp"] order: id rows: - - [2,"aa", 2, 1590738990000,1590738990000] - - [3,"aa", 3, 1590738990000,1590738990000] + - [2,"aa", 2, 1590738990000,1590738991000] + - [3,"aa", 3, 1590738990000,1590738992000] - id: 23 desc: 指定ttl_type=absorlat,部分数据过期 diff --git a/cases/function/join/test_lastjoin_simple.yaml b/cases/function/join/test_lastjoin_simple.yaml index b2b0b1ddacf..05a27164047 100644 --- a/cases/function/join/test_lastjoin_simple.yaml +++ b/cases/function/join/test_lastjoin_simple.yaml @@ -1021,4 +1021,48 @@ cases: order: c1 rows: - [ "aa", 2, 13, 1590738989000 ] - - [ "bb", 21, 131, 1590738990000 ] \ No newline at end of file + - [ "bb", 21, 131, 1590738990000 ] + - + id: 12 + desc: 不指定索引,进行lastjoin + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ 
"cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "dd", 41, NULL, NULL ] + - + id: 13 + desc: 不指定索引,进行lastjoin,匹配多行 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,131,1590738990000 ] + - [ "bb",21,NULL,NULL ] + - [ "dd", 41, NULL, NULL ] \ No newline at end of file diff --git a/cases/function/select/test_select_sample.yaml b/cases/function/select/test_select_sample.yaml index af8158b368c..97c47194cac 100644 --- a/cases/function/select/test_select_sample.yaml +++ b/cases/function/select/test_select_sample.yaml @@ -291,4 +291,17 @@ cases: columns: ["sum_col1 int32", "cnt_col1 int64", "max_col1 int32", "min_col1 int32", "avg_col1 double"] order: sum_col1 rows: - - [15, 5, 5, 1, 3] \ No newline at end of file + - [15, 5, 5, 1, 3] + - + id: 14 + desc: 不指定索引,插入数据,可查询 + inputs: + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] + sql: select * from {0}; + expect: + columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - 
[1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true] \ No newline at end of file diff --git a/cases/function/select/test_sub_select.yaml b/cases/function/select/test_sub_select.yaml index 17bb3cbdded..292b6d35d2a 100644 --- a/cases/function/select/test_sub_select.yaml +++ b/cases/function/select/test_sub_select.yaml @@ -344,3 +344,16 @@ cases: sql: select id,v2,v2 from (select id,c2+1 as v2,c3+1 as v2 from {0}); expect: success: false + - + id: 15 + desc: 不指定索引,进行子查询操作 + inputs: + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] + sql: select c1,c2 from (select id as c1,c1 as c2,c7 as c3 from {0}); + expect: + columns : ["c1 int","c2 int"] + order: id + rows: + - [1,1] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java index cda4acbcaff..cd4a0f8fcc9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java @@ -90,7 +90,7 @@ public void testOptionsByCli(SQLCase testCase){ @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") @Story("create_no_index") public void testCreateNoIndex(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase") diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster_disk.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster_disk.xml new file mode 100644 index 00000000000..93c042a75c6 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_cluster_disk.xml @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index 5bd5c64537c..0709c08e109 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -93,7 +93,7 @@ public boolean isSupportDiskTable(){ return false; } for(InputDesc input:inputs){ - if (CollectionUtils.isNotEmpty(input.getColumns())&&CollectionUtils.isNotEmpty(input.getIndexs())&& StringUtils.isEmpty(input.getCreate())) { + if (CollectionUtils.isNotEmpty(input.getColumns())&& StringUtils.isEmpty(input.getCreate())) { return true; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java index aac1f296a35..672b9d0cb2d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java @@ -328,8 +328,10 
@@ public static String getColumnName(String column) { * @return */ public static String getColumnType(String column) { - int pos = column.trim().lastIndexOf(' '); - return column.trim().substring(pos).trim(); +// int pos = column.trim().lastIndexOf(' '); +// return column.trim().substring(pos).trim(); + String[] ss = column.split("\\s+"); + return ss[1]; } /** diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 85daa618878..a3d4e181b63 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -30,7 +30,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; + import org.testng.collections.Lists; import java.sql.*; @@ -44,7 +44,7 @@ */ @Slf4j public class SDKUtil { - private static final Logger logger = new LogProxy(log); +// private static final log log = new LogProxy(log); public static OpenMLDBResult sqlList(SqlExecutor executor, String dbName, List sqls) { OpenMLDBResult fesqlResult = null; @@ -60,7 +60,7 @@ public static OpenMLDBResult sqlRequestMode(SqlExecutor executor, String dbName, if (sql.toLowerCase().startsWith("select")||sql.toLowerCase().startsWith("deploy")) { fesqlResult = selectRequestModeWithPreparedStatement(executor, dbName, need_insert_request_row, sql, input); } else { - logger.error("unsupport sql: {}", sql); + log.error("unsupport sql: {}", sql); } return fesqlResult; } @@ -73,7 +73,7 @@ public static OpenMLDBResult sqlBatchRequestMode(SqlExecutor executor, String db fesqlResult = 
selectBatchRequestModeWithPreparedStatement( executor, dbName, sql, input, commonColumnIndices); } else { - logger.error("unsupport sql: {}", sql); + log.error("unsupport sql: {}", sql); } return fesqlResult; } @@ -85,7 +85,7 @@ public static OpenMLDBResult sqlRequestModeWithProcedure(SqlExecutor executor, S if (sql.toLowerCase().startsWith("create procedure")) { fesqlResult = selectRequestModeWithSp(executor, dbName, spName, needInsertRequestRow, sql, rows, isAsyn); } else { - logger.error("unsupport sql: {}", sql); + log.error("unsupport sql: {}", sql); } return fesqlResult; } @@ -119,7 +119,7 @@ public static OpenMLDBResult selectInto(SqlExecutor executor, String dbName, Str if (outSql.isEmpty()){ return null; } - logger.info("select into:{}",outSql); + log.info("select into:{}",outSql); OpenMLDBResult fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, outSql); if (rawRs == null) { @@ -134,7 +134,7 @@ public static OpenMLDBResult selectInto(SqlExecutor executor, String dbName, Str fesqlResult.setMsg(e.getMessage()); } } - logger.info("select result:{} \n", fesqlResult); + log.info("select result:{} \n", fesqlResult); return fesqlResult; } @@ -142,7 +142,7 @@ public static OpenMLDBResult deploy(SqlExecutor executor, String dbName, String if (showdeploySql.isEmpty()){ return null; } - logger.info("show deployment:{}",showdeploySql); + log.info("show deployment:{}",showdeploySql); OpenMLDBResult fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, showdeploySql); if (rawRs == null) { @@ -163,7 +163,7 @@ public static OpenMLDBResult deploy(SqlExecutor executor, String dbName, String fesqlResult.setMsg(e.getMessage()); } } - logger.info("select result:{} \n", fesqlResult); + log.info("select result:{} \n", fesqlResult); return fesqlResult; } @@ -171,7 +171,7 @@ public static OpenMLDBResult showDeploys(SqlExecutor executor, String dbName, St if (showdeploySqls.isEmpty()){ return null; } - logger.info("show 
deployments:{}",showdeploySqls); + log.info("show deployments:{}",showdeploySqls); OpenMLDBResult fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, showdeploySqls); if (rawRs == null) { @@ -205,7 +205,7 @@ public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String de if (descSql.isEmpty()){ return null; } - logger.info("desc:{}",descSql); + log.info("desc:{}",descSql); OpenMLDBResult openMLDBResult = new OpenMLDBResult(); openMLDBResult.setSql(descSql); ResultSet rawRs = executor.executeSQL(dbName, descSql); @@ -227,7 +227,7 @@ public static OpenMLDBResult desc(SqlExecutor executor, String dbName, String de openMLDBResult.setMsg(e.getMessage()); } } - logger.info("create index result:{}", openMLDBResult); + log.info("create index result:{}", openMLDBResult); return openMLDBResult; } @@ -236,7 +236,7 @@ public static OpenMLDBResult createIndex(SqlExecutor executor, String sql) { if (sql.isEmpty()) { return null; } - logger.info("ddl sql:{}", sql); + log.info("ddl sql:{}", sql); OpenMLDBResult openMLDBResult = new OpenMLDBResult(); boolean createOk = false; try { @@ -248,7 +248,7 @@ public static OpenMLDBResult createIndex(SqlExecutor executor, String sql) { openMLDBResult.setOk(false); openMLDBResult.setMsg(e.getMessage()); } - logger.info("create index result:{}", openMLDBResult); + log.info("create index result:{}", openMLDBResult); return openMLDBResult; } @@ -257,11 +257,11 @@ public static OpenMLDBResult insert(SqlExecutor executor, String dbName, String if (insertSql.isEmpty()) { return null; } - logger.info("insert sql:{}", insertSql); + log.info("insert sql:{}", insertSql); OpenMLDBResult fesqlResult = new OpenMLDBResult(); boolean createOk = executor.executeInsert(dbName, insertSql); fesqlResult.setOk(createOk); - logger.info("insert result:{}" + fesqlResult); + log.info("insert result:{}" + fesqlResult); return fesqlResult; } @@ -271,7 +271,7 @@ public static OpenMLDBResult 
selectWithPrepareStatement(SqlExecutor executor, St if (sql.isEmpty()) { return null; } - logger.info("prepare sql:{}", sql); + log.info("prepare sql:{}", sql); PreparedStatement preparedStmt = executor.getPreparedStatement(dbName, sql); DataUtil.setPreparedData(preparedStmt,paramterTypes,params); ResultSet resultSet = preparedStmt.executeQuery(); @@ -292,7 +292,7 @@ public static OpenMLDBResult selectWithPrepareStatement(SqlExecutor executor, St fesqlResult.setMsg(e.getMessage()); } } - logger.info("insert result:{}" + fesqlResult); + log.info("insert result:{}" + fesqlResult); }catch (Exception e){ e.printStackTrace(); fesqlResult.setOk(false); @@ -307,7 +307,7 @@ public static OpenMLDBResult insertWithPrepareStatement(SqlExecutor executor, St if (insertSql.isEmpty()) { return null; } - logger.info("prepare sql:{}", insertSql); + log.info("prepare sql:{}", insertSql); PreparedStatement preparedStmt = executor.getInsertPreparedStmt(dbName, insertSql); DataUtil.setRequestData(preparedStmt,params); // for(int i=0;i> rows = null == input ? null : input.getRows(); if (CollectionUtils.isEmpty(rows)) { - logger.error("fail to execute sql in request mode: request rows is null or empty"); + log.error("fail to execute sql in request mode: request rows is null or empty"); return null; } List inserts = input.extractInserts(); if (CollectionUtils.isEmpty(inserts)) { - logger.error("fail to execute sql in request mode: fail to build insert sql for request rows"); + log.error("fail to execute sql in request mode: fail to build insert sql for request rows"); return null; } if (rows.size() != inserts.size()) { - logger.error("fail to execute sql in request mode: rows size isn't match with inserts size"); + log.error("fail to execute sql in request mode: rows size isn't match with inserts size"); return null; } String insertDbName= input.getDb().isEmpty() ? 
dbName : input.getDb(); - logger.info("select sql:{}", selectSql); + log.info("select sql:{}", selectSql); OpenMLDBResult fesqlResult = new OpenMLDBResult(); List> result = Lists.newArrayList(); for (int i = 0; i < rows.size(); i++) { @@ -404,7 +404,7 @@ private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor if (resultSet == null) { fesqlResult.setOk(false); fesqlResult.setMsg("Select result is null"); - logger.error("select result:{}", fesqlResult); + log.error("select result:{}", fesqlResult); return fesqlResult; } try { @@ -417,7 +417,7 @@ private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor if (need_insert_request_row && !executor.executeInsert(insertDbName, inserts.get(i))) { fesqlResult.setOk(false); fesqlResult.setMsg("Fail to execute sql in request mode fail to insert request row after query"); - logger.error(fesqlResult.getMsg()); + log.error(fesqlResult.getMsg()); return fesqlResult; } if (i == rows.size()-1) { @@ -444,7 +444,7 @@ private static OpenMLDBResult selectRequestModeWithPreparedStatement(SqlExecutor fesqlResult.setCount(result.size()); fesqlResult.setOk(true); - logger.info("select result:{}", fesqlResult); + log.info("select result:{}", fesqlResult); return fesqlResult; } @@ -452,24 +452,24 @@ private static OpenMLDBResult selectBatchRequestModeWithPreparedStatement(SqlExe String selectSql, InputDesc input, List commonColumnIndices) { if (selectSql.isEmpty()) { - logger.error("fail to execute sql in batch request mode: select sql is empty"); + log.error("fail to execute sql in batch request mode: select sql is empty"); return null; } List> rows = null == input ? 
null : input.getRows(); if (CollectionUtils.isEmpty(rows)) { - logger.error("fail to execute sql in batch request mode: request rows is null or empty"); + log.error("fail to execute sql in batch request mode: request rows is null or empty"); return null; } List inserts = input.extractInserts(); if (CollectionUtils.isEmpty(inserts)) { - logger.error("fail to execute sql in batch request mode: fail to build insert sql for request rows"); + log.error("fail to execute sql in batch request mode: fail to build insert sql for request rows"); return null; } if (rows.size() != inserts.size()) { - logger.error("fail to execute sql in batch request mode: rows size isn't match with inserts size"); + log.error("fail to execute sql in batch request mode: rows size isn't match with inserts size"); return null; } - logger.info("select sql:{}", selectSql); + log.info("select sql:{}", selectSql); OpenMLDBResult fesqlResult = new OpenMLDBResult(); PreparedStatement rps = null; @@ -509,7 +509,7 @@ private static OpenMLDBResult selectBatchRequestModeWithPreparedStatement(SqlExe } } fesqlResult.setOk(true); - logger.info("select result:{}", fesqlResult); + log.info("select result:{}", fesqlResult); return fesqlResult; } @@ -517,32 +517,32 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri Boolean needInsertRequestRow, String sql, InputDesc input, boolean isAsyn) { if (sql.isEmpty()) { - logger.error("fail to execute sql in request mode: select sql is empty"); + log.error("fail to execute sql in request mode: select sql is empty"); return null; } List> rows = null == input ? null : input.getRows(); if (CollectionUtils.isEmpty(rows)) { - logger.error("fail to execute sql in request mode: request rows is null or empty"); + log.error("fail to execute sql in request mode: request rows is null or empty"); return null; } List inserts = needInsertRequestRow ? 
input.extractInserts() : Lists.newArrayList(); if (needInsertRequestRow){ if (CollectionUtils.isEmpty(inserts)) { - logger.error("fail to execute sql in request mode: fail to build insert sql for request rows"); + log.error("fail to execute sql in request mode: fail to build insert sql for request rows"); return null; } if (rows.size() != inserts.size()) { - logger.error("fail to execute sql in request mode: rows size isn't match with inserts size"); + log.error("fail to execute sql in request mode: rows size isn't match with inserts size"); return null; } } - logger.info("procedure sql:{}", sql); + log.info("procedure sql:{}", sql); String insertDbName = input.getDb().isEmpty() ? dbName : input.getDb(); OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (!executor.executeDDL(dbName, sql)) { - logger.error("execute ddl failed! sql: {}", sql); + log.error("execute ddl failed! sql: {}", sql); fesqlResult.setOk(false); fesqlResult.setMsg("execute ddl failed"); return fesqlResult; @@ -570,14 +570,14 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri if (resultSet == null) { fesqlResult.setOk(false); fesqlResult.setMsg("result set is null"); - logger.error("select result:{}", fesqlResult); + log.error("select result:{}", fesqlResult); return fesqlResult; } result.addAll(ResultUtil.toList((SQLResultSet) resultSet)); if (needInsertRequestRow && !executor.executeInsert(insertDbName, inserts.get(i))) { fesqlResult.setOk(false); fesqlResult.setMsg("fail to execute sql in request mode: fail to insert request row after query"); - logger.error(fesqlResult.getMsg()); + log.error(fesqlResult.getMsg()); return fesqlResult; } if (i == 0) { @@ -591,7 +591,7 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri } } catch (SQLException throwables) { throwables.printStackTrace(); - logger.error("has exception. sql: {}", sql); + log.error("has exception. 
sql: {}", sql); fesqlResult.setOk(false); fesqlResult.setMsg("fail to execute sql"); return fesqlResult; @@ -607,22 +607,22 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri fesqlResult.setResult(result); fesqlResult.setCount(result.size()); fesqlResult.setOk(true); - logger.info("select result:{}", fesqlResult); + log.info("select result:{}", fesqlResult); return fesqlResult; } public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, String dbName, String spName, String sql, InputDesc input, boolean isAsyn) { if (sql.isEmpty()) { - logger.error("fail to execute sql in batch request mode: select sql is empty"); + log.error("fail to execute sql in batch request mode: select sql is empty"); return null; } List> rows = null == input ? null : input.getRows(); if (CollectionUtils.isEmpty(rows)) { - logger.error("fail to execute sql in batch request mode: request rows is null or empty"); + log.error("fail to execute sql in batch request mode: request rows is null or empty"); return null; } - logger.info("procedure sql: {}", sql); + log.info("procedure sql: {}", sql); OpenMLDBResult fesqlResult = new OpenMLDBResult(); if (!executor.executeDDL(dbName, sql)) { fesqlResult.setOk(false); @@ -672,7 +672,7 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, fesqlResult.setCount(result.size()); } catch (SQLException e) { - logger.error("Call procedure failed", e); + log.error("Call procedure failed", e); fesqlResult.setOk(false); fesqlResult.setMsg("Call procedure failed"); return fesqlResult; @@ -689,7 +689,7 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, } } fesqlResult.setOk(true); - logger.info("select result:{}", fesqlResult); + log.info("select result:{}", fesqlResult); return fesqlResult; } @@ -706,7 +706,7 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, // } // } // -// logger.info("init request row: {}", 
totalSize); +// log.info("init request row: {}", totalSize); // requestRow.Init(totalSize); // for (int i = 0; i < schema.GetColumnCnt(); i++) { // Object obj = objects.get(i); @@ -731,17 +731,17 @@ public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, // } else if (DataType.kTypeDate.equals(dataType)) { // try { // Date date = new Date(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(obj.toString() + " 00:00:00").getTime()); -// logger.info("build request row: obj: {}, append date: {}, {}, {}, {}", +// log.info("build request row: obj: {}, append date: {}, {}, {}, {}", // obj, date.toString(), date.getYear() + 1900, date.getMonth() + 1, date.getDate()); // requestRow.AppendDate(date.getYear() + 1900, date.getMonth() + 1, date.getDate()); // } catch (ParseException e) { -// logger.error("Fail convert {} to date", obj.toString()); +// log.error("Fail convert {} to date", obj.toString()); // return false; // } // } else if (DataType.kTypeString.equals(schema.GetColumnType(i))) { // requestRow.AppendString(obj.toString()); // } else { -// logger.error("fail to build request row: invalid data type {]", schema.GetColumnType(i)); +// log.error("fail to build request row: invalid data type {]", schema.GetColumnType(i)); // return false; // } // } @@ -782,7 +782,7 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, String if (selectSql.isEmpty()) { return null; } - logger.info("select sql:{}", selectSql); + log.info("select sql:{}", selectSql); OpenMLDBResult fesqlResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, selectSql); if (rawRs == null) { @@ -801,7 +801,7 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, String fesqlResult.setMsg(e.getMessage()); } } - logger.info("select result:{} \n", fesqlResult); + log.info("select result:{} \n", fesqlResult); return fesqlResult; } @@ -809,7 +809,7 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, 
String // Object obj = null; // DataType dataType = schema.GetColumnType(index); // if (rs.IsNULL(index)) { - // logger.info("rs is null"); + // log.info("rs is null"); // return null; // } // if (dataType.equals(DataType.kTypeBool)) { @@ -834,7 +834,7 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, String // obj = rs.GetInt64Unsafe(index); // } else if (dataType.equals(DataType.kTypeString)) { // obj = rs.GetStringUnsafe(index); - // logger.info("conver string data {}", obj); + // log.info("conver string data {}", obj); // } else if (dataType.equals(DataType.kTypeTimestamp)) { // obj = new Timestamp(rs.GetTimeUnsafe(index)); // } @@ -847,7 +847,7 @@ public static OpenMLDBResult createTable(SqlExecutor executor, String dbName, St if (StringUtils.isNotEmpty(createSql)) { OpenMLDBResult res = SDKUtil.ddl(executor, dbName, createSql); if (!res.isOk()) { - logger.error("fail to create table"); + log.error("fail to create table"); return res; } return res; @@ -897,7 +897,7 @@ public static OpenMLDBResult createAndInsert(SqlExecutor executor, if (!insertSql.isEmpty()) { OpenMLDBResult res = SDKUtil.insert(executor, dbName, insertSql); if (!res.isOk()) { - logger.error("fail to insert table"); + log.error("fail to insert table"); return res; } } @@ -927,7 +927,7 @@ public static OpenMLDBResult createAndInsertWithPrepared(SqlExecutor executor, S for(List row:rows){ OpenMLDBResult res = SDKUtil.insertWithPrepareStatement(executor, dbName, insertSql, row); if (!res.isOk()) { - logger.error("fail to insert table"); + log.error("fail to insert table"); return res; } } @@ -947,7 +947,7 @@ public static void show(com._4paradigm.openmldb.ResultSet rs) { while (rs.Next()) { sb.append(rs.GetRowString()).append("\n"); } - logger.info("RESULT:\n{} row in set\n{}", rs.Size(), sb.toString()); + log.info("RESULT:\n{} row in set\n{}", rs.Size(), sb.toString()); } diff --git a/test/steps/openmldb-sdk-test-java-src.sh b/test/steps/openmldb-sdk-test-java-src.sh 
index 10d30d1f043..9b9712b9091 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -91,7 +91,7 @@ cd test/test-tool/command-tool || exit mvn clean install -Dmaven.test.skip=true cd "${ROOT_DIR}" || exit # modify config -sh test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" "${TABLE_STORAGE_MODE}" +sh test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "0.5.0" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" "${TABLE_STORAGE_MODE}" # install jar cd test/integration-test/openmldb-test-java || exit From f6da6e9b3464685e23bd6d0116bf3656c670890d Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 27 Jul 2022 17:40:26 +0800 Subject: [PATCH 095/172] deploy --- cases/function/ddl/test_options.yaml | 1 - cases/function/dml/test_insert.yaml | 1 + cases/function/dml/test_insert_prepared.yaml | 1 + cases/function/test_batch_request.yaml | 3 ++ .../src/main/resources/deploy.properties | 2 +- .../test-suite/test_deploy.xml | 2 +- .../java_sdk_test/checker/ColumnsChecker.java | 2 +- .../java_sdk_test/executor/BaseExecutor.java | 5 ++- .../executor/BaseSQLExecutor.java | 4 +-- .../executor/BatchSQLExecutor.java | 25 +++++++++------ .../executor/ClusterCliExecutor.java | 6 ++-- .../executor/CommandExecutor.java | 28 ++++++++--------- .../executor/InsertPreparedExecutor.java | 6 ++-- .../java_sdk_test/executor/MysqlExecutor.java | 12 +++---- .../java_sdk_test/executor/NullExecutor.java | 2 +- .../executor/QueryPreparedExecutor.java | 4 +-- .../executor/RequestQuerySQLExecutor.java | 31 +++++++++++-------- .../executor/Sqlite3Executor.java | 12 +++---- .../executor/StandaloneCliExecutor.java | 5 +-- .../executor/StoredProcedureSQLExecutor.java | 20 ++++++------ .../test_suite/test_cluster_disk.xml | 2 ++ .../openmldb/test_common/model/SQLCase.java | 4 ++- .../openmldb/test_common/model/Table.java | 
4 +++ 23 files changed, 98 insertions(+), 84 deletions(-) diff --git a/cases/function/ddl/test_options.yaml b/cases/function/ddl/test_options.yaml index a9fc0b44631..1c8ed43ad7d 100644 --- a/cases/function/ddl/test_options.yaml +++ b/cases/function/ddl/test_options.yaml @@ -359,7 +359,6 @@ cases: - id: 22 desc: test-case - tags: ["TODO","disk table apiserver:Table not found"] mode: standalone-unsupport inputs: - diff --git a/cases/function/dml/test_insert.yaml b/cases/function/dml/test_insert.yaml index d1cbe6ea2ba..36ae56ca82b 100644 --- a/cases/function/dml/test_insert.yaml +++ b/cases/function/dml/test_insert.yaml @@ -154,6 +154,7 @@ cases: - id: 10 desc: 相同时间戳数据 + mode: disk-unsupport inputs: - columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] diff --git a/cases/function/dml/test_insert_prepared.yaml b/cases/function/dml/test_insert_prepared.yaml index b67c027e51b..f43f5662094 100644 --- a/cases/function/dml/test_insert_prepared.yaml +++ b/cases/function/dml/test_insert_prepared.yaml @@ -101,6 +101,7 @@ cases: - id: 5 desc: 相同时间戳数据 + mode: disk-unsupport inputs: - columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] diff --git a/cases/function/test_batch_request.yaml b/cases/function/test_batch_request.yaml index e00fb773163..9f3134806e1 100644 --- a/cases/function/test_batch_request.yaml +++ b/cases/function/test_batch_request.yaml @@ -253,6 +253,7 @@ cases: - [7,"a",15,100,4.2,7.2,1590738996000,"2020-05-07","e"] - id: 6 desc: batch request with one common window and one non-common window + mode: disk-unsupport inputs: - columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp", @@ -290,6 +291,7 @@ cases: - id: 7 desc: batch request with common window and common and non-common aggregations, window is small + mode: disk-unsupport inputs: - columns: ["id int","k1 bigint","k2 timestamp", @@ -325,6 +327,7 @@ cases: - id: 8 desc: batch 
request with one common window and one non-common window, current time == history time + mode: disk-unsupport inputs: - columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp", diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 42fea2cff3e..cf67073c54e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -6,7 +6,7 @@ main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz -tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3.tar.gz +tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index f6a768515a5..85c476ba1dd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java index 
48d78e70a1d..53f23c23eb2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java @@ -51,7 +51,7 @@ public void check() throws Exception { // Assert.assertEquals(columnNames.get(i)+" "+columnTypes.get(i),expectColumns.get(i)); Assert.assertEquals(columnNames.get(i), Table.getColumnName(expectColumns.get(i))); Assert.assertEquals(TypeUtil.getOpenMLDBColumnType(columnTypes.get(i)), - TypeUtil.getOpenMLDBColumnType(Table.getColumnType(expectColumns.get(i)))); + TypeUtil.getOpenMLDBColumnType(Table.getColumnTypeByExpect(expectColumns.get(i)))); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java index 6ef47384552..a795fc5c4e4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java @@ -21,7 +21,6 @@ import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import lombok.extern.slf4j.Slf4j; -import org.slf4j.Logger; import org.testng.Assert; import org.testng.collections.Lists; @@ -33,7 +32,7 @@ */ @Slf4j public abstract class BaseExecutor implements IExecutor{ - protected static final Logger logger = new LogProxy(log); +// protected static final log log = new LogProxy(log); protected SQLCase fesqlCase; protected SQLCaseType executorType; protected String dbName; @@ -45,7 +44,7 @@ public void run() { 
String className = Thread.currentThread().getStackTrace()[2].getClassName(); String methodName = Thread.currentThread().getStackTrace()[2].getMethodName(); System.out.println(className+"."+methodName+":"+fesqlCase.getCaseFileName()+":"+fesqlCase.getDesc() + " Begin!"); - logger.info(className+"."+methodName+":"+fesqlCase.getDesc() + " Begin!"); + log.info(className+"."+methodName+":"+fesqlCase.getDesc() + " Begin!"); boolean verify = false; try { verify = verify(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index 8cadffc888c..c1cc044308a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -112,7 +112,7 @@ public void tearDown() { public void tearDown(String version,SqlExecutor executor) { - logger.info("version:{},begin tear down",version); + log.info("version:{},begin tear down",version); List tearDown = fesqlCase.getTearDown(); if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ @@ -124,7 +124,7 @@ public void tearDown(String version,SqlExecutor executor) { SDKUtil.sql(executor, dbName, sql); }); } - logger.info("version:{},begin drop table",version); + log.info("version:{},begin drop table",version); List tables = fesqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index 024039c33f6..7b91b87981a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -17,6 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.executor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -46,23 +47,27 @@ public BatchSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { @@ -107,7 +112,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ } fesqlResult = SDKUtil.sql(executor, dbName, sql); } - logger.info("version:{} execute end",version); + log.info("version:{} execute end",version); return fesqlResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java index 7bf71a59627..f7a281000df 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java @@ -21,13 +21,11 @@ import com._4paradigm.openmldb.test_common.model.SQLCaseType; import 
com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; -import org.slf4j.Logger; import java.util.Map; @Slf4j public class ClusterCliExecutor extends CommandExecutor{ - private static final Logger logger = new LogProxy(log); public ClusterCliExecutor(SQLCase fesqlCase, SQLCaseType executorType) { super(fesqlCase, executorType); } @@ -39,11 +37,11 @@ public ClusterCliExecutor(SQLCase fesqlCase, Map openMLDBI @Override public boolean verify() { if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("cluster-cli-unsupport")) { - logger.info("skip case in cli mode: {}", fesqlCase.getDesc()); + log.info("skip case in cli mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("cluster-unsupport")) { - logger.info("skip case , mode: {}", fesqlCase.getDesc()); + log.info("skip case , mode: {}", fesqlCase.getDesc()); return false; } return super.verify(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java index 02d0f262694..ad96d8265a1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java @@ -35,7 +35,6 @@ import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; import java.util.List; import java.util.Map; @@ -44,7 +43,6 @@ @Slf4j public class CommandExecutor extends BaseExecutor{ - private static final Logger logger = new LogProxy(log); protected Map openMLDBInfoMap; private 
Map resultMap; @@ -67,27 +65,27 @@ public CommandExecutor(SQLCase fesqlCase, Map openMLDBInfo @Override public boolean verify() { if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("hybridse-only")) { - logger.info("skip case in cli mode: {}", fesqlCase.getDesc()); + log.info("skip case in cli mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("batch-unsupport")) { - logger.info("skip case in batch mode: {}", fesqlCase.getDesc()); + log.info("skip case in batch mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-batch-unsupport")) { - logger.info("skip case in rtidb batch mode: {}", fesqlCase.getDesc()); + log.info("skip case in rtidb batch mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-unsupport")) { - logger.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("performance-sensitive-unsupport")) { - logger.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("cli-unsupport")) { - logger.info("skip case in cli mode: {}", fesqlCase.getDesc()); + log.info("skip case in cli mode: {}", fesqlCase.getDesc()); return false; } return true; @@ -102,14 +100,14 @@ public void prepare(){ } protected void prepare(String version, OpenMLDBInfo openMLDBInfo){ - logger.info("version:{} prepare begin",version); + log.info("version:{} prepare begin",version); OpenMLDBResult fesqlResult = OpenMLDBCommandUtil.createDB(openMLDBInfo,dbName); - logger.info("version:{},create db:{},{}", version, dbName, fesqlResult.isOk()); + log.info("version:{},create db:{},{}", version, dbName, 
fesqlResult.isOk()); OpenMLDBResult res = OpenMLDBCommandUtil.createAndInsert(openMLDBInfo, dbName, fesqlCase.getInputs()); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . version:"+version); } - logger.info("version:{} prepare end",version); + log.info("version:{} prepare end",version); } @Override @@ -126,7 +124,7 @@ public void execute() { } protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ - logger.info("version:{} execute begin",version); + log.info("version:{} execute begin",version); OpenMLDBResult fesqlResult = null; List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { @@ -150,7 +148,7 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ } fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } - logger.info("version:{} execute end",version); + log.info("version:{} execute end",version); return fesqlResult; } @@ -174,7 +172,7 @@ public void tearDown() { public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { - logger.info("version:{},begin tear down",version); + log.info("version:{},begin tear down",version); List tearDown = fesqlCase.getTearDown(); if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ @@ -186,7 +184,7 @@ public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName, sql); }); } - logger.info("version:{},begin drop table",version); + log.info("version:{},begin drop table",version); List tables = fesqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java index 4a64d26ae77..e12901f63f3 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java @@ -42,13 +42,13 @@ public InsertPreparedExecutor(SQLCase fesqlCase, SqlExecutor executor, Map sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { @@ -80,12 +80,12 @@ public void execute() { fesqlResult = JDBCUtil.executeQuery(sql,DBType.MYSQL); } mainResult = fesqlResult; - logger.info("mysql execute end"); + log.info("mysql execute end"); } @Override public void tearDown() { - logger.info("mysql,begin drop table"); + log.info("mysql,begin drop table"); List tables = fesqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java index 5fa87e34b9b..ef747f7a400 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/NullExecutor.java @@ -45,7 +45,7 @@ protected void prepare(String mainVersion, SqlExecutor executor) { @Override public boolean verify() { - logger.info("No case need to be run."); + log.info("No case need to be run."); return false; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java index 6c7ff449fe2..8ef1b9332d3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java @@ -47,7 +47,7 @@ public QueryPreparedExecutor(SQLCase fesqlCase, SqlExecutor executor, Map sqls = fesqlCase.getSqls(); // if (sqls != null && sqls.size() > 0) { @@ -74,7 +74,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ List objects = parameters.getRows().get(0); fesqlResult = SDKUtil.selectWithPrepareStatement(executor, dbName,sql, types,objects); } - logger.info("version:{} execute end",version); + log.info("version:{} execute end",version); return fesqlResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index 4ab6e19bd30..d3c4d3c74ab 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -18,6 +18,7 @@ import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.sdk.SqlExecutor; import 
com._4paradigm.openmldb.test_common.model.InputDesc; @@ -53,7 +54,7 @@ public RequestQuerySQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map sqls = fesqlCase.getSqls(); @@ -80,7 +81,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { if (isBatchRequest) { InputDesc batchRequest = fesqlCase.getBatch_request(); if (batchRequest == null) { - logger.error("No batch request provided in case"); + log.error("No batch request provided in case"); return null; } List commonColumnIndices = new ArrayList<>(); @@ -101,7 +102,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { request = fesqlCase.getInputs().get(0); } if (null == request || CollectionUtils.isEmpty(request.getColumns())) { - logger.error("fail to execute in request query sql executor: sql case request columns is empty"); + log.error("fail to execute in request query sql executor: sql case request columns is empty"); return null; } fesqlResult = SDKUtil.sqlRequestMode(executor, dbName, null == fesqlCase.getBatch_request(), sql, request); @@ -110,48 +111,52 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { }catch (Exception e){ e.printStackTrace(); } - logger.info("version:{} execute end",version); + log.info("version:{} execute end",version); return fesqlResult; } @Override protected void prepare(String version,SqlExecutor executor) { - logger.info("version:{} prepare begin",version); + log.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); - logger.info("create db:{},{}", dbName, dbOk); + log.info("create db:{},{}", dbName, dbOk); boolean useFirstInputAsRequests = !isBatchRequest && null == fesqlCase.getBatch_request(); OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), useFirstInputAsRequests); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail"); } - logger.info("version:{} prepare end",version); + log.info("version:{} prepare 
end",version); } @Override public boolean verify() { if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("hybridse-only")) { - logger.info("skip case in request mode: {}", fesqlCase.getDesc()); + log.info("skip case in request mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("request-unsupport")) { - logger.info("skip case in request mode: {}", fesqlCase.getDesc()); + log.info("skip case in request mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-unsupport")) { - logger.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("performance-sensitive-unsupport")) { - logger.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); return false; } if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-request-unsupport")) { - logger.info("skip case in rtidb request mode: {}", fesqlCase.getDesc()); + log.info("skip case in rtidb request mode: {}", fesqlCase.getDesc()); + return false; + } + if (null != fesqlCase.getMode() && !OpenMLDBGlobalVar.tableStorageMode.equals("memory") && fesqlCase.getMode().contains("disk-unsupport")) { + log.info("skip case in disk mode: {}", fesqlCase.getDesc()); return false; } if (OpenMLDBConfig.isCluster() && null != fesqlCase.getMode() && fesqlCase.getMode().contains("cluster-unsupport")) { - logger.info("cluster-unsupport, skip case in cluster request mode: {}", fesqlCase.getDesc()); + log.info("cluster-unsupport, skip case in cluster request mode: {}", fesqlCase.getDesc()); return false; } return true; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java index 816372a0819..fc3ca356c67 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java @@ -46,13 +46,13 @@ public boolean verify() { if(sqlDialect.contains(DBType.ANSISQL.name())|| sqlDialect.contains(DBType.SQLITE3.name())){ return true; } - logger.info("skip case in sqlite3 mode: {}", fesqlCase.getDesc()); + log.info("skip case in sqlite3 mode: {}", fesqlCase.getDesc()); return false; } @Override public void prepare() { - logger.info("sqlite3 prepare begin"); + log.info("sqlite3 prepare begin"); for(InputDesc inputDesc:fesqlCase.getInputs()) { String createSql = Sqlite3Util.getCreateTableSql(inputDesc); JDBCUtil.executeUpdate(createSql,DBType.SQLITE3); @@ -61,12 +61,12 @@ public void prepare() { throw new RuntimeException("fail to run Sqlite3Executor: prepare fail"); } } - logger.info("sqlite3 prepare end"); + log.info("sqlite3 prepare end"); } @Override public void execute() { - logger.info("sqlite3 execute begin"); + log.info("sqlite3 execute begin"); OpenMLDBResult fesqlResult = null; List sqls = fesqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { @@ -81,12 +81,12 @@ public void execute() { fesqlResult = JDBCUtil.executeQuery(sql,DBType.SQLITE3); } mainResult = fesqlResult; - logger.info("sqlite3 execute end"); + log.info("sqlite3 execute end"); } @Override public void tearDown() { - logger.info("sqlite3,begin drop table"); + log.info("sqlite3,begin drop table"); List tables = fesqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java index b803148ab5c..2e3b12e690b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java @@ -16,18 +16,15 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; -import org.slf4j.Logger; import java.util.Map; @Slf4j public class StandaloneCliExecutor extends CommandExecutor{ - private static final Logger logger = new LogProxy(log); public StandaloneCliExecutor(SQLCase fesqlCase, SQLCaseType executorType) { super(fesqlCase, executorType); } @@ -39,7 +36,7 @@ public StandaloneCliExecutor(SQLCase fesqlCase, Map openML @Override public boolean verify() { if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("standalone-unsupport")) { - logger.info("skip case in cli mode: {}", fesqlCase.getDesc()); + log.info("skip case in cli mode: {}", fesqlCase.getDesc()); return false; } return super.verify(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index f8bf184e5e3..2e15d9854d1 
100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -48,29 +48,29 @@ public StoredProcedureSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map + + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index 0709c08e109..1761e289210 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -119,7 +119,9 @@ public static String buildCreateSpSQLFromColumnsIndexs(String name, String sql, } StringBuilder builder = new StringBuilder("create procedure " + name + "(\n"); for (int i = 0; i < columns.size(); i++) { - builder.append(columns.get(i)); + String column = columns.get(i); + String[] ss = column.split("\\s+"); + builder.append(ss[0]+" "+ss[1]); if (i != columns.size() - 1) { builder.append(","); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java index 672b9d0cb2d..be130bb7542 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java @@ -333,6 +333,10 @@ public static String getColumnType(String column) { String[] ss = column.split("\\s+"); return ss[1]; } + public static String getColumnTypeByExpect(String column) { + int pos = column.trim().lastIndexOf(' '); + return column.trim().substring(pos).trim(); + } /** * format columns and rows From 3835615b82b8469ef222b57d3f96d65cecf11fd1 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 27 Jul 2022 19:49:35 +0800 Subject: [PATCH 096/172] modify cicd --- .github/workflows/integration-test-src.yml | 2 +- cases/function/window/test_window.yaml | 1 + .../test_window_exclude_current_time.yaml | 31 +++++++++++-------- cases/function/window/test_window_union.yaml | 7 +++++ .../test_window_union_cluster_thousand.yaml | 1 + .../src/main/resources/run_case.properties | 1 + .../cluster/sql_test/WindowTest.java | 6 ---- 7 files changed, 29 insertions(+), 20 deletions(-) diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index f0f1ee5c4ea..3af33f0b5a7 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -78,7 +78,7 @@ jobs: - name: build jsdk and package run: | make configure CMAKE_INSTALL_PREFIX=openmldb-linux - make SQL_JAVASDK_ENABLE=ON install + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux echo "openmldb-pkg:" ls -al diff --git a/cases/function/window/test_window.yaml b/cases/function/window/test_window.yaml index 04235f433eb..80731888843 100644 --- a/cases/function/window/test_window.yaml +++ b/cases/function/window/test_window.yaml @@ -353,6 +353,7 @@ cases: - id: 13 desc: ts列相同 + mode: disk-unsupport inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] diff --git 
a/cases/function/window/test_window_exclude_current_time.yaml b/cases/function/window/test_window_exclude_current_time.yaml index b85ee2358bf..2f00fff56e1 100644 --- a/cases/function/window/test_window_exclude_current_time.yaml +++ b/cases/function/window/test_window_exclude_current_time.yaml @@ -16,6 +16,7 @@ db: test_zw version: 0.5.0 cases: - id: 0 + mode: disk-unsupport desc: ROWS_RANGE Window OPEN PRECEDING EXCLUDE CURRENT_TIME inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] @@ -89,6 +90,7 @@ cases: - [ "aa", 9, 1590739002000, 2.0 ] - id: 2 desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING EXCLUDE CURRENT_TIME + mode: disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -126,6 +128,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0 ] - id: 3 desc: ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME + mode: disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -163,6 +166,7 @@ cases: - [ "aa", 9, 1590739002000, 7.0 ] - id: 4 desc: ROWS and ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME + mode: disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -198,7 +202,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0, 7.0 ] - id: 5 - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport desc: ROWS_RANGE Window and EXCLUDE CURRENT_TIME Window inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] @@ -235,7 +239,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0, 3.0 ] - id: 6 desc: ROWS_RANGE Window with MaxSize 2 and EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -271,7 +275,7 @@ cases: - [ "aa", 9, 1590739002000, 2.0, 2.0 ] - id: 7 desc: ROWS_RANGE Window with MaxSize 10 and EXCLUDE CURRENT_TIME Window - 
mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -307,7 +311,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0, 3.0 ] - id: 8 desc: ROWS Window and EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -343,7 +347,7 @@ cases: - [ "aa", 9, 1590739002000, 7.0, 7.0 ] - id: 9 desc: ROWS and ROWS Window and EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -390,7 +394,7 @@ cases: - id: 10 desc: ROWS_RANGE Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -426,7 +430,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0, 3.0 ] - id: 11 desc: ROWS_RANGE Window with MaxSize 2 OPEN PRECEDING amd EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -462,7 +466,7 @@ cases: - [ "aa", 9, 1590739002000, 2.0, 2.0 ] - id: 12 desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING and EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -498,7 +502,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0, 3.0 ] - id: 13 desc: ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -534,7 +538,7 @@ 
cases: - [ "aa", 9, 1590739002000, 7.0, 7.0 ] - id: 14 desc: ROWS and ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -580,7 +584,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ] - id: 16 desc: ROWS and ROWS Window 各类窗口混合 - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -642,7 +646,7 @@ cases: - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ] - id: 17 desc: ROWS Window with same timestamp - mode: offline-unsupport + mode: offline-unsupport,disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -676,6 +680,7 @@ cases: - [ "aa", 9, 1590738993000, 4.0] - id: 18 desc: ROWS Window with same timestamp Exclude CurretTime + mode: disk-unsupport inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] indexs: [ "index1:c1:c7" ] @@ -709,7 +714,7 @@ cases: - [ "aa", 9, 1590738993000, 4.0] - id: 19 desc: ROWS, ROWS_RANGE Window, Normal Window, OPEN Window, EXCLUDE CURRENT TIME Window - mode: batch-unsupport + mode: batch-unsupport,disk-unsupport tags: ["@chendihao, @baoxinqi, 测试的时候spark需要保证输入数据滑入顺序"] inputs: - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] diff --git a/cases/function/window/test_window_union.yaml b/cases/function/window/test_window_union.yaml index efb72e9ded4..b11957c25e6 100644 --- a/cases/function/window/test_window_union.yaml +++ b/cases/function/window/test_window_union.yaml @@ -573,6 +573,7 @@ cases: - id: 18-1 desc: | when UNION ROWS_RANGE has the same key with original rows, original rows first then union rows + mode: disk-unsupport inputs: - name: t1 columns: @@ -625,6 +626,7 @@ cases: desc: | when UNION ROWS 
has the same key with original rows, original rows first then union rows, union rows filtered out first for max window size limitation + mode: disk-unsupport inputs: - name: t1 columns: @@ -672,6 +674,7 @@ cases: 1, 3, 233, 21, 200 2, 3, 400, 21, 21 - id: 18-3 + mode: disk-unsupport desc: | when UNION ROWS_RANGE MAXSIZE has the same key with original rows, original rows first then union rows union rows filtered out for max window size first @@ -721,6 +724,7 @@ cases: 1, 2, 200, 21, 200 2, 2, 21, 0, 21 - id: 18-4 + mode: disk-unsupport desc: | when UNION ROWS_RANGE EXCLUDE CURRENT_TIME has the same key with original rows, original rows first then union rows other rows except current row filtered out by EXCLUDE CURRENT_TIME @@ -786,6 +790,7 @@ cases: - [4, 7, 233, 5, 5] - id: 18-5 + mode: disk-unsupport desc: | UNION ROWS current time rows filtered out inputs: @@ -843,6 +848,7 @@ cases: # # 19-* series test case tests for this for SQL engine only, you should never reply on this behavior anyway - id: 19-1 + mode: disk-unsupport desc: | window unions multiple tables, the order for rows in union tables with same ts is explicitly as the order in SQL inputs: @@ -904,6 +910,7 @@ cases: 1, 6, 999, 0, 200, 233 2, 7, 10000, 0, 21, 200 - id: 19-2 + mode: disk-unsupport desc: | rows order for pure history window union inputs: diff --git a/cases/function/window/test_window_union_cluster_thousand.yaml b/cases/function/window/test_window_union_cluster_thousand.yaml index e8e8246dbfb..aa12f1b549f 100644 --- a/cases/function/window/test_window_union_cluster_thousand.yaml +++ b/cases/function/window/test_window_union_cluster_thousand.yaml @@ -20,6 +20,7 @@ version: 0.5.0 cases: - id: 0 desc: 正常union + mode: disk-unsupport inputs: - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] indexs: ["index1:c3:c7"] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index 912d8e3a2da..e742006ef82 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,3 +1,4 @@ # memory/ssd/hdd table_storage_mode=ssd +version=0.5.0 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java index 1434103f167..29f0235e702 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java @@ -67,10 +67,4 @@ public void testWindowRequestModeWithSpAsync(SQLCase testCase) throws Exception ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } - @Story("request") - @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/test_window_union_cluster_thousand.yaml"}) - public void testWindowRequestMode2(SQLCase testCase) throws Exception { - ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); - } } From 9cce07caac876367fb3688a72bafb0f917998531 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 08:10:32 +0800 Subject: [PATCH 097/172] deploy --- .github/workflows/integration-test-src.yml | 2 ++ .../openmldb/java_sdk_test/cluster/sql_test/DDLTest.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index 3af33f0b5a7..b27a24c13b0 100644 --- 
a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -107,6 +107,8 @@ jobs: make configure CMAKE_INSTALL_PREFIX=openmldb-linux make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al - name: test run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "1,2,3,4,5" -s "memory" - name: TEST Results diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java index cd4a0f8fcc9..52ebeca22bc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java @@ -93,7 +93,7 @@ public void testCreateNoIndex(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } - @Test(dataProvider = "getCase") + @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") @Story("create_no_index") public void testCreateNoIndexByCli(SQLCase testCase){ From 82ea52f33af270acca88d2800ef05a504d1ed44b Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 10:10:58 +0800 Subject: [PATCH 098/172] modify cicd --- cases/function/dml/test_delete.yaml | 501 ++++++++++++++++++ .../java_sdk_test/checker/BaseChecker.java | 2 - .../checker/CatCheckerByCli.java | 1 - .../checker/CheckerStrategy.java | 4 + .../java_sdk_test/checker/ColumnsChecker.java | 1 - .../checker/ColumnsCheckerByCli.java | 1 - .../checker/ColumnsCheckerByJBDC.java | 1 - 
.../java_sdk_test/checker/CountChecker.java | 1 - .../checker/DeploymentCheckerByCli.java | 1 - .../DeploymentContainsCheckerByCli.java | 1 - .../checker/DeploymentCountCheckerByCli.java | 1 - .../checker/DiffResultChecker.java | 6 - .../checker/DiffVersionChecker.java | 1 - .../java_sdk_test/checker/MessageChecker.java | 41 ++ .../java_sdk_test/checker/OptionsChecker.java | 1 - .../java_sdk_test/checker/ResultChecker.java | 3 - .../checker/ResultCheckerByCli.java | 3 - .../checker/ResultCheckerByJDBC.java | 3 - .../java_sdk_test/checker/SuccessChecker.java | 1 - .../test_common/model/ExpectDesc.java | 1 + test/steps/openmldb-sdk-test-java-src.sh | 2 +- 21 files changed, 548 insertions(+), 29 deletions(-) create mode 100644 cases/function/dml/test_delete.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/MessageChecker.java diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml new file mode 100644 index 00000000000..a50859cd2e9 --- /dev/null +++ b/cases/function/dml/test_delete.yaml @@ -0,0 +1,501 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: delete 一个key + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 1 + desc: delete 组合索引 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1|c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa' and c2=1; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - + id: 2 + desc: delete 一个索引的两个key + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa' or c1='cc'; + 
expect: + success: false + msg: failed + - + id: 3 + desc: delete 两个索引的两个key + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c2:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa' or c2=1; + expect: + success: false + msg: failed + - + id: 4 + desc: 两个索引 delete 其中一个 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c2=2; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 5 + desc: delete 不是索引列 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + expect: + success: false + msg: failed + - + id: 6 + desc: delete key不存在 + inputs: + 
- + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='cc'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 7 + desc: delete null + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,null,1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1 is null; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 8 + desc: delete 空串 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1=''; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - 
[2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 10 + desc: delete int + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,3,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c3=3; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 11 + desc: delete smallint + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 12 + desc: delete bigint + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c4:c7"] + rows: + - [1,"aa",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c4=4; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 
date","c9 bool"] + rows: + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 13 + desc: delete date + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c8:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-02",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c8='2020-05-02'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 14 + desc: delete timestamp + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c7:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c7=1590738989000; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 15 + desc: delete bool + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c9:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c9=true; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false] + - + id: 16 + desc: 两次delete相同index 不同的key + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - delete from {0} where c1='cc'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 17 + desc: 两次delete 不同的index + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - delete from {0} where c2=1; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - + id: 18 + desc: delete过期数据 + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - 
[3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 19 + desc: delete表不存在 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sql: delete from {0}1 where c1='aa'; + expect: + success: false + msg: failed + - + id: 20 + desc: delete列不存在 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c11=1; + expect: + success: false + msg: failed + - + id: 21 + desc: delete 其他库的数据 + - + id: 22 + desc: 两个index中key相同 delete 一个key + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c1:c4:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns 
: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - + id: 23 + desc: delete全部数据 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + - select * from {0}; + expect: + count: 0 + - + id: 24 + desc: 两个索引,一个索引数据过期,删除另一个索引 + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest","index2:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - + id: 25 + desc: 数据过期,delete其他pk + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - 
[3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='bb'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 26 + desc: 不等式删除 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1!='cc'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 27 + desc: delete 使用>=条件 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c2>=2; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java index 64e64a7e6c0..212f39806d1 100644 ---
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java @@ -18,7 +18,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import java.util.Map; @@ -31,7 +30,6 @@ public abstract class BaseChecker implements Checker { protected OpenMLDBResult fesqlResult; protected Map resultMap; protected ExpectDesc expect; - protected ReportLog reportLog = ReportLog.of(); public BaseChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ this.expect = expect; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java index 0f9a8f2e3d5..0a0502f6256 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java @@ -20,7 +20,6 @@ public CatCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws Exception { log.info("cat check"); - reportLog.info("cat check"); CatFile expectCat = expect.getCat(); String path = expectCat.getPath(); path = SQLUtil.formatSql(path, fesqlResult.getTableNames()); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java index 502c0bfc377..6d38d16c138 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java @@ -23,6 +23,7 @@ import com._4paradigm.openmldb.test_common.model.SQLCaseType; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; +import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; import java.util.List; @@ -81,6 +82,9 @@ public static List build(SQLCase fesqlCase, OpenMLDBResult fesqlResult, if(expect.getCat()!=null){ checkList.add(new CatCheckerByCli(expect, fesqlResult)); } + if(StringUtils.isNotEmpty(expect.getMsg())){ + checkList.add(new MessageChecker(expect, fesqlResult)); + } return checkList; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java index 53f23c23eb2..b352280c23b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java @@ -39,7 +39,6 @@ public ColumnsChecker(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws Exception { log.info("column name check"); - reportLog.info("column name check"); List expectColumns = expect.getColumns(); if (expectColumns == null || expectColumns.size() == 0) { return; 
diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java index 662dca28913..0a81899ef47 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java @@ -38,7 +38,6 @@ public ColumnsCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws Exception { log.info("column name check"); - reportLog.info("column name check"); List expectColumns = expect.getColumns(); if (expectColumns == null || expectColumns.size() == 0) { return; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java index 8eda4685cdd..1e3c3f0abda 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java @@ -38,7 +38,6 @@ public ColumnsCheckerByJBDC(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws Exception { log.info("column name check"); - reportLog.info("column name check"); List expectColumns = expect.getColumns(); if (expectColumns == null || expectColumns.size() == 0) { return; diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java index 10c02e08253..0a8e707b2dd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java @@ -37,7 +37,6 @@ public CountChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ @Override public void check() throws Exception { log.info("count check"); - reportLog.info("count check"); int expectCount = expect.getCount(); int actual = fesqlResult.getCount(); Assert.assertEquals(actual,expectCount,"count验证失败"); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java index b1de3a498d7..c3b202627e7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java @@ -38,7 +38,6 @@ public DeploymentCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws Exception { log.info("deployment check"); - reportLog.info("deployment check"); OpenmldbDeployment expectDeployment = expect.getDeployment(); String name = expectDeployment.getName(); name = SQLUtil.formatSql(name, fesqlResult.getTableNames()); diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java index 49031b2f106..3ad385e9f50 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java @@ -38,7 +38,6 @@ public DeploymentContainsCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlRes @Override public void check() throws Exception { log.info("deployment contains check"); - reportLog.info("deployment contains name check"); OpenmldbDeployment expectDeployment = expect.getDeploymentContains(); if (expectDeployment == null) { return; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java index a7b57aed4d4..c75a200920c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java @@ -38,7 +38,6 @@ public DeploymentCountCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult @Override public void check() throws Exception { log.info("deployment count check"); - reportLog.info("deployment count name check"); int expectDeploymentCount = expect.getDeploymentCount(); List 
actualDeployments = fesqlResult.getDeployments(); Integer deploymentCount = fesqlResult.getDeploymentCount(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java index fc5f2938822..bf1cdf1231c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java @@ -50,7 +50,6 @@ public void check() throws Exception { } public void checkMysql(OpenMLDBResult mysqlResult) throws Exception { log.info("diff mysql check"); - reportLog.info("diff mysql check"); //验证success boolean fesqlOk = fesqlResult.isOk(); boolean sqlite3Ok = mysqlResult.isOk(); @@ -60,16 +59,13 @@ public void checkMysql(OpenMLDBResult mysqlResult) throws Exception { List> fesqlRows = fesqlResult.getResult(); List> mysqlRows = mysqlResult.getResult(); log.info("fesqlRows:{}", fesqlRows); - reportLog.info("fesqlRows:{}", fesqlRows); log.info("mysqlRows:{}", mysqlRows); - reportLog.info("mysqlRows:{}", mysqlRows); // Assert.assertEquals(fesqlRows.size(), mysqlRows.size(), // String.format("ResultChecker fail: mysql size %d, fesql size %d", mysqlRows.size(), fesqlRows.size())); Assert.assertEquals(fesqlRows,mysqlRows,String.format("ResultChecker fail: mysql: %s, fesql: %s", mysqlRows, fesqlRows)); } public void checkSqlite3(OpenMLDBResult sqlite3Result) throws Exception { log.info("diff sqlite3 check"); - reportLog.info("diff sqlite3 check"); //验证success boolean fesqlOk = fesqlResult.isOk(); boolean sqlite3Ok = sqlite3Result.isOk(); @@ -79,9 +75,7 @@ public void checkSqlite3(OpenMLDBResult sqlite3Result) throws Exception { List> 
fesqlRows = fesqlResult.getResult(); List> sqlite3Rows = sqlite3Result.getResult(); log.info("fesqlRows:{}", fesqlRows); - reportLog.info("fesqlRows:{}", fesqlRows); log.info("sqlite3Rows:{}", sqlite3Rows); - reportLog.info("sqlite3Rows:{}", sqlite3Rows); Assert.assertEquals(fesqlRows.size(), sqlite3Rows.size(), String.format("ResultChecker fail: sqlite3 size %d, fesql size %d", sqlite3Rows.size(), fesqlRows.size())); for (int i = 0; i < fesqlRows.size(); ++i) { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java index dfc3de0fdb9..894120bfeef 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java @@ -37,7 +37,6 @@ public DiffVersionChecker(OpenMLDBResult fesqlResult, Map{ String version = e.getKey(); OpenMLDBResult result = e.getValue(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/MessageChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/MessageChecker.java new file mode 100644 index 00000000000..7831be712ea --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/MessageChecker.java @@ -0,0 +1,41 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.openmldb.java_sdk_test.checker; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.model.ExpectDesc; +import lombok.extern.slf4j.Slf4j; +import org.testng.Assert; + +/** + * @author zhaowei + * @date 2020/6/16 3:14 PM + */ +@Slf4j +public class MessageChecker extends BaseChecker { + + public MessageChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ + super(expect,fesqlResult); + } + + @Override + public void check() throws Exception { + log.info("message check"); + String expectMsg = expect.getMsg(); + String actualMsg = fesqlResult.getMsg(); + Assert.assertTrue(actualMsg.contains(expectMsg),"msg验证失败,actualMsg="+actualMsg+",expectMsg="+expectMsg); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java index 624feacae6d..781180aa302 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java @@ -44,7 +44,6 @@ public OptionsChecker(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws Exception { log.info("options check"); - reportLog.info("options 
check"); String apiserverEndpoint = OpenMLDBGlobalVar.mainInfo.getApiServerEndpoints().get(0); String dbName = fesqlResult.getDbName(); String tableName = expect.getName(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java index 77ae4efac2d..5643a8f69c5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java @@ -45,7 +45,6 @@ public ResultChecker(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws ParseException { log.info("result check"); - reportLog.info("result check"); if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } @@ -61,9 +60,7 @@ public void check() throws ParseException { } log.info("expect:{}", expectRows); - reportLog.info("expect:{}", expectRows); log.info("actual:{}", actual); - reportLog.info("actual:{}", actual); Assert.assertEquals(actual.size(), expectRows.size(), String.format("ResultChecker fail: expect size %d, real size %d", expectRows.size(), actual.size())); for (int i = 0; i < actual.size(); ++i) { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java index 8fde7eb4833..68cf423aca7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java 
+++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java @@ -45,7 +45,6 @@ public ResultCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws ParseException { log.info("result check"); - reportLog.info("result check"); if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } @@ -60,9 +59,7 @@ public void check() throws ParseException { } log.info("expect:{}", expectRows); - reportLog.info("expect:{}", expectRows); log.info("actual:{}", actual); - reportLog.info("actual:{}", actual); Assert.assertEquals(actual.size(), expectRows.size(), String.format("ResultChecker fail: expect size %d, real size %d", expectRows.size(), actual.size())); for (int i = 0; i < actual.size(); ++i) { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java index 8c1065a819e..a20971c4491 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java @@ -43,7 +43,6 @@ public ResultCheckerByJDBC(ExpectDesc expect, OpenMLDBResult fesqlResult) { @Override public void check() throws Exception { log.info("result check"); - reportLog.info("result check"); if (expect.getColumns().isEmpty()) { throw new RuntimeException("fail check result: columns are empty"); } @@ -59,9 +58,7 @@ public void check() throws Exception { } log.info("expect:{}", expectRows); - reportLog.info("expect:{}", expectRows); log.info("actual:{}", actual); - 
reportLog.info("actual:{}", actual); Assert.assertEquals(actual.size(), expectRows.size(), String.format("ResultChecker fail: expect size %d, real size %d", expectRows.size(), actual.size())); for (int i = 0; i < actual.size(); ++i) { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java index b16d2504c7a..18be5712257 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java @@ -34,7 +34,6 @@ public SuccessChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ @Override public void check() throws Exception { log.info("success check"); - reportLog.info("success check"); boolean success = expect.getSuccess(); boolean actual = fesqlResult.isOk(); Assert.assertEquals(actual,success,"success验证失败"); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java index 247f71266fd..bb6a0e08b3c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java @@ -34,4 +34,5 @@ public class ExpectDesc extends Table { private int deploymentCount = -1; private List diffTables; private CatFile cat; + private String msg; } diff --git a/test/steps/openmldb-sdk-test-java-src.sh 
b/test/steps/openmldb-sdk-test-java-src.sh index 9b9712b9091..10d30d1f043 100755 --- a/test/steps/openmldb-sdk-test-java-src.sh +++ b/test/steps/openmldb-sdk-test-java-src.sh @@ -91,7 +91,7 @@ cd test/test-tool/command-tool || exit mvn clean install -Dmaven.test.skip=true cd "${ROOT_DIR}" || exit # modify config -sh test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "0.5.0" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" "${TABLE_STORAGE_MODE}" +sh test/steps/modify_java_sdk_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" "${TABLE_STORAGE_MODE}" # install jar cd test/integration-test/openmldb-test-java || exit From 4452c5cdae92d93b94bd84cb05b17e27a7f9a1ee Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 11:46:40 +0800 Subject: [PATCH 099/172] deploy --- .../data_expiration/test_data_expiration.yaml | 72 +++++++++++++++++++ cases/function/dml/test_delete.yaml | 16 +++++ .../function/function/test_udaf_function.yaml | 16 ++--- .../src/main/resources/run_case.properties | 1 - .../cluster/sql_test/FunctionTest.java | 2 +- 5 files changed, 97 insertions(+), 10 deletions(-) create mode 100644 cases/function/data_expiration/test_data_expiration.yaml diff --git a/cases/function/data_expiration/test_data_expiration.yaml b/cases/function/data_expiration/test_data_expiration.yaml new file mode 100644 index 00000000000..c3f863ee578 --- /dev/null +++ b/cases/function/data_expiration/test_data_expiration.yaml @@ -0,0 +1,72 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: +- id: 15 + desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:4:latest"] + storage: SSD + rows: + - ["bb", 2, 3, 1590738989000] + - ["bb", 4, 5, 1590738990000] + - ["bb", 6, 7, 1590738991000] + - ["bb", 8, 9, 1590738992000] + - ["bb", 10, 11, 1590738993000] + - ["bb", 12, 13, 1590738994000] + - ["bb", 14, 15, 1590738995000] + - ["bb", 16, 17, 1590738996000] + - ["bb", 18, 19, 1590738997000] + - ["bb", 20, 21, 1590738998000] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 20, 21] + - ["bb", 18, 19] + - ["bb", 16, 17] + - ["bb", 14, 15] + +- id: 16 + desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 4, 5, "{currentTime}-200"] + - ["bb", 6, 7, "{currentTime}-599000"] + - ["bb", 8, 9, "{currentTime}-600000"] + - ["bb", 10, 11, "{currentTime}-600005"] + - ["bb", 12, 13, "{currentTime}-600006"] + - ["bb", 14, 15, "{currentTime}-600007"] + - ["bb", 16, 17, "{currentTime}-600008"] + - ["bb", 18, 19, "{currentTime}-600009"] + - ["bb", 20, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 3] + - ["bb", 4, 5] + - ["bb", 6, 7] \ No newline at 
end of file diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml index a50859cd2e9..ad4869ca056 100644 --- a/cases/function/dml/test_delete.yaml +++ b/cases/function/dml/test_delete.yaml @@ -387,6 +387,22 @@ cases: - id: 21 desc: delete 其他库的数据 + inputs: + - + db: d1 + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from d1.{0} where c1='aa'; + - select * from d1.{0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - id: 22 desc: 两个index中key相同 delete 一个key diff --git a/cases/function/function/test_udaf_function.yaml b/cases/function/function/test_udaf_function.yaml index b504230162f..dda2cf634dd 100644 --- a/cases/function/function/test_udaf_function.yaml +++ b/cases/function/function/test_udaf_function.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: [] +debugs: ["distinct_count"] version: 0.5.0 cases: - @@ -149,16 +149,16 @@ cases: sqlDialect: ["HybridSQL"] inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] - indexs: ["index1:c1:c7"] + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool","ts timestamp"] + indexs: ["index1:c1:ts"] rows: - - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] - - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",false] - - [3,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",true] - - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true,1590738990000] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",false,1590738991000] + - [3,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",true,1590738992000] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL,1590738993000] sql: | SELECT {0}.id, c1, distinct_count(c2) OVER w1 as m2,distinct_count(c3) OVER w1 as m3,distinct_count(c4) OVER w1 as m4,distinct_count(c5) OVER w1 as m5,distinct_count(c6) OVER w1 as m6,distinct_count(c7) OVER w1 as m7,distinct_count(c8) OVER w1 as m8,distinct_count(c9) OVER w1 as m9 FROM {0} WINDOW - w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.ts ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint"] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index e742006ef82..912d8e3a2da 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,4 +1,3 @@ # memory/ssd/hdd table_storage_mode=ssd -version=0.5.0 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java index 1fff9b1547a..552afbaa09a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java @@ -54,7 +54,7 @@ public void testFunctionRequestModeWithSp(SQLCase testCase) throws Exception { } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/function/") + @Yaml(filePaths = "function/function/test_udaf_function.yaml") public void testFunctionRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } From fc8cfeff89fcf6576f2dac07924385c7da53060b Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 12:40:47 +0800 Subject: [PATCH 100/172] modify cicd --- .github/workflows/integration-test-src.yml | 248 +++++++++++------- .../data_expiration/test_data_expiration.yaml | 6 +- .../src/main/resources/run_case.properties | 1 + 3 files changed, 154 insertions(+), 101 deletions(-) diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index b27a24c13b0..206a8a78305 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -90,8 +90,8 @@ 
jobs: with: files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml comment_mode: "create new" - check_name: "Java SDK Test Cluster0 SRC Report" - comment_title: "Java SDK Test Cluster0 SRC Report" + check_name: "SRC java-sdk-cluster-memory-0 Report" + comment_title: "SRC java-sdk-cluster-memory-0 Report" java-sdk-cluster-memory-1: if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} @@ -116,8 +116,62 @@ jobs: uses: EnricoMi/publish-unit-test-result-action@v1 with: files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml - check_name: Java SDK Test Cluster1 SRC Report - comment_title: Java SDK Test Cluster1 SRC Report + check_name: SRC java-sdk-cluster-memory-1 Report + comment_title: SRC java-sdk-cluster-memory-1 Report + + java-sdk-cluster-ssd-0: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: build jsdk and package + run: | + make configure CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" -s "ssd" + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml + comment_mode: "create new" + check_name: "SRC java-sdk-cluster-ssd-0 Report" + comment_title: "SRC java-sdk-cluster-ssd-0 Report" + + java-sdk-cluster-hdd-0: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} + 
runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: build jsdk and package + run: | + make configure CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" -s "hdd" + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml + comment_mode: "create new" + check_name: "SRC java-sdk-cluster-hdd-0 Report" + comment_title: "SRC java-sdk-cluster-hdd-0 Report" # standalone-cli-test-0: # if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'standalone-cli' || github.event.inputs.EXEC_TEST_TYPE == 'cli' }} @@ -167,29 +221,29 @@ jobs: # check_name: Standalone CLI1 Test SRC Report # comment_title: Standalone CLI1 Test SRC Report - python-sdk-test-standalone-0: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: build pysdk - run: | - make thirdparty - mkdir -p build - source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. 
&& make -j$(nproc) cp_python_sdk_so openmldb && cd ../ - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "0" - - name: upload test results - if: always() - uses: actions/upload-artifact@v2 - with: - name: python-sdk-standalone-0-src-${{ github.sha }} - path: | - python/report/allure-results +# python-sdk-test-standalone-0: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:latest +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: build pysdk +# run: | +# make thirdparty +# mkdir -p build +# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. && make -j$(nproc) cp_python_sdk_so openmldb && cd ../ +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "0" +# - name: upload test results +# if: always() +# uses: actions/upload-artifact@v2 +# with: +# name: python-sdk-standalone-0-src-${{ github.sha }} +# path: | +# python/report/allure-results # - name: allure-report # uses: simple-elf/allure-report-action@master @@ -208,74 +262,74 @@ jobs: # PUBLISH_BRANCH: gh-pages # PUBLISH_DIR: allure-history - python-sdk-test-standalone-1: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: build pysdk - run: | - make thirdparty - mkdir -p build - source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. 
&& make -j$(nproc) cp_python_sdk_so openmldb && cd ../ - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "1,2,3,4,5" - - name: upload test results - if: always() - uses: actions/upload-artifact@v2 - with: - name: python-sdk-standalone-1-src-${{ github.sha }} - path: | - python/report/allure-results - - apiserver-test: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'apiserver' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: build jsdk and package - run: | - make thirdparty - mkdir -p build - source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. && make -j$(nproc) sql_javasdk_package openmldb && cd ../ - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-apiserver-test.sh -b SRC -c test_all.xml -d standalone -l "0" - - name: TEST Results - if: always() - uses: EnricoMi/publish-unit-test-result-action@v1 - with: - files: test/integration-test/openmldb-test-java/openmldb-http-test/target/surefire-reports/TEST-*.xml - check_name: APIServer SRC Report - comment_title: APIServer SRC Report +# python-sdk-test-standalone-1: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:latest +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: build pysdk +# run: | +# make thirdparty +# mkdir -p build +# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. 
&& make -j$(nproc) cp_python_sdk_so openmldb && cd ../ +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "1,2,3,4,5" +# - name: upload test results +# if: always() +# uses: actions/upload-artifact@v2 +# with: +# name: python-sdk-standalone-1-src-${{ github.sha }} +# path: | +# python/report/allure-results - batch-test: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'batch' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: build - run: | - make thirdparty - mkdir -p build - source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. && make -j$(nproc) sql_javasdk_package openmldb && cd ../ - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-batch-test.sh -b SRC - - name: TEST Results - if: always() - uses: EnricoMi/publish-unit-test-result-action@v1 - with: - files: test/batch-test/openmldb-batch-test/target/surefire-reports/TEST-*.xml - check_name: Batch Test SRC Report - comment_title: Batch Test SRC Report +# apiserver-test: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'apiserver' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:latest +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: build jsdk and package +# run: | +# make thirdparty +# mkdir -p build +# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. 
&& make -j$(nproc) sql_javasdk_package openmldb && cd ../ +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-apiserver-test.sh -b SRC -c test_all.xml -d standalone -l "0" +# - name: TEST Results +# if: always() +# uses: EnricoMi/publish-unit-test-result-action@v1 +# with: +# files: test/integration-test/openmldb-test-java/openmldb-http-test/target/surefire-reports/TEST-*.xml +# check_name: APIServer SRC Report +# comment_title: APIServer SRC Report +# +# batch-test: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'batch' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:latest +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: build +# run: | +# make thirdparty +# mkdir -p build +# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. && make -j$(nproc) sql_javasdk_package openmldb && cd ../ +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-batch-test.sh -b SRC +# - name: TEST Results +# if: always() +# uses: EnricoMi/publish-unit-test-result-action@v1 +# with: +# files: test/batch-test/openmldb-batch-test/target/surefire-reports/TEST-*.xml +# check_name: Batch Test SRC Report +# comment_title: Batch Test SRC Report diff --git a/cases/function/data_expiration/test_data_expiration.yaml b/cases/function/data_expiration/test_data_expiration.yaml index c3f863ee578..d686692bd92 100644 --- a/cases/function/data_expiration/test_data_expiration.yaml +++ b/cases/function/data_expiration/test_data_expiration.yaml @@ -16,14 +16,12 @@ db: test_zw debugs: [] version: 0.5.0 cases: -- id: 15 - desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 - mode: request-unsupport +- id: 0 + desc: ttl_type=latest,ttl=4,insert 10 inputs: - columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] indexs: ["index1:c1:c4:4:latest"] - storage: SSD rows: - ["bb", 2, 3, 1590738989000] - ["bb", 4, 5, 
1590738990000] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index 912d8e3a2da..e742006ef82 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,3 +1,4 @@ # memory/ssd/hdd table_storage_mode=ssd +version=0.5.0 From e60288096d223d9f8eb15d134918e0503221a47f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 12:45:37 +0800 Subject: [PATCH 101/172] deploy --- .../openmldb-deploy/test-suite/test_deploy.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 85c476ba1dd..f512a758c26 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,8 @@ - + + From 28d88ae8df6da53e9bcefe93b5646febff0541b5 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 12:58:08 +0800 Subject: [PATCH 102/172] modify cicd --- .github/workflows/integration-test-src.yml | 4 ++-- cases/function/dml/test_delete.yaml | 21 ++++++++++++++++++++- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index 206a8a78305..7e1c8fc8621 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -136,7 +136,7 @@ jobs: echo "openmldb-pkg:" ls -al - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" -s "ssd" + run: source 
/root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster_disk.xml -d cluster -l "0" -s "ssd" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 @@ -163,7 +163,7 @@ jobs: echo "openmldb-pkg:" ls -al - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" -s "hdd" + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster_disk.xml -d cluster -l "0" -s "hdd" - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml index ad4869ca056..99b16da977f 100644 --- a/cases/function/dml/test_delete.yaml +++ b/cases/function/dml/test_delete.yaml @@ -499,7 +499,7 @@ cases: - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - id: 27 - desc: delete 一个key + desc: 比较运算符删除 inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] @@ -515,3 +515,22 @@ cases: columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 28 + desc: 表名为job delete + inputs: + - + name: job + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] From 
374aa34cf61b0da7de70f2bcfc523a15d14fff95 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 14:32:31 +0800 Subject: [PATCH 103/172] deploy --- cases/function/dml/test_delete.yaml | 152 +++++++++--------- .../long_window/test_count_where.yaml | 100 ++++++++++++ .../test-suite/test_deploy.xml | 4 +- .../src/main/resources/run_case.properties | 2 +- .../java_sdk_test/cluster/v060/DMLTest.java | 54 +++++++ 5 files changed, 235 insertions(+), 77 deletions(-) create mode 100644 cases/function/long_window/test_count_where.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml index 99b16da977f..2db00c73e7d 100644 --- a/cases/function/dml/test_delete.yaml +++ b/cases/function/dml/test_delete.yaml @@ -21,7 +21,7 @@ cases: desc: delete 一个key inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -31,7 +31,7 @@ cases: - delete from {0} where c1='aa'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - @@ -39,7 +39,7 @@ cases: desc: delete 组合索引 inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 
date","c9 bool"] indexs: ["index1:c1|c2:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -50,7 +50,7 @@ cases: - delete from {0} where c1='aa' and c2=1; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] order: id rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -60,7 +60,7 @@ cases: desc: delete 一个索引的两个key inputs: - - columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -77,7 +77,7 @@ cases: desc: delete 两个索引的两个key inputs: - - columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7","index2:c2:c7"] rows: - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -94,7 +94,7 @@ cases: desc: 两个索引 delete 其中一个 inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7","index2:c2:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -105,7 +105,7 @@ cases: - delete from {0} where c2=2; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] order: id rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -116,7 +116,7 @@ cases: desc: delete 不是索引列 inputs: - - columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -133,7 +133,7 @@ cases: desc: delete key不存在 inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -143,7 +143,7 @@ cases: - delete from {0} where c1='cc'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -153,7 +153,7 @@ cases: desc: delete null inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,null,1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -163,7 +163,7 @@ cases: - delete from {0} where c1 is null; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 
date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - @@ -171,7 +171,7 @@ cases: desc: delete 空串 inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -181,7 +181,7 @@ cases: - delete from {0} where c1=''; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - @@ -189,7 +189,7 @@ cases: desc: delete int inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c3:c7"] rows: - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -199,7 +199,7 @@ cases: - delete from {0} where c3=3; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -208,7 +208,7 @@ cases: desc: delete smallint inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 
float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c2:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -218,7 +218,7 @@ cases: - delete from {0} where c2=1; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - @@ -226,7 +226,7 @@ cases: desc: delete bigint inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c4:c7"] rows: - [1,"aa",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true] @@ -236,7 +236,7 @@ cases: - delete from {0} where c4=4; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - @@ -244,7 +244,7 @@ cases: desc: delete date inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c8:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -254,7 +254,7 @@ cases: - delete from {0} where c8='2020-05-02'; - select * from {0}; expect: - columns : ["id 
int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] @@ -263,7 +263,7 @@ cases: desc: delete timestamp inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c7:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -273,7 +273,7 @@ cases: - delete from {0} where c7=1590738989000; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - @@ -281,7 +281,7 @@ cases: desc: delete bool inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c9:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false] @@ -291,7 +291,7 @@ cases: - delete from {0} where c9=true; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false] - @@ -299,7 
+299,7 @@ cases: desc: 两次delete相同index 不同的key inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -310,7 +310,7 @@ cases: - delete from {0} where c1='cc'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - @@ -318,7 +318,7 @@ cases: desc: 两次delete 不同的index inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7","index1:c2:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -330,25 +330,26 @@ cases: - delete from {0} where c2=1; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] order: id rows: - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] - id: 18 desc: delete过期数据 + inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - indexs: ["index1:c1:c7:1:latest"] - rows: - - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - 
[3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] sqls: - delete from {0} where c1='aa'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - @@ -356,7 +357,7 @@ cases: desc: delete表不存在 inputs: - - columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -372,7 +373,7 @@ cases: desc: delete列不存在 inputs: - - columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -390,7 +391,7 @@ cases: inputs: - db: d1 - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -400,26 +401,27 @@ cases: - delete from d1.{0} where c1='aa'; - 
select * from d1.{0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - id: 22 desc: 两个index中key相同 delete 一个key + inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - indexs: ["index1:c1:c7","index2:c1:c4:1:latest"] - rows: - - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c1:c4:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] sqls: - delete from {0} where c1='aa'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] order: id rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -430,7 +432,7 @@ cases: desc: delete全部数据 inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c2:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -444,20 +446,21 @@ cases: - id: 24 desc: 两个索引,一个索引数据过期,删除另一个索引 + inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - indexs: ["index1:c1:c7:1:latest","index2:c2:c7"] - rows: - - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest","index2:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] sqls: - delete from {0} where c2=1; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] order: id rows: - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] @@ -465,18 +468,19 @@ cases: - id: 25 desc: 数据过期,delete其他pk + inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - indexs: ["index1:c1:c7:1:latest"] - rows: - - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - 
[3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] sqls: - delete from {0} where c1='bb'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - @@ -484,7 +488,7 @@ cases: desc: 不等式删除 inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -494,7 +498,7 @@ cases: - delete from {0} where c1!='cc'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - @@ -502,7 +506,7 @@ cases: desc: 比较运算符删除 inputs: - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c2:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -512,7 
+516,7 @@ cases: - delete from {0} where c2>=2; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - @@ -521,7 +525,7 @@ cases: inputs: - name: job - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -531,6 +535,6 @@ cases: - delete from {0} where c1='aa'; - select * from {0}; expect: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] diff --git a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml new file mode 100644 index 00000000000..c9aa6091206 --- /dev/null +++ b/cases/function/long_window/test_count_where.yaml @@ -0,0 +1,100 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: 长窗口count_where,date类型 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 1 + desc: 长窗口count_where,smallint类型 + - + id: 2 + desc: 长窗口count_where,int类型 + - + id: 3 + desc: 长窗口count_where,bigint类型 + - + id: 4 + desc: 长窗口count_where,string类型 + - + id: 5 + desc: 长窗口count_where,timestamp类型 + - + id: 6 + desc: 长窗口count_where,row类型 + - + id: 7 + desc: 长窗口count_where,bool类型 + - + id: 8 + desc: 长窗口count_where,float类型 + - + id: 9 + desc: 长窗口count_where,double类型 + - + id: 10 + desc: 长窗口count_where,第二个参数使用bool列 + - + id: 11 + desc: 长窗口count_where,第二个参数使用= + - + id: 12 + desc: 长窗口count_where,第二个参数使用!= + - + id: 13 + desc: 长窗口count_where,第二个参数使用>= + - + id: 14 + desc: 长窗口count_where,第二个参数使用<= + - + id: 15 + desc: 长窗口count_where,第二个参数使用> + - + id: 16 + desc: 长窗口count_where,第二个参数使用< + - + id: 17 + desc: 长窗口count_where,第二个参数使用and + - + id: 18 + desc: 长窗口count_where,第二个参数使用两个列 + - + id: 19 + desc: 长窗口count_where,第二个参数使用嵌套 + - + id: 20 + desc: 长窗口count_where,第二个参数常量在前 + + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml 
index f512a758c26..46a07223367 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,8 +3,8 @@ - - + + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index e742006ef82..2219ec5e741 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,4 +1,4 @@ # memory/ssd/hdd -table_storage_mode=ssd +table_storage_mode=memory version=0.5.0 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java new file mode 100644 index 00000000000..9c424dc0b1f --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.openmldb.java_sdk_test.cluster.v060; + + +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; +import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Feature; +import io.qameta.allure.Story; +import lombok.extern.slf4j.Slf4j; +import org.testng.annotations.Test; + +/** + * @author zhaowei + * @date 2020/6/11 2:53 PM + */ +@Slf4j +@Feature("DML") +public class DMLTest extends OpenMLDBTest { + + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/dml/test_delete.yaml"}) + @Story("delete") + public void testDelete(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); + } + + @Test(dataProvider = "getCase",enabled = false) + @Yaml(filePaths = {"function/dml/test_delete.yaml"}) + @Story("delete") + public void testDeleteByCli(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } + + + +} From 0c81a66563e25c0c6f167d0d104b955e474f0900 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 15:12:56 +0800 Subject: [PATCH 104/172] modify cicd --- .../function/function/test_udaf_function.yaml | 2 +- cases/function/window/test_current_row.yaml | 3 + cases/function/window/window_attributes.yaml | 101 +++++++++--------- .../src/main/resources/run_case.properties | 2 +- .../cluster/sql_test/FunctionTest.java | 2 +- .../cluster/sql_test/WindowTest.java | 2 +- .../openmldb/test_common/model/Table.java | 6 +- 7 files changed, 63 insertions(+), 55 deletions(-) diff --git a/cases/function/function/test_udaf_function.yaml b/cases/function/function/test_udaf_function.yaml index dda2cf634dd..0642ed737fa 100644 --- a/cases/function/function/test_udaf_function.yaml +++ b/cases/function/function/test_udaf_function.yaml @@ -13,7 +13,7 @@ # 
limitations under the License. db: test_zw -debugs: ["distinct_count"] +debugs: [] version: 0.5.0 cases: - diff --git a/cases/function/window/test_current_row.yaml b/cases/function/window/test_current_row.yaml index 4442e1ef199..a70e63b570c 100644 --- a/cases/function/window/test_current_row.yaml +++ b/cases/function/window/test_current_row.yaml @@ -308,6 +308,7 @@ cases: - [ "bb",24,null ] - id: 13 desc: rows_range-open-current_row + tags: ["TODO","bug,修复后验证"] inputs: - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] indexs: [ "index1:c1:c7" ] @@ -652,6 +653,7 @@ cases: - [ "bb",24,null ] - id: 28 desc: 两个窗口,一个rows,一个rows_range,current_row + tags: ["TODO","bug,修复后验证"] inputs: - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] indexs: [ "index1:c1:c7" ] @@ -735,6 +737,7 @@ cases: - [ "bb",24,null ] - id: 32 desc: rows_range-纯历史窗口-current_row-ts=0 + tags: ["TODO","bug,修复后验证"] inputs: - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] indexs: [ "index1:c1:c7" ] diff --git a/cases/function/window/window_attributes.yaml b/cases/function/window/window_attributes.yaml index 39cc811714a..53ebc8fcde7 100644 --- a/cases/function/window/window_attributes.yaml +++ b/cases/function/window/window_attributes.yaml @@ -6,6 +6,7 @@ debugs: [] version: 0.6.0 +db: test_java cases: - id: 0 desc: ROWS_RANGE window with exclude_current_row @@ -60,13 +61,13 @@ cases: - mi int - l1 int order: id - data: | - 0, 0, NULL, NULL, NULL - 1, 1, 0, 0, 0 - 2, 0, NULL, NULL, 0 - 3, 1, 21, 21, 21 - 4, 2, 22, 21, 22 - 5, 0, NULL, NULL, NULL + rows: + - [0, 0, NULL, NULL, NULL] + - [1, 1, 0, 0, 0] + - [2, 0, NULL, NULL, 0] + - [3, 1, 21, 21, 21] + - [4, 2, 22, 21, 22] + - [5, 0, NULL, NULL, NULL] - id: 1 desc: | ROWS window with exclude_current_row, '0 PRECEDING EXCLUDE CURRENT_ROW' actually is the same as '0 OPEN PRECEDING' @@ -102,11 +103,11 @@ cases: - mi int - l1 
int order: id - data: | - 1, 0, NULL, NULL, NULL - 2, 1, 21, 21, 21 - 3, 2, 22, 21, 22 - 4, 0, NULL, NULL, NULL + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 0, NULL, NULL, NULL] - id: 2 desc: | ROWS_RANGE pure-history window with exclude_current_row @@ -160,11 +161,11 @@ cases: - mi int - l1 int order: id - data: | - 1, 0, NULL, NULL, NULL - 2, 1, 21, 21, 21 - 3, 2, 22, 21, 22 - 4, 0, NULL, NULL, NULL + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 0, NULL, NULL, NULL] - id: 3 desc: | ROWS pure-history window with exclude_current_row @@ -218,11 +219,11 @@ cases: - mi int - l1 int order: id - data: | - 1, 0, NULL, NULL, NULL - 2, 1, 21, 21, 21 - 3, 2, 22, 21, 22 - 4, 0, NULL, NULL, NULL + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 0, NULL, NULL, NULL] - id: 4 desc: | @@ -261,13 +262,13 @@ cases: - mi int - l1 int order: id - data: | - 1, 0, NULL, NULL, NULL - 2, 1, 21, 21, 21 - 3, 2, 22, 21, 22 - 4, 2, 23, 22, 23 - 5, 0, NULL, NULL, NULL - 6, 1, 56, 56, 56 + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 2, 23, 22, 23] + - [5, 0, NULL, NULL, NULL] + - [6, 1, 56, 56, 56] - id: 5 desc: | @@ -323,13 +324,13 @@ cases: - mi int - l1 int order: id - data: | - 1, 0, NULL, NULL, NULL - 2, 1, 21, 21, 21 - 3, 2, 22, 21, 22 - 4, 2, 23, 22, 23 - 5, 0, NULL, NULL, NULL - 6, 1, 56, 56, 56 + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 2, 23, 22, 23] + - [5, 0, NULL, NULL, NULL] + - [6, 1, 56, 56, 56] - id: 6 desc: | @@ -385,13 +386,13 @@ cases: - mi int - l1 int order: id - data: | - 1, 0, NULL, NULL, NULL - 2, 1, 21, 21, 21 - 3, 2, 22, 21, 22 - 4, 3, 23, 21, 23 - 5, 0, NULL, NULL, NULL - 6, 1, 56, 56, 56 + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 3, 23, 21, 23] + - [5, 0, NULL, NULL, NULL] + - [6, 1, 56, 56, 56] - 
id: 7 desc: | @@ -430,13 +431,13 @@ cases: - mi int - l1 int order: id - data: | - 1, 0, NULL, NULL, NULL - 2, 1, 21, 21, 21 - 3, 2, 22, 21, 22 - 4, 2, 23, 22, 23 - 5, 0, NULL, NULL, NULL - 6, 1, 56, 56, 56 + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 2, 23, 22, 23] + - [5, 0, NULL, NULL, NULL] + - [6, 1, 56, 56, 56] - id: 8 desc: | diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties index 2219ec5e741..d361f7ddc73 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/resources/run_case.properties @@ -1,4 +1,4 @@ # memory/ssd/hdd table_storage_mode=memory -version=0.5.0 +#version=0.5.0 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java index 552afbaa09a..1fff9b1547a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java @@ -54,7 +54,7 @@ public void testFunctionRequestModeWithSp(SQLCase testCase) throws Exception { } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/function/test_udaf_function.yaml") + @Yaml(filePaths = "function/function/") public void testFunctionRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, 
SQLCaseType.kRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java index 29f0235e702..bf3003b1248 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java @@ -42,7 +42,7 @@ public class WindowTest extends OpenMLDBTest { public void testWindowBatch(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); } - @Story("request") + @Story("requestWithSp") @Test(dataProvider = "getCase") @Yaml(filePaths = {"function/window/", "function/cluster/", diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java index be130bb7542..1d4530f9838 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/Table.java @@ -208,7 +208,11 @@ public List> getRows() { for (String row : data.trim().split("\n")) { List each_row = new ArrayList(); for (String item : row.trim().split(",")) { - each_row.add(item.trim()); + String data = item.trim(); + if(data.equalsIgnoreCase("null")){ + data = null; + } + each_row.add(data); } parserd_rows.add(each_row); } From f45f1bd23d0c0fec9f05d0d190b33a68409befa4 Mon Sep 17 
00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 16:32:13 +0800 Subject: [PATCH 105/172] support deploy executor --- .../long_window/test_count_where.yaml | 1 + .../executor/LongWindowExecutor.java | 106 ++++++++++++++++++ .../executor/RequestQuerySQLExecutor.java | 2 +- .../executor/StoredProcedureSQLExecutor.java | 24 +--- .../openmldb/test_common/model/SQLCase.java | 1 + 5 files changed, 114 insertions(+), 20 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java diff --git a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml index c9aa6091206..9d430521188 100644 --- a/cases/function/long_window/test_count_where.yaml +++ b/cases/function/long_window/test_count_where.yaml @@ -18,6 +18,7 @@ cases: - id: 0 desc: 长窗口count_where,date类型 + longWindow: w1:2 inputs: - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java new file mode 100644 index 00000000000..0d6958e69dc --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java @@ -0,0 +1,106 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.openmldb.java_sdk_test.executor; + +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.util.SDKUtil; +import com._4paradigm.openmldb.test_common.util.SQLUtil; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; + +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Slf4j +public class LongWindowExecutor extends StoredProcedureSQLExecutor { + + private List spNames; + + public LongWindowExecutor(SqlExecutor executor, SQLCase fesqlCase, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { + super(executor, fesqlCase, isBatchRequest, isAsyn, executorType); + spNames = new ArrayList<>(); + } + + public LongWindowExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { + super(fesqlCase, executor, executorMap, fedbInfoMap, isBatchRequest, isAsyn, executorType); + spNames = new ArrayList<>(); + } + + @Override + public OpenMLDBResult execute(String version, SqlExecutor executor) { + log.info("version:{} execute begin",version); + OpenMLDBResult fesqlResult = null; + try { + if (fesqlCase.getInputs().isEmpty() || + 
CollectionUtils.isEmpty(fesqlCase.getInputs().get(0).getRows())) { + log.error("fail to execute in request query sql executor: sql case inputs is empty"); + return null; + } + String sql = fesqlCase.getSql(); + log.info("sql: {}", sql); + if (sql == null || sql.length() == 0) { + return null; + } + if (fesqlCase.getBatch_request() != null) { + fesqlResult = executeBatch(executor, sql, this.isAsyn); + } else { + fesqlResult = executeSingle(executor, sql, this.isAsyn); + } + spNames.add(fesqlCase.getSpName()); + }catch (Exception e){ + e.printStackTrace(); + } + log.info("version:{} execute end",version); + return fesqlResult; + } + + private OpenMLDBResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { + String spSql = fesqlCase.getProcedure(sql); + log.info("spSql: {}", spSql); + return SDKUtil.sqlRequestModeWithProcedure( + executor, dbName, fesqlCase.getSpName(), null == fesqlCase.getBatch_request(), + spSql, fesqlCase.getInputs().get(0), isAsyn); + } + + private OpenMLDBResult executeBatch(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { + String spName = "sp_" + tableNames.get(0) + "_" + System.currentTimeMillis(); + String spSql = SQLUtil.buildSpSQLWithConstColumns(spName, sql, fesqlCase.getBatch_request()); + log.info("spSql: {}", spSql); + return SDKUtil.selectBatchRequestModeWithSp( + executor, dbName, spName, spSql, fesqlCase.getBatch_request(), isAsyn); + } + + +// @Override +// public void tearDown(String version,SqlExecutor executor) { +// log.info("version:{},begin tearDown",version); +// if (CollectionUtils.isEmpty(spNames)) { +// return; +// } +// for (String spName : spNames) { +// String drop = "drop procedure " + spName + ";"; +// SDKUtil.ddl(executor, dbName, drop); +// } +// super.tearDown(version,executor); +// } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index d3c4d3c74ab..66c9eaa3f27 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -123,7 +123,7 @@ protected void prepare(String version,SqlExecutor executor) { boolean useFirstInputAsRequests = !isBatchRequest && null == fesqlCase.getBatch_request(); OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), useFirstInputAsRequests); if (!res.isOk()) { - throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail"); + throw new RuntimeException("fail to run RequestQuerySQLExecutor: prepare fail"); } log.info("version:{} prepare end",version); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 2e15d9854d1..c588f29def7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -25,6 +25,7 @@ import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import java.sql.SQLException; import java.util.ArrayList; @@ -46,33 +47,18 @@ public 
StoredProcedureSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map(); } - @Override - public void prepare(String version,SqlExecutor executor){ - log.info("version:{} prepare begin",version); - boolean dbOk = executor.createDB(dbName); - log.info("create db:{},{}", dbName, dbOk); - OpenMLDBResult res = SDKUtil.createAndInsert( - executor, dbName, fesqlCase.getInputs(), - !isBatchRequest && null == fesqlCase.getBatch_request()); - if (!res.isOk()) { - throw new RuntimeException("fail to run StoredProcedureSQLExecutor: prepare fail"); - } - log.info("version:{} prepare end",version); - } @Override public OpenMLDBResult execute(String version, SqlExecutor executor) { log.info("version:{} execute begin",version); OpenMLDBResult fesqlResult = null; try { - if (fesqlCase.getInputs().isEmpty() || - CollectionUtils.isEmpty(fesqlCase.getInputs().get(0).getRows())) { - log.error("fail to execute in request query sql executor: sql case inputs is empty"); - return null; + if (fesqlCase.getInputs().isEmpty() || CollectionUtils.isEmpty(fesqlCase.getInputs().get(0).getRows())) { + throw new IllegalArgumentException("fail to execute in request query sql executor: sql case inputs is empty"); } String sql = fesqlCase.getSql(); log.info("sql: {}", sql); - if (sql == null || sql.length() == 0) { - return null; + if (StringUtils.isEmpty(sql)) { + throw new IllegalArgumentException("fail to execute in request query sql executor: sql is empty"); } if (fesqlCase.getBatch_request() != null) { fesqlResult = executeBatch(executor, sql, this.isAsyn); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index 1761e289210..a95a843fe67 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -35,6 +35,7 @@ public class SQLCase implements Serializable{ private String mode; private String db; private String version; + private String longWindow; private String sql; private List> dataProvider; private List sqls; From 8c5ad53ba308da494fc6cee44209befdaa6e355e Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 18:16:49 +0800 Subject: [PATCH 106/172] support deploy executor --- .../long_window/test_count_where.yaml | 143 +++++++++--------- .../java_sdk_test/executor/BaseExecutor.java | 13 +- .../executor/BaseSQLExecutor.java | 8 +- .../executor/BatchSQLExecutor.java | 30 ++-- .../executor/ClusterCliExecutor.java | 9 +- .../executor/CommandExecutor.java | 40 +++-- .../executor/DiffResultExecutor.java | 2 +- .../executor/ExecutorFactory.java | 95 +++++------- .../executor/InsertPreparedExecutor.java | 2 +- .../java_sdk_test/executor/JDBCExecutor.java | 4 +- .../executor/LongWindowExecutor.java | 37 ++--- .../java_sdk_test/executor/MysqlExecutor.java | 13 +- .../executor/QueryPreparedExecutor.java | 4 +- .../executor/RequestQuerySQLExecutor.java | 48 +++--- .../executor/Sqlite3Executor.java | 13 +- .../executor/StandaloneCliExecutor.java | 4 +- .../executor/StoredProcedureSQLExecutor.java | 18 +-- .../cluster/v060/LongWindowTest.java | 23 +++ .../test_common/model/SQLCaseType.java | 3 +- .../openmldb/test_common/util/SDKUtil.java | 37 ++++- .../openmldb/test_common/util/SQLUtil.java | 5 + 21 files changed, 290 insertions(+), 261 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java diff --git a/cases/function/long_window/test_count_where.yaml 
b/cases/function/long_window/test_count_where.yaml index 9d430521188..50109b1b4e7 100644 --- a/cases/function/long_window/test_count_where.yaml +++ b/cases/function/long_window/test_count_where.yaml @@ -21,80 +21,87 @@ cases: longWindow: w1:2 inputs: - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c1:c7"] rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] dataProvider: - ["ROWS","ROWS_RANGE"] sql: | - deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + deploy {0} options(long_windows='w1:2') SELECT id, c1 FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - success: true - - - id: 1 - desc: 长窗口count_where,smallint类型 - - - id: 2 - desc: 长窗口count_where,int类型 - - - id: 3 - desc: 长窗口count_where,bigint类型 - - - id: 4 - desc: 长窗口count_where,string类型 - - - id: 5 - desc: 长窗口count_where,timestamp类型 - - - id: 6 - desc: 长窗口count_where,row类型 - - - id: 7 - desc: 长窗口count_where,bool类型 - - - id: 8 - desc: 长窗口count_where,float类型 - - - id: 9 - desc: 长窗口count_where,double类型 - - - id: 10 - desc: 长窗口count_where,第二个参数使用bool列 - - - id: 11 - 
desc: 长窗口count_where,第二个参数使用= - - - id: 12 - desc: 长窗口count_where,第二个参数使用!= - - - id: 13 - desc: 长窗口count_where,第二个参数使用>= - - - id: 14 - desc: 长窗口count_where,第二个参数使用<= - - - id: 15 - desc: 长窗口count_where,第二个参数使用> - - - id: 16 - desc: 长窗口count_where,第二个参数使用< - - - id: 17 - desc: 长窗口count_where,第二个参数使用and - - - id: 18 - desc: 长窗口count_where,第二个参数使用两个列 - - - id: 19 - desc: 长窗口count_where,第二个参数使用嵌套 - - - id: 20 - desc: 长窗口count_where,第二个参数常量在前 + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] +# - +# id: 1 +# desc: 长窗口count_where,smallint类型 +# - +# id: 2 +# desc: 长窗口count_where,int类型 +# - +# id: 3 +# desc: 长窗口count_where,bigint类型 +# - +# id: 4 +# desc: 长窗口count_where,string类型 +# - +# id: 5 +# desc: 长窗口count_where,timestamp类型 +# - +# id: 6 +# desc: 长窗口count_where,row类型 +# - +# id: 7 +# desc: 长窗口count_where,bool类型 +# - +# id: 8 +# desc: 长窗口count_where,float类型 +# - +# id: 9 +# desc: 长窗口count_where,double类型 +# - +# id: 10 +# desc: 长窗口count_where,第二个参数使用bool列 +# - +# id: 11 +# desc: 长窗口count_where,第二个参数使用= +# - +# id: 12 +# desc: 长窗口count_where,第二个参数使用!= +# - +# id: 13 +# desc: 长窗口count_where,第二个参数使用>= +# - +# id: 14 +# desc: 长窗口count_where,第二个参数使用<= +# - +# id: 15 +# desc: 长窗口count_where,第二个参数使用> +# - +# id: 16 +# desc: 长窗口count_where,第二个参数使用< +# - +# id: 17 +# desc: 长窗口count_where,第二个参数使用and +# - +# id: 18 +# desc: 长窗口count_where,第二个参数使用两个列 +# - +# id: 19 +# desc: 长窗口count_where,第二个参数使用嵌套 +# - +# id: 20 +# desc: 长窗口count_where,第二个参数常量在前 diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java index a795fc5c4e4..adc918aa947 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseExecutor.java @@ -17,7 +17,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import lombok.extern.slf4j.Slf4j; @@ -33,7 +32,7 @@ @Slf4j public abstract class BaseExecutor implements IExecutor{ // protected static final log log = new LogProxy(log); - protected SQLCase fesqlCase; + protected SQLCase sqlCase; protected SQLCaseType executorType; protected String dbName; protected List tableNames = Lists.newArrayList(); @@ -43,13 +42,13 @@ public abstract class BaseExecutor implements IExecutor{ public void run() { String className = Thread.currentThread().getStackTrace()[2].getClassName(); String methodName = Thread.currentThread().getStackTrace()[2].getMethodName(); - System.out.println(className+"."+methodName+":"+fesqlCase.getCaseFileName()+":"+fesqlCase.getDesc() + " Begin!"); - log.info(className+"."+methodName+":"+fesqlCase.getDesc() + " Begin!"); + System.out.println(className+"."+methodName+":"+ sqlCase.getCaseFileName()+":"+ sqlCase.getDesc() + " Begin!"); + log.info(className+"."+methodName+":"+ sqlCase.getDesc() + " Begin!"); boolean verify = false; try { verify = verify(); if(!verify) return; - if (null == fesqlCase) { + if (null == sqlCase) { Assert.fail("executor run with null case"); return; } @@ -58,13 +57,13 @@ public void run() { check(); } catch (Exception e) { e.printStackTrace(); - System.out.println(className+"."+methodName+":"+fesqlCase.getDesc() + " FAIL!"); + System.out.println(className+"."+methodName+":"+ sqlCase.getDesc() + " FAIL!"); Assert.fail("executor run with exception"); 
}finally { if(verify) { tearDown(); } - System.out.println(className+"."+methodName+":"+fesqlCase.getDesc() + " DONE!"); + System.out.println(className+"."+methodName+":"+ sqlCase.getDesc() + " DONE!"); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index c1cc044308a..97dbba8b1f7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -50,7 +50,7 @@ public abstract class BaseSQLExecutor extends BaseExecutor{ public BaseSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { this.executor = executor; - this.fesqlCase = fesqlCase; + this.sqlCase = fesqlCase; this.executorType = executorType; dbName = Objects.isNull(fesqlCase.getDb()) ? 
"" : fesqlCase.getDb(); if (!CollectionUtils.isEmpty(fesqlCase.getInputs())) { @@ -94,7 +94,7 @@ public void execute() { @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(fesqlCase, mainResult, executorType); + List strategyList = CheckerStrategy.build(sqlCase, mainResult, executorType); if(MapUtils.isNotEmpty(resultMap)) { strategyList.add(new DiffVersionChecker(mainResult, resultMap)); } @@ -113,7 +113,7 @@ public void tearDown() { public void tearDown(String version,SqlExecutor executor) { log.info("version:{},begin tear down",version); - List tearDown = fesqlCase.getTearDown(); + List tearDown = sqlCase.getTearDown(); if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ if(MapUtils.isNotEmpty(fedbInfoMap)) { @@ -125,7 +125,7 @@ public void tearDown(String version,SqlExecutor executor) { }); } log.info("version:{},begin drop table",version); - List tables = fesqlCase.getInputs(); + List tables = sqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index 7b91b87981a..e15ff18f75b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -46,28 +46,28 @@ public BatchSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map sqls = fesqlCase.getSqls(); + List sqls = sqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); @@ -102,7 +102,7 @@ public OpenMLDBResult execute(String version, SqlExecutor 
executor){ fesqlResult = SDKUtil.sql(executor, dbName, sql); } } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java index f7a281000df..bddbf14bf5f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ClusterCliExecutor.java @@ -16,7 +16,6 @@ package com._4paradigm.openmldb.java_sdk_test.executor; -import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; @@ -36,12 +35,12 @@ public ClusterCliExecutor(SQLCase fesqlCase, Map openMLDBI @Override public boolean verify() { - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("cluster-cli-unsupport")) { - log.info("skip case in cli mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("cluster-cli-unsupport")) { + log.info("skip case in cli mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("cluster-unsupport")) { - log.info("skip case , mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("cluster-unsupport")) { + log.info("skip case , mode: {}", sqlCase.getDesc()); return false; } return super.verify(); diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java index ad96d8265a1..b068674e72b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java @@ -24,8 +24,6 @@ import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.SDKUtil; -import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; @@ -47,7 +45,7 @@ public class CommandExecutor extends BaseExecutor{ private Map resultMap; public CommandExecutor(SQLCase fesqlCase, SQLCaseType executorType) { - this.fesqlCase = fesqlCase; + this.sqlCase = fesqlCase; this.executorType = executorType; dbName = fesqlCase.getDb(); if (!CollectionUtils.isEmpty(fesqlCase.getInputs())) { @@ -64,28 +62,28 @@ public CommandExecutor(SQLCase fesqlCase, Map openMLDBInfo @Override public boolean verify() { - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("hybridse-only")) { - log.info("skip case in cli mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("hybridse-only")) { + log.info("skip case in cli mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("batch-unsupport")) { - 
log.info("skip case in batch mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("batch-unsupport")) { + log.info("skip case in batch mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-batch-unsupport")) { - log.info("skip case in rtidb batch mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("rtidb-batch-unsupport")) { + log.info("skip case in rtidb batch mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-unsupport")) { - log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("rtidb-unsupport")) { + log.info("skip case in rtidb mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("performance-sensitive-unsupport")) { - log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("performance-sensitive-unsupport")) { + log.info("skip case in rtidb mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("cli-unsupport")) { - log.info("skip case in cli mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("cli-unsupport")) { + log.info("skip case in cli mode: {}", sqlCase.getDesc()); return false; } return true; @@ -103,7 +101,7 @@ protected void prepare(String version, OpenMLDBInfo openMLDBInfo){ log.info("version:{} prepare begin",version); OpenMLDBResult fesqlResult = OpenMLDBCommandUtil.createDB(openMLDBInfo,dbName); log.info("version:{},create db:{},{}", version, dbName, fesqlResult.isOk()); - OpenMLDBResult res = OpenMLDBCommandUtil.createAndInsert(openMLDBInfo, dbName, fesqlCase.getInputs()); + OpenMLDBResult res = 
OpenMLDBCommandUtil.createAndInsert(openMLDBInfo, dbName, sqlCase.getInputs()); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . version:"+version); } @@ -126,7 +124,7 @@ public void execute() { protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ log.info("version:{} execute begin",version); OpenMLDBResult fesqlResult = null; - List sqls = fesqlCase.getSqls(); + List sqls = sqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); @@ -138,7 +136,7 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); } } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); if (StringUtils.isNotEmpty(sql)) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(openMLDBInfoMap)) { @@ -154,7 +152,7 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(fesqlCase, mainResult, executorType); + List strategyList = CheckerStrategy.build(sqlCase, mainResult, executorType); if(MapUtils.isNotEmpty(resultMap)) { strategyList.add(new DiffVersionChecker(mainResult, resultMap)); } @@ -173,7 +171,7 @@ public void tearDown() { public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { log.info("version:{},begin tear down",version); - List tearDown = fesqlCase.getTearDown(); + List tearDown = sqlCase.getTearDown(); if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ if(MapUtils.isNotEmpty(openMLDBInfoMap)) { @@ -185,7 +183,7 @@ public void tearDown(String version,OpenMLDBInfo openMLDBInfo) { }); } log.info("version:{},begin drop table",version); - List tables = fesqlCase.getInputs(); + List tables = sqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java index 06702626378..5ad9f6c96c1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java @@ -89,7 +89,7 @@ public void tearDown() { @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(fesqlCase, mainResult, executorType); + List strategyList = CheckerStrategy.build(sqlCase, mainResult, executorType); strategyList.add(new DiffResultChecker(mainResult, resultMap)); for (Checker checker : strategyList) { checker.check(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java index 084241a5a1b..b6f45de3803 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/ExecutorFactory.java @@ -18,7 +18,6 @@ import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; import com._4paradigm.openmldb.sdk.SqlExecutor; -import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import 
com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; @@ -29,116 +28,104 @@ @Slf4j public class ExecutorFactory { - private static ReportLog reportLog = ReportLog.of(); - - public static IExecutor build(SQLCase fesqlCase, SQLCaseType type) { + public static IExecutor build(SQLCase sqlCase, SQLCaseType type) { switch (type){ case kSQLITE3: - return new Sqlite3Executor(fesqlCase,type); + return new Sqlite3Executor(sqlCase,type); case kMYSQL: - return new MysqlExecutor(fesqlCase,type); + return new MysqlExecutor(sqlCase,type); case kCLI: - return new CommandExecutor(fesqlCase,type); + return new CommandExecutor(sqlCase,type); case kStandaloneCLI: - return new StandaloneCliExecutor(fesqlCase,type); + return new StandaloneCliExecutor(sqlCase,type); case kClusterCLI: - return new ClusterCliExecutor(fesqlCase,type); + return new ClusterCliExecutor(sqlCase,type); } return null; } - public static IExecutor build(SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCase fesqlCase, SQLCaseType type) { + public static IExecutor build(SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCase sqlCase, SQLCaseType type) { switch (type) { case kDiffBatch: { - return new BatchSQLExecutor(fesqlCase, executor, executorMap, fedbInfoMap, type); + return new BatchSQLExecutor(sqlCase, executor, executorMap, fedbInfoMap, type); } case kDiffRequest:{ - return new RequestQuerySQLExecutor(fesqlCase, executor, executorMap, fedbInfoMap, false, false, type); + return new RequestQuerySQLExecutor(sqlCase, executor, executorMap, fedbInfoMap, false, false, type); } case kDiffRequestWithSp:{ - return new StoredProcedureSQLExecutor(fesqlCase, executor, executorMap, fedbInfoMap, false, false, type); + return new StoredProcedureSQLExecutor(sqlCase, executor, executorMap, fedbInfoMap, false, false, type); } case kDiffRequestWithSpAsync:{ - return new StoredProcedureSQLExecutor(fesqlCase, executor, executorMap, fedbInfoMap, false, true, type); + return new StoredProcedureSQLExecutor(sqlCase, 
executor, executorMap, fedbInfoMap, false, true, type); } } return null; } - public static BaseSQLExecutor build(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType type) { + public static BaseSQLExecutor build(SqlExecutor executor, SQLCase sqlCase, SQLCaseType type) { switch (type) { case kDDL: { - return getDDLExecutor(executor, fesqlCase, type); + return getDDLExecutor(executor, sqlCase, type); } case kInsertPrepared: { - return new InsertPreparedExecutor(executor,fesqlCase,type); + return new InsertPreparedExecutor(executor,sqlCase,type); } case kSelectPrepared: { - return new QueryPreparedExecutor(executor,fesqlCase,type); + return new QueryPreparedExecutor(executor,sqlCase,type); } case kBatch: { - return getFeBatchQueryExecutor(executor, fesqlCase, type); + return getFeBatchQueryExecutor(executor, sqlCase, type); } case kRequest: { - return getFeRequestQueryExecutor(executor, fesqlCase, type); + return getFeRequestQueryExecutor(executor, sqlCase, type); } case kBatchRequest: { - return getFeBatchRequestQueryExecutor(executor, fesqlCase, type); + return getFeBatchRequestQueryExecutor(executor, sqlCase, type); } case kRequestWithSp: { - return getFeRequestQueryWithSpExecutor(executor, fesqlCase, false, type); + return getFeRequestQueryWithSpExecutor(executor, sqlCase, false, type); } case kRequestWithSpAsync: { - return getFeRequestQueryWithSpExecutor(executor, fesqlCase, true, type); + return getFeRequestQueryWithSpExecutor(executor, sqlCase, true, type); } case kBatchRequestWithSp: { - return getFeBatchRequestQueryWithSpExecutor(executor, fesqlCase, false, type); + return getFeBatchRequestQueryWithSpExecutor(executor, sqlCase, false, type); } case kBatchRequestWithSpAsync: { - return getFeBatchRequestQueryWithSpExecutor(executor, fesqlCase, true, type); + return getFeBatchRequestQueryWithSpExecutor(executor, sqlCase, true, type); } case kDiffSQLResult: - return new DiffResultExecutor(executor,fesqlCase,type); + return new 
DiffResultExecutor(executor,sqlCase,type); + case kLongWindow: + return new LongWindowExecutor(executor,sqlCase,false,false,type); } return null; } - private static BaseSQLExecutor getDDLExecutor(SqlExecutor sqlExecutor, SQLCase fesqlCase, SQLCaseType type) { - BaseSQLExecutor executor = null; - executor = new BatchSQLExecutor(sqlExecutor, fesqlCase, type); - return executor; + private static BaseSQLExecutor getDDLExecutor(SqlExecutor sqlExecutor, SQLCase sqlCase, SQLCaseType type) { + return new BatchSQLExecutor(sqlExecutor, sqlCase, type); } - private static BaseSQLExecutor getFeBatchQueryExecutor(SqlExecutor sqlExecutor, SQLCase fesqlCase, SQLCaseType type) { + private static BaseSQLExecutor getFeBatchQueryExecutor(SqlExecutor sqlExecutor, SQLCase sqlCase, SQLCaseType type) { if (OpenMLDBConfig.isCluster()) { log.info("cluster unsupport batch query mode"); - reportLog.info("cluster unsupport batch query mode"); - return new NullExecutor(sqlExecutor, fesqlCase, type); + return new NullExecutor(sqlExecutor, sqlCase, type); } - BaseSQLExecutor executor = null; - executor = new BatchSQLExecutor(sqlExecutor, fesqlCase, type); - return executor; + return new BatchSQLExecutor(sqlExecutor, sqlCase, type); } - private static BaseSQLExecutor getFeRequestQueryExecutor(SqlExecutor sqlExecutor, SQLCase fesqlCase, SQLCaseType type) { - BaseSQLExecutor executor = null; - executor = new RequestQuerySQLExecutor(sqlExecutor, fesqlCase, false, false, type); - return executor; + private static BaseSQLExecutor getFeRequestQueryExecutor(SqlExecutor sqlExecutor, SQLCase sqlCase, SQLCaseType type) { + return new RequestQuerySQLExecutor(sqlExecutor, sqlCase, false, false, type); } private static BaseSQLExecutor getFeBatchRequestQueryExecutor(SqlExecutor sqlExecutor, - SQLCase fesqlCase, SQLCaseType type) { - RequestQuerySQLExecutor executor = new RequestQuerySQLExecutor( - sqlExecutor, fesqlCase, true, false, type); - return executor; + SQLCase sqlCase, SQLCaseType type) { + return new 
RequestQuerySQLExecutor( + sqlExecutor, sqlCase, true, false, type); } - private static BaseSQLExecutor getFeRequestQueryWithSpExecutor(SqlExecutor sqlExecutor, SQLCase fesqlCase, boolean isAsyn, SQLCaseType type) { - BaseSQLExecutor executor = null; - executor = new StoredProcedureSQLExecutor( - sqlExecutor, fesqlCase, false, isAsyn, type); - return executor; + private static BaseSQLExecutor getFeRequestQueryWithSpExecutor(SqlExecutor sqlExecutor, SQLCase sqlCase, boolean isAsyn, SQLCaseType type) { + return new StoredProcedureSQLExecutor( + sqlExecutor, sqlCase, false, isAsyn, type); } - private static BaseSQLExecutor getFeBatchRequestQueryWithSpExecutor(SqlExecutor sqlExecutor, SQLCase fesqlCase, boolean isAsyn, SQLCaseType type) { - BaseSQLExecutor executor = null; - executor = new StoredProcedureSQLExecutor( - sqlExecutor, fesqlCase, fesqlCase.getBatch_request() != null, isAsyn, type); - return executor; + private static BaseSQLExecutor getFeBatchRequestQueryWithSpExecutor(SqlExecutor sqlExecutor, SQLCase sqlCase, boolean isAsyn, SQLCaseType type) { + return new StoredProcedureSQLExecutor( + sqlExecutor, sqlCase, sqlCase.getBatch_request() != null, isAsyn, type); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java index e12901f63f3..3f50b9cb989 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/InsertPreparedExecutor.java @@ -45,7 +45,7 @@ public void prepare(String version,SqlExecutor executor){ log.info("version:{} prepare begin",version); boolean dbOk = 
executor.createDB(dbName); log.info("version:{},create db:{},{}", version, dbName, dbOk); - OpenMLDBResult res = SDKUtil.createAndInsertWithPrepared(executor, dbName, fesqlCase.getInputs(), false); + OpenMLDBResult res = SDKUtil.createAndInsertWithPrepared(executor, dbName, sqlCase.getInputs(), false); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . version:"+version); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java index d1b3ce96990..fddb6c09e23 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java @@ -34,7 +34,7 @@ public abstract class JDBCExecutor extends BaseExecutor{ public JDBCExecutor(SQLCase fesqlCase, SQLCaseType sqlCaseType) { - this.fesqlCase = fesqlCase; + this.sqlCase = fesqlCase; this.executorType = sqlCaseType; dbName = fesqlCase.getDb(); if (!CollectionUtils.isEmpty(fesqlCase.getInputs())) { @@ -46,7 +46,7 @@ public JDBCExecutor(SQLCase fesqlCase, SQLCaseType sqlCaseType) { @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(fesqlCase, mainResult,executorType); + List strategyList = CheckerStrategy.build(sqlCase, mainResult,executorType); for (Checker checker : strategyList) { checker.check(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java index 0d6958e69dc..d71357cb510 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java @@ -21,7 +21,6 @@ import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.openmldb.test_common.util.SDKUtil; -import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; @@ -51,22 +50,18 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { log.info("version:{} execute begin",version); OpenMLDBResult fesqlResult = null; try { - if (fesqlCase.getInputs().isEmpty() || - CollectionUtils.isEmpty(fesqlCase.getInputs().get(0).getRows())) { + if (sqlCase.getInputs().isEmpty() || + CollectionUtils.isEmpty(sqlCase.getInputs().get(0).getRows())) { log.error("fail to execute in request query sql executor: sql case inputs is empty"); return null; } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); log.info("sql: {}", sql); if (sql == null || sql.length() == 0) { return null; } - if (fesqlCase.getBatch_request() != null) { - fesqlResult = executeBatch(executor, sql, this.isAsyn); - } else { - fesqlResult = executeSingle(executor, sql, this.isAsyn); - } - spNames.add(fesqlCase.getSpName()); + fesqlResult = SDKUtil.executeLongWindowDeploy(executor, sqlCase, this.isAsyn); + spNames.add(sqlCase.getSpName()); }catch (Exception e){ e.printStackTrace(); } @@ -74,21 +69,13 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { 
return fesqlResult; } - private OpenMLDBResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { - String spSql = fesqlCase.getProcedure(sql); - log.info("spSql: {}", spSql); - return SDKUtil.sqlRequestModeWithProcedure( - executor, dbName, fesqlCase.getSpName(), null == fesqlCase.getBatch_request(), - spSql, fesqlCase.getInputs().get(0), isAsyn); - } - - private OpenMLDBResult executeBatch(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { - String spName = "sp_" + tableNames.get(0) + "_" + System.currentTimeMillis(); - String spSql = SQLUtil.buildSpSQLWithConstColumns(spName, sql, fesqlCase.getBatch_request()); - log.info("spSql: {}", spSql); - return SDKUtil.selectBatchRequestModeWithSp( - executor, dbName, spName, spSql, fesqlCase.getBatch_request(), isAsyn); - } +// private OpenMLDBResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { +// String spSql = sqlCase.getProcedure(sql); +// log.info("spSql: {}", spSql); +// return SDKUtil.sqlRequestModeWithProcedure( +// executor, dbName, sqlCase.getSpName(), null == sqlCase.getBatch_request(), +// spSql, sqlCase.getInputs().get(0), isAsyn); +// } // @Override diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java index 1a1a56017e8..50a4a4d8b8b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/MysqlExecutor.java @@ -16,7 +16,6 @@ package com._4paradigm.openmldb.java_sdk_test.executor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import 
com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.java_sdk_test.util.JDBCUtil; import com._4paradigm.openmldb.java_sdk_test.util.MysqlUtil; import com._4paradigm.openmldb.test_common.model.DBType; @@ -41,18 +40,18 @@ public MysqlExecutor(SQLCase fesqlCase, SQLCaseType sqlCaseType) { @Override public boolean verify() { - List sqlDialect = fesqlCase.getSqlDialect(); + List sqlDialect = sqlCase.getSqlDialect(); if(sqlDialect.contains(DBType.ANSISQL.name())|| sqlDialect.contains(DBType.MYSQL.name())){ return true; } - log.info("skip case in mysql mode: {}", fesqlCase.getDesc()); + log.info("skip case in mysql mode: {}", sqlCase.getDesc()); return false; } @Override public void prepare() { log.info("mysql prepare begin"); - for(InputDesc inputDesc:fesqlCase.getInputs()) { + for(InputDesc inputDesc: sqlCase.getInputs()) { String createSql = MysqlUtil.getCreateTableSql(inputDesc); JDBCUtil.executeUpdate(createSql, DBType.MYSQL); boolean ok = MysqlUtil.insertData(inputDesc); @@ -67,14 +66,14 @@ public void prepare() { public void execute() { log.info("mysql execute begin"); OpenMLDBResult fesqlResult = null; - List sqls = fesqlCase.getSqls(); + List sqls = sqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.MYSQL); } } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.MYSQL); @@ -86,7 +85,7 @@ public void execute() { @Override public void tearDown() { log.info("mysql,begin drop table"); - List tables = fesqlCase.getInputs(); + List tables = sqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java index 8ef1b9332d3..2edd703fc01 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java @@ -61,7 +61,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ // fesqlResult = FesqlUtil.sql(executor, dbName, sql); // } // } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { @@ -69,7 +69,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ }else { sql = SQLUtil.formatSql(sql, tableNames); } - InputDesc parameters = fesqlCase.getParameters(); + InputDesc parameters = sqlCase.getParameters(); List types = parameters.getColumns().stream().map(s -> s.split("\\s+")[1]).collect(Collectors.toList()); List objects = parameters.getRows().get(0); fesqlResult = SDKUtil.selectWithPrepareStatement(executor, dbName,sql, types,objects); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index 66c9eaa3f27..0c5545eea3c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -57,7 +57,7 @@ public OpenMLDBResult 
execute(String version, SqlExecutor executor) { log.info("version:{} execute begin",version); OpenMLDBResult fesqlResult = null; try { - List sqls = fesqlCase.getSqls(); + List sqls = sqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); @@ -69,7 +69,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { fesqlResult = SDKUtil.sql(executor, dbName, sql); } } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); if(MapUtils.isNotEmpty(fedbInfoMap)) { @@ -79,7 +79,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { } InputDesc request = null; if (isBatchRequest) { - InputDesc batchRequest = fesqlCase.getBatch_request(); + InputDesc batchRequest = sqlCase.getBatch_request(); if (batchRequest == null) { log.error("No batch request provided in case"); return null; @@ -96,16 +96,16 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { fesqlResult = SDKUtil.sqlBatchRequestMode( executor, dbName, sql, batchRequest, commonColumnIndices); } else { - if (null != fesqlCase.getBatch_request()) { - request = fesqlCase.getBatch_request(); - } else if (!fesqlCase.getInputs().isEmpty()) { - request = fesqlCase.getInputs().get(0); + if (null != sqlCase.getBatch_request()) { + request = sqlCase.getBatch_request(); + } else if (!sqlCase.getInputs().isEmpty()) { + request = sqlCase.getInputs().get(0); } if (null == request || CollectionUtils.isEmpty(request.getColumns())) { log.error("fail to execute in request query sql executor: sql case request columns is empty"); return null; } - fesqlResult = SDKUtil.sqlRequestMode(executor, dbName, null == fesqlCase.getBatch_request(), sql, request); + fesqlResult = SDKUtil.sqlRequestMode(executor, dbName, null == sqlCase.getBatch_request(), sql, request); } } }catch (Exception e){ @@ -120,8 +120,8 @@ protected void prepare(String version,SqlExecutor 
executor) { log.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); log.info("create db:{},{}", dbName, dbOk); - boolean useFirstInputAsRequests = !isBatchRequest && null == fesqlCase.getBatch_request(); - OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, fesqlCase.getInputs(), useFirstInputAsRequests); + boolean useFirstInputAsRequests = !isBatchRequest && null == sqlCase.getBatch_request(); + OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, sqlCase.getInputs(), useFirstInputAsRequests); if (!res.isOk()) { throw new RuntimeException("fail to run RequestQuerySQLExecutor: prepare fail"); } @@ -130,33 +130,33 @@ protected void prepare(String version,SqlExecutor executor) { @Override public boolean verify() { - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("hybridse-only")) { - log.info("skip case in request mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("hybridse-only")) { + log.info("skip case in request mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("request-unsupport")) { - log.info("skip case in request mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("request-unsupport")) { + log.info("skip case in request mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-unsupport")) { - log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("rtidb-unsupport")) { + log.info("skip case in rtidb mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("performance-sensitive-unsupport")) { - log.info("skip case in rtidb mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("performance-sensitive-unsupport")) { + 
log.info("skip case in rtidb mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("rtidb-request-unsupport")) { - log.info("skip case in rtidb request mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("rtidb-request-unsupport")) { + log.info("skip case in rtidb request mode: {}", sqlCase.getDesc()); return false; } - if (null != fesqlCase.getMode() && !OpenMLDBGlobalVar.tableStorageMode.equals("memory") && fesqlCase.getMode().contains("disk-unsupport")) { - log.info("skip case in disk mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && !OpenMLDBGlobalVar.tableStorageMode.equals("memory") && sqlCase.getMode().contains("disk-unsupport")) { + log.info("skip case in disk mode: {}", sqlCase.getDesc()); return false; } if (OpenMLDBConfig.isCluster() && - null != fesqlCase.getMode() && fesqlCase.getMode().contains("cluster-unsupport")) { - log.info("cluster-unsupport, skip case in cluster request mode: {}", fesqlCase.getDesc()); + null != sqlCase.getMode() && sqlCase.getMode().contains("cluster-unsupport")) { + log.info("cluster-unsupport, skip case in cluster request mode: {}", sqlCase.getDesc()); return false; } return true; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java index fc3ca356c67..e1b59f86961 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/Sqlite3Executor.java @@ -17,7 +17,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import 
com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.java_sdk_test.util.JDBCUtil; import com._4paradigm.openmldb.java_sdk_test.util.Sqlite3Util; import com._4paradigm.openmldb.test_common.model.DBType; @@ -42,18 +41,18 @@ public Sqlite3Executor(SQLCase fesqlCase, SQLCaseType sqlCaseType) { @Override public boolean verify() { - List sqlDialect = fesqlCase.getSqlDialect(); + List sqlDialect = sqlCase.getSqlDialect(); if(sqlDialect.contains(DBType.ANSISQL.name())|| sqlDialect.contains(DBType.SQLITE3.name())){ return true; } - log.info("skip case in sqlite3 mode: {}", fesqlCase.getDesc()); + log.info("skip case in sqlite3 mode: {}", sqlCase.getDesc()); return false; } @Override public void prepare() { log.info("sqlite3 prepare begin"); - for(InputDesc inputDesc:fesqlCase.getInputs()) { + for(InputDesc inputDesc: sqlCase.getInputs()) { String createSql = Sqlite3Util.getCreateTableSql(inputDesc); JDBCUtil.executeUpdate(createSql,DBType.SQLITE3); boolean ok = Sqlite3Util.insertData(inputDesc); @@ -68,14 +67,14 @@ public void prepare() { public void execute() { log.info("sqlite3 execute begin"); OpenMLDBResult fesqlResult = null; - List sqls = fesqlCase.getSqls(); + List sqls = sqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.SQLITE3); } } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { sql = SQLUtil.formatSql(sql, tableNames); fesqlResult = JDBCUtil.executeQuery(sql,DBType.SQLITE3); @@ -87,7 +86,7 @@ public void execute() { @Override public void tearDown() { log.info("sqlite3,begin drop table"); - List tables = fesqlCase.getInputs(); + List tables = sqlCase.getInputs(); if (CollectionUtils.isEmpty(tables)) { return; } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java index 2e3b12e690b..d326bdd8cfb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StandaloneCliExecutor.java @@ -35,8 +35,8 @@ public StandaloneCliExecutor(SQLCase fesqlCase, Map openML @Override public boolean verify() { - if (null != fesqlCase.getMode() && fesqlCase.getMode().contains("standalone-unsupport")) { - log.info("skip case in cli mode: {}", fesqlCase.getDesc()); + if (null != sqlCase.getMode() && sqlCase.getMode().contains("standalone-unsupport")) { + log.info("skip case in cli mode: {}", sqlCase.getDesc()); return false; } return super.verify(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index c588f29def7..7a85246c3fe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -52,20 +52,20 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { log.info("version:{} execute begin",version); OpenMLDBResult fesqlResult = null; try { - if (fesqlCase.getInputs().isEmpty() || 
CollectionUtils.isEmpty(fesqlCase.getInputs().get(0).getRows())) { + if (sqlCase.getInputs().isEmpty() || CollectionUtils.isEmpty(sqlCase.getInputs().get(0).getRows())) { throw new IllegalArgumentException("fail to execute in request query sql executor: sql case inputs is empty"); } - String sql = fesqlCase.getSql(); + String sql = sqlCase.getSql(); log.info("sql: {}", sql); if (StringUtils.isEmpty(sql)) { throw new IllegalArgumentException("fail to execute in request query sql executor: sql is empty"); } - if (fesqlCase.getBatch_request() != null) { + if (sqlCase.getBatch_request() != null) { fesqlResult = executeBatch(executor, sql, this.isAsyn); } else { fesqlResult = executeSingle(executor, sql, this.isAsyn); } - spNames.add(fesqlCase.getSpName()); + spNames.add(sqlCase.getSpName()); }catch (Exception e){ e.printStackTrace(); } @@ -74,19 +74,19 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { } private OpenMLDBResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { - String spSql = fesqlCase.getProcedure(sql); + String spSql = sqlCase.getProcedure(sql); log.info("spSql: {}", spSql); return SDKUtil.sqlRequestModeWithProcedure( - executor, dbName, fesqlCase.getSpName(), null == fesqlCase.getBatch_request(), - spSql, fesqlCase.getInputs().get(0), isAsyn); + executor, dbName, sqlCase.getSpName(), null == sqlCase.getBatch_request(), + spSql, sqlCase.getInputs().get(0), isAsyn); } private OpenMLDBResult executeBatch(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { String spName = "sp_" + tableNames.get(0) + "_" + System.currentTimeMillis(); - String spSql = SQLUtil.buildSpSQLWithConstColumns(spName, sql, fesqlCase.getBatch_request()); + String spSql = SQLUtil.buildSpSQLWithConstColumns(spName, sql, sqlCase.getBatch_request()); log.info("spSql: {}", spSql); return SDKUtil.selectBatchRequestModeWithSp( - executor, dbName, spName, spSql, fesqlCase.getBatch_request(), isAsyn); + executor, 
dbName, spName, spSql, sqlCase.getBatch_request(), isAsyn); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java new file mode 100644 index 00000000000..92cec6149fc --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java @@ -0,0 +1,23 @@ +package com._4paradigm.openmldb.java_sdk_test.cluster.v060; + +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; +import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Feature; +import io.qameta.allure.Story; +import lombok.extern.slf4j.Slf4j; +import org.testng.annotations.Test; + +@Slf4j +@Feature("long_window") +public class LongWindowTest extends OpenMLDBTest { + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "function/long_window/test_count_where.yaml") + @Story("longWindowDeploy") + public void testLongWindow2(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kLongWindow).run(); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCaseType.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCaseType.java index e5828e35169..3b2193f5177 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCaseType.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCaseType.java @@ -43,7 +43,8 @@ public enum SQLCaseType { kStandaloneCLI("StandaloneCLI"), kClusterCLI("ClusterCLI"), kInsertPrepared("INSERT_PREPARED"), - kSelectPrepared("SELECT_PREPARED") + kSelectPrepared("SELECT_PREPARED"), + kLongWindow("LONG_WINDOW_DEPLOY") ; @Getter private String typeName; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index a3d4e181b63..92e33df97cd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -22,7 +22,6 @@ import com._4paradigm.openmldb.sdk.QueryFuture; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.chain.result.ResultParserManager; -import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -54,6 +53,32 @@ public static OpenMLDBResult sqlList(SqlExecutor executor, String dbName, List> dataList) { if (CollectionUtils.isEmpty(dataList)) { return ""; From db2d33b5b9fb71e7f4f06a273ece30217b106f4c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 28 Jul 2022 18:28:53 +0800 Subject: [PATCH 107/172] support deploy executor --- cases/function/long_window/test_count_where.yaml | 3 +-- .../java_sdk_test/executor/RequestQuerySQLExecutor.java | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml index 50109b1b4e7..1747b48e0f6 100644 --- a/cases/function/long_window/test_count_where.yaml +++ b/cases/function/long_window/test_count_where.yaml @@ -22,7 +22,6 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - indexs: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] @@ -32,7 +31,7 @@ cases: dataProvider: - ["ROWS","ROWS_RANGE"] sql: | - deploy {0} options(long_windows='w1:2') SELECT id, c1 FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_c4_sum bigint"] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index 0c5545eea3c..2d16627e1eb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -120,6 +120,7 @@ protected void prepare(String version,SqlExecutor executor) { log.info("version:{} prepare begin",version); boolean dbOk = executor.createDB(dbName); log.info("create db:{},{}", dbName, dbOk); + SDKUtil.useDB(executor,dbName); boolean useFirstInputAsRequests = !isBatchRequest && null == 
sqlCase.getBatch_request(); OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, sqlCase.getInputs(), useFirstInputAsRequests); if (!res.isOk()) { From 66ce4e05e7853a23cbc3be12440bbd85475e3c4b Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 29 Jul 2022 14:21:59 +0800 Subject: [PATCH 108/172] deploy --- .../long_window/test_count_where.yaml | 122 +++++++++--------- .../test-suite/test_deploy_tmp2.xml | 2 +- .../executor/BaseSQLExecutor.java | 8 +- .../executor/BatchSQLExecutor.java | 8 +- .../executor/LongWindowExecutor.java | 2 +- .../executor/QueryPreparedExecutor.java | 4 +- .../executor/RequestQuerySQLExecutor.java | 8 +- .../executor/StoredProcedureSQLExecutor.java | 4 +- .../openmldb/test_common/util/SDKUtil.java | 10 +- 9 files changed, 85 insertions(+), 83 deletions(-) diff --git a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml index 1747b48e0f6..36dbfd72fd0 100644 --- a/cases/function/long_window/test_count_where.yaml +++ b/cases/function/long_window/test_count_where.yaml @@ -34,73 +34,73 @@ cases: SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id - columns: ["id int","c1 string","w1_c4_sum bigint"] + columns: ["id int","c1 string","w1_count bigint"] rows: - [1,"aa",1] - [2,"aa",2] - [3,"aa",3] - [4,"aa",2] - [5,"aa",1] -# - -# id: 1 -# desc: 长窗口count_where,smallint类型 -# - -# id: 2 -# desc: 长窗口count_where,int类型 -# - -# id: 3 -# desc: 长窗口count_where,bigint类型 -# - -# id: 4 -# desc: 长窗口count_where,string类型 -# - -# id: 5 -# desc: 长窗口count_where,timestamp类型 -# - -# id: 6 -# desc: 长窗口count_where,row类型 -# - -# id: 7 -# desc: 长窗口count_where,bool类型 -# - -# id: 8 -# desc: 长窗口count_where,float类型 -# - -# id: 9 -# desc: 长窗口count_where,double类型 -# - -# id: 10 -# desc: 长窗口count_where,第二个参数使用bool列 -# - -# id: 11 -# desc: 长窗口count_where,第二个参数使用= -# - -# id: 12 -# desc: 
长窗口count_where,第二个参数使用!= -# - -# id: 13 -# desc: 长窗口count_where,第二个参数使用>= -# - -# id: 14 -# desc: 长窗口count_where,第二个参数使用<= -# - -# id: 15 -# desc: 长窗口count_where,第二个参数使用> -# - -# id: 16 -# desc: 长窗口count_where,第二个参数使用< -# - -# id: 17 -# desc: 长窗口count_where,第二个参数使用and -# - -# id: 18 -# desc: 长窗口count_where,第二个参数使用两个列 -# - -# id: 19 -# desc: 长窗口count_where,第二个参数使用嵌套 -# - -# id: 20 -# desc: 长窗口count_where,第二个参数常量在前 + - + id: 1 + desc: 长窗口count_where,smallint类型 + - + id: 2 + desc: 长窗口count_where,int类型 + - + id: 3 + desc: 长窗口count_where,bigint类型 + - + id: 4 + desc: 长窗口count_where,string类型 + - + id: 5 + desc: 长窗口count_where,timestamp类型 + - + id: 6 + desc: 长窗口count_where,row类型 + - + id: 7 + desc: 长窗口count_where,bool类型 + - + id: 8 + desc: 长窗口count_where,float类型 + - + id: 9 + desc: 长窗口count_where,double类型 + - + id: 10 + desc: 长窗口count_where,第二个参数使用bool列 + - + id: 11 + desc: 长窗口count_where,第二个参数使用= + - + id: 12 + desc: 长窗口count_where,第二个参数使用!= + - + id: 13 + desc: 长窗口count_where,第二个参数使用>= + - + id: 14 + desc: 长窗口count_where,第二个参数使用<= + - + id: 15 + desc: 长窗口count_where,第二个参数使用> + - + id: 16 + desc: 长窗口count_where,第二个参数使用< + - + id: 17 + desc: 长窗口count_where,第二个参数使用and + - + id: 18 + desc: 长窗口count_where,第二个参数使用两个列 + - + id: 19 + desc: 长窗口count_where,第二个参数使用嵌套 + - + id: 20 + desc: 长窗口count_where,第二个参数常量在前 diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml index 629590aeb19..d67cb1e5448 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml @@ -2,7 +2,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index 97dbba8b1f7..57daa0b02f5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -45,7 +45,7 @@ public abstract class BaseSQLExecutor extends BaseExecutor{ protected SqlExecutor executor; private Map executorMap; - protected Map fedbInfoMap; + protected Map openMLDBInfoMap; private Map resultMap; public BaseSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { @@ -64,7 +64,7 @@ public BaseSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map tearDown = sqlCase.getTearDown(); if(CollectionUtils.isNotEmpty(tearDown)){ tearDown.forEach(sql->{ - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { sql = SQLUtil.formatSql(sql, tableNames); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index e15ff18f75b..82e07504dd0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -94,8 +94,8 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ if (sqls != null && 
sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { sql = SQLUtil.formatSql(sql, tableNames); } @@ -105,8 +105,8 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { sql = SQLUtil.formatSql(sql, tableNames); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java index d71357cb510..2e500d8b983 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java @@ -33,7 +33,7 @@ @Slf4j public class LongWindowExecutor extends StoredProcedureSQLExecutor { - private List spNames; +// private List spNames; public LongWindowExecutor(SqlExecutor executor, SQLCase fesqlCase, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { super(executor, fesqlCase, isBatchRequest, isAsyn, executorType); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java index 2edd703fc01..c7b75bef3d9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/QueryPreparedExecutor.java @@ -64,8 +64,8 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { sql = SQLUtil.formatSql(sql, tableNames); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java index 2d16627e1eb..62a3f230c30 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/RequestQuerySQLExecutor.java @@ -61,8 +61,8 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { // log.info("sql:{}", sql); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, 
tableNames, openMLDBInfoMap.get(version)); }else { sql = SQLUtil.formatSql(sql, tableNames); } @@ -72,8 +72,8 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { String sql = sqlCase.getSql(); if (sql != null && sql.length() > 0) { // log.info("sql:{}", sql); - if(MapUtils.isNotEmpty(fedbInfoMap)) { - sql = SQLUtil.formatSql(sql, tableNames, fedbInfoMap.get(version)); + if(MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); }else { sql = SQLUtil.formatSql(sql, tableNames); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 7a85246c3fe..4de340711be 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -35,7 +35,7 @@ @Slf4j public class StoredProcedureSQLExecutor extends RequestQuerySQLExecutor { - private List spNames; + protected List spNames; public StoredProcedureSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { super(executor, fesqlCase, isBatchRequest, isAsyn, executorType); @@ -92,7 +92,7 @@ private OpenMLDBResult executeBatch(SqlExecutor executor, String sql, boolean is @Override public void tearDown(String version,SqlExecutor executor) { - log.info("version:{},begin drop table",version); + log.info("version:{},begin tearDown",version); if (CollectionUtils.isEmpty(spNames)) { return; } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 92e33df97cd..60246425180 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -55,7 +55,6 @@ public static OpenMLDBResult sqlList(SqlExecutor executor, String dbName, List Date: Fri, 29 Jul 2022 22:03:19 +0800 Subject: [PATCH 109/172] deploy --- .../openmldb-deploy/test-suite/test_deploy.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 46a07223367..f512a758c26 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,8 +3,8 @@ - - + + From f53b62a90b1ef95f1e52aebc5682c542a39f9e2e Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 1 Aug 2022 14:08:18 +0800 Subject: [PATCH 110/172] modify case --- cases/function/dml/test_delete.yaml | 36 +- .../long_window/test_count_where.yaml | 449 +++++++++++++++++- .../executor/StoredProcedureSQLExecutor.java | 2 +- .../openmldb-test-common/pom.xml | 4 + .../openmldb/test_common/util/SDKUtil.java | 49 +- 5 files changed, 507 insertions(+), 33 deletions(-) diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml index 2db00c73e7d..3f494605692 100644 --- a/cases/function/dml/test_delete.yaml +++ b/cases/function/dml/test_delete.yaml @@ -71,7 +71,7 @@ cases: - delete from {0} where 
c1='aa' or c1='cc'; expect: success: false - msg: failed + msg: fail - id: 3 desc: delete 两个索引的两个key @@ -88,7 +88,7 @@ cases: - delete from {0} where c1='aa' or c2=1; expect: success: false - msg: failed + msg: fail - id: 4 desc: 两个索引 delete 其中一个 @@ -144,6 +144,7 @@ cases: - select * from {0}; expect: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -200,6 +201,7 @@ cases: - select * from {0}; expect: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id rows: - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] @@ -255,6 +257,7 @@ cases: - select * from {0}; expect: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] @@ -367,7 +370,7 @@ cases: sql: delete from {0}1 where c1='aa'; expect: success: false - msg: failed + msg: fail - id: 20 desc: delete列不存在 @@ -384,7 +387,7 @@ cases: - delete from {0} where c11=1; expect: success: false - msg: failed + msg: fail - id: 21 desc: delete 其他库的数据 @@ -496,11 +499,9 @@ cases: - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] sqls: - delete from {0} where c1!='cc'; - - select * from {0}; expect: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - rows: - - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + success: false + msg: fail - id: 27 desc: 比较运算符删除 @@ -514,11 +515,9 @@ cases: - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true] sqls: - delete from {0} where c2>=2; - - select * 
from {0}; expect: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - rows: - - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + success: false + msg: fail - id: 28 desc: 表名为job delete @@ -538,3 +537,16 @@ cases: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] rows: - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 29 + desc: delete空表 + inputs: + - + name: job + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - delete from {0} where c1='aa'; + expect: + success: true + diff --git a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml index 36dbfd72fd0..be270cfa650 100644 --- a/cases/function/long_window/test_count_where.yaml +++ b/cases/function/long_window/test_count_where.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: [] +debugs: ["长窗口count_where,date类型","长窗口count_where,rows"] cases: - id: 0 @@ -22,16 +22,40 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 0-1 + desc: 长窗口count_where,rows + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -44,64 +68,473 @@ cases: - id: 1 desc: 长窗口count_where,smallint类型 + longWindow: w1:2 + 
inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 2 desc: 长窗口count_where,int类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 3 desc: 长窗口count_where,bigint类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - 
[2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 4 desc: 长窗口count_where,string类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 5 desc: 长窗口count_where,timestamp类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - 
[5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 6 desc: 长窗口count_where,row类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 7 desc: 长窗口count_where,bool类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 
PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 8 desc: 长窗口count_where,float类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 9 desc: 长窗口count_where,double类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 10 desc: 长窗口count_where,第二个参数使用bool列 + 
longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail - id: 11 desc: 长窗口count_where,第二个参数使用= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",0] + - [2,"aa",0] + - [3,"aa",0] + - [4,"aa",1] + - [5,"aa",1] - id: 12 desc: 长窗口count_where,第二个参数使用!= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - 
[4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",2] - id: 13 desc: 长窗口count_where,第二个参数使用>= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",0] + - [2,"aa",1] + - [3,"aa",2] + - [4,"aa",3] + - [5,"aa",3] - id: 14 desc: 长窗口count_where,第二个参数使用<= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count 
FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] - id: 15 desc: 长窗口count_where,第二个参数使用> - - - id: 16 - desc: 长窗口count_where,第二个参数使用< + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",0] + - [2,"aa",1] + - [3,"aa",2] + - [4,"aa",3] + - [5,"aa",3] - id: 17 desc: 长窗口count_where,第二个参数使用and + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail - id: 18 desc: 
长窗口count_where,第二个参数使用两个列 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail - id: 19 desc: 长窗口count_where,第二个参数使用嵌套 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail - id: 20 desc: 长窗口count_where,第二个参数常量在前 - + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - 
[5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 4de340711be..896c499fbb2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -98,7 +98,7 @@ public void tearDown(String version,SqlExecutor executor) { } for (String spName : spNames) { String drop = "drop procedure " + spName + ";"; - SDKUtil.ddl(executor, dbName, drop); +// SDKUtil.ddl(executor, dbName, drop); } super.tearDown(version,executor); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 90429813d43..4e3a061e279 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -30,11 +30,15 @@ com.4paradigm.openmldb openmldb-jdbc ${openmldb.jdbc.version} + system + /Users/zhaowei/Downloads/openmldb-jdbc-0.5.0-SNAPSHOT.jar com.4paradigm.openmldb openmldb-native ${openmldb.navtive.version} + system + 
/Users/zhaowei/Downloads/openmldb-native-0.5.0-SNAPSHOT.jar diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 60246425180..5a86cae9c8f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -117,27 +117,31 @@ public static OpenMLDBResult sqlRequestModeWithProcedure(SqlExecutor executor, S public static OpenMLDBResult sql(SqlExecutor executor, String dbName, String sql) { useDB(executor,dbName); - OpenMLDBResult fesqlResult = null; + OpenMLDBResult openMLDBResult = null; if (sql.startsWith("create database") || sql.startsWith("drop database")) { - fesqlResult = db(executor, sql); + openMLDBResult = db(executor, sql); }else if(sql.startsWith("CREATE INDEX")||sql.startsWith("create index")){ - fesqlResult = createIndex(executor, sql); + openMLDBResult = createIndex(executor, sql); }else if (sql.startsWith("create") || sql.startsWith("CREATE") || sql.startsWith("DROP")|| sql.startsWith("drop")) { - fesqlResult = ddl(executor, dbName, sql); - } else if (sql.startsWith("insert")||sql.startsWith("INSERT")) { - fesqlResult = insert(executor, dbName, sql); + openMLDBResult = ddl(executor, dbName, sql); + }else if (sql.startsWith("insert")||sql.startsWith("INSERT")) { + openMLDBResult = insert(executor, dbName, sql); + }else if (sql.startsWith("delete from")) { + openMLDBResult = delete(executor, dbName, sql); }else if(sql.startsWith("show deployments;")){ - fesqlResult = showDeploys(executor,dbName,sql); + openMLDBResult = showDeploys(executor,dbName,sql); }else if(sql.startsWith("show deployment")){ - fesqlResult = 
showDeploy(executor, dbName, sql); + openMLDBResult = showDeploy(executor, dbName, sql); }else if(sql.startsWith("desc ")){ - fesqlResult = desc(executor,dbName,sql); + openMLDBResult = desc(executor,dbName,sql); }else if(sql.contains("outfile")){ - fesqlResult = selectInto(executor, dbName, sql); + openMLDBResult = selectInto(executor, dbName, sql); }else { - fesqlResult = select(executor, dbName, sql); + openMLDBResult = select(executor, dbName, sql); } - return fesqlResult; + openMLDBResult.setSql(sql); + log.info("openMLDBResult:{}",openMLDBResult); + return openMLDBResult; } public static OpenMLDBResult selectInto(SqlExecutor executor, String dbName, String outSql){ @@ -289,6 +293,27 @@ public static OpenMLDBResult insert(SqlExecutor executor, String dbName, String log.info("insert result:{}" + fesqlResult); return fesqlResult; } + public static OpenMLDBResult delete(SqlExecutor executor, String dbName, String deleteSql) { + useDB(executor,dbName); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); + Statement statement = executor.getStatement(); + try { + statement.execute(deleteSql); + openMLDBResult.setOk(true); + openMLDBResult.setMsg("success"); + } catch (Exception e) { + e.printStackTrace(); + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); + }finally { + try { + statement.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + return openMLDBResult; + } public static OpenMLDBResult selectWithPrepareStatement(SqlExecutor executor, String dbName, String sql, List paramterTypes, List params) { OpenMLDBResult fesqlResult = new OpenMLDBResult(); From 26c02b611b813fe41ef05f3667051445cd958e1d Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 2 Aug 2022 11:01:29 +0800 Subject: [PATCH 111/172] deploy --- cases/function/dml/test_delete.yaml | 21 ++++++++++--------- .../test-suite/test_deploy-standalone.xml | 2 +- .../java_sdk_test/cluster/v060/DMLTest.java | 7 ++++++- 3 files changed, 18 insertions(+), 12 deletions(-) 
diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml index 3f494605692..ff51f4d07c8 100644 --- a/cases/function/dml/test_delete.yaml +++ b/cases/function/dml/test_delete.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: [] +debugs: ["两个索引 delete 其中一个"] version: 0.5.0 cases: - @@ -99,18 +99,19 @@ cases: rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"aa",1,2,3,1.1,2.1,1590738991000,"2020-05-01",true] sqls: - delete from {0} where c2=2; - - select * from {0}; + sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c2 smallint","w1_c4_count bigint"] order: id rows: - - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [1,1,1] + - [2,1,2] + - [3,2,1] + - [4,1,3] - id: 5 desc: delete 不是索引列 @@ -127,7 +128,7 @@ cases: - delete from {0} where c2=1; expect: success: false - msg: failed + msg: fail - id: 6 desc: delete key不存在 @@ -161,7 +162,7 @@ cases: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] sqls: - - delete from {0} where c1 is null; + - delete from {0} where c1=null; - select * from {0}; expect: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] diff --git 
a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml index 95763f349fb..8a6659af952 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy-standalone.xml @@ -2,7 +2,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java index 9c424dc0b1f..f577fe4a8fe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java @@ -49,6 +49,11 @@ public void testDeleteByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } - + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"function/dml/test_delete.yaml"}) + @Story("delete") + public void testDeleteByRequest(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); + } } From 6621487ac2253e32dbff505289175243752e85a3 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 3 Aug 2022 08:58:57 +0800 Subject: [PATCH 112/172] support kafka --- cases/function/dml/test_delete.yaml | 66 +++++++++++++++---- .../openmldb-ecosystem/pom.xml | 26 ++++++++ .../openmldb/ecosystem/tmp/TestKafka.java | 43 ++++++++++++ .../http_test/common/StandaloneTest.java | 6 +- .../executor/BatchSQLExecutor.java | 5 ++ .../java_sdk_test/cluster/v060/DMLTest.java | 6 -- .../openmldb-test-java/pom.xml | 1 + 7 files changed, 133 insertions(+), 20 
deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml create mode 100644 test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml index ff51f4d07c8..51e0a39736f 100644 --- a/cases/function/dml/test_delete.yaml +++ b/cases/function/dml/test_delete.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: ["两个索引 delete 其中一个"] +debugs: [] version: 0.5.0 cases: - @@ -92,6 +92,7 @@ cases: - id: 4 desc: 两个索引 delete 其中一个 + mode: cluster-unsupport inputs: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] @@ -110,7 +111,6 @@ cases: rows: - [1,1,1] - [2,1,2] - - [3,2,1] - [4,1,3] - id: 5 @@ -320,6 +320,7 @@ cases: - id: 17 desc: 两次delete 不同的index + mode: cluster-unsupport inputs: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] @@ -327,17 +328,21 @@ cases: rows: - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] sqls: - delete from {0} where c1='aa'; - - delete from {0} where c2=1; - - select * from {0}; + - delete from {0} where c2=2; + sql: | + SELECT id, c2, count(c4) OVER w1 as w1_c4_count, count(c5) OVER w2 as w2_c5_count FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c2 
smallint","w1_c4_count bigint","w2_c5_count bigint"] order: id rows: - - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - [1,1,1,1] + - [2,1,1,2] - id: 18 desc: delete过期数据 @@ -450,6 +455,7 @@ cases: - id: 24 desc: 两个索引,一个索引数据过期,删除另一个索引 + mode: cluster-unsupport inputs: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] @@ -462,13 +468,13 @@ cases: - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] sqls: - delete from {0} where c2=1; - - select * from {0}; + sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: - columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + columns: ["id int","c2 smallint","w1_c4_count bigint"] order: id rows: - - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - [4,2,1] + - [5,2,2] - id: 25 desc: 数据过期,delete其他pk @@ -550,4 +556,42 @@ cases: - delete from {0} where c1='aa'; expect: success: true + - + id: 30 + desc: 组合key有一个是null + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1|c2:c7"] + rows: + - [1,null,2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1=null and c2=2; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 31 + desc: 组合key有一个是空串 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1|c2:c7"] + rows: + - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='' and c2=2; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml b/test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml new file mode 100644 index 00000000000..2e648fd5735 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml @@ -0,0 +1,26 @@ + + + + openmldb-test-java + com.4paradigm.openmldb + 0.1.0-SNAPSHOT + + 4.0.0 + + openmldb-ecosystem + + + 8 + 8 + + + + org.apache.kafka + kafka-clients + 2.7.0 + + + + \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java new file mode 100644 index 00000000000..9fd00e5cf72 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java @@ -0,0 +1,43 @@ +package com._4paradigm.openmldb.ecosystem.tmp; + +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.testng.annotations.Test; + +import java.util.Properties; + +public class TestKafka { + @Test + public void test(){ + //1.创建Kafka生产者的配置信息 + Properties properties = new Properties(); + //指定链接的kafka集群 + 
properties.put("bootstrap.servers","172.24.4.55:39092"); + //ack应答级别 +// properties.put("acks","all");//all等价于-1 0 1 + //重试次数 + properties.put("retries",1); + //批次大小 + properties.put("batch.size",16384);//16k + //等待时间 + properties.put("linger.ms",1); + //RecordAccumulator缓冲区大小 + properties.put("buffer.memory",33554432);//32m + //Key,Value的序列化类 + properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + + //创建生产者对象 + KafkaProducer producer = new KafkaProducer<>(properties); +// String message = "{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"int16\",\"optional\":true,\"field\":\"c1_int16\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c2_int32\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c3_int64\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c4_float\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c5_double\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c6_boolean\"},{\"type\":\"string\",\"optional\":true,\"field\":\"c7_string\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c8_date\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c9_timestamp\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1_int16\":1,\"c2_int32\":2,\"c3_int64\":3,\"c4_float\":4.4,\"c5_double\":5.555,\"c6_boolean\":true,\"c7_string\":\"c77777\",\"c8_date\":19109,\"c9_timestamp\":1651051906000}}"; + String message = "{\"data\":[{\"ID\":11,\"UUID\":\"11\",\"PID\":11,\"GID\":11,\"CID\":11}],\"database\":\"d1\",\"isDdl\":false,\"mysqlType\":{\"ID\":\"bigint\",\"UUID\":\"varchar\",\"PID\":\"int\",\"GID\":\"int\",\"CID\":\"int\"},\"old\":[{\"ID\":10,\"UUID\":\"10\",\"PID\":10,\"GID\":10,\"CID\":10}],\"pkNames\":[\"ID\"],\"sql\":\"\",\"table\":\"test_kafka\",\"ts\":1657611359356,\"type\":\"INSERT\"}"; + //发送数据 + 
producer.send(new ProducerRecord("test_kafka",message)); +// for (int i=0;i<10;i++){ +// producer.send(new ProducerRecord("study","luzelong"+i)); +// } + + //关闭资源 + producer.close(); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java index 744c788fb2f..5383e32164d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java @@ -48,9 +48,9 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers .basePath("/home/zhaowei01/fedb-auto-test/standalone") .openMLDBPath("/home/zhaowei01/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10018")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10019")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10020")) + .nsEndpoints(Lists.newArrayList("172.24.4.55:10013")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:10014")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10015")) .host("172.24.4.55") .port(10018) .build(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index 82e07504dd0..ee6c21a01c7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -16,6 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.executor; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.util.SDKUtil; @@ -70,6 +71,10 @@ public boolean verify() { log.info("skip case in disk mode: {}", sqlCase.getDesc()); return false; } + if (OpenMLDBConfig.isCluster() && null != sqlCase.getMode() && sqlCase.getMode().contains("cluster-unsupport")) { + log.info("skip case in cluster mode: {}", sqlCase.getDesc()); + return false; + } return true; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java index f577fe4a8fe..bcc7f2620af 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java @@ -49,11 +49,5 @@ public void testDeleteByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } - @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/dml/test_delete.yaml"}) - @Story("delete") - public void testDeleteByRequest(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); - } } diff --git a/test/integration-test/openmldb-test-java/pom.xml b/test/integration-test/openmldb-test-java/pom.xml index 568e3f8b0f5..231e8a03304 100644 --- 
a/test/integration-test/openmldb-test-java/pom.xml +++ b/test/integration-test/openmldb-test-java/pom.xml @@ -15,6 +15,7 @@ openmldb-tool-test openmldb-deploy openmldb-devops-test + openmldb-ecosystem From 2c5f071a2dd06b811e2bd92f7efdec9a6e8fc30c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 3 Aug 2022 13:10:07 +0800 Subject: [PATCH 113/172] test kafka --- cases/function/long_window/test_count_where.yaml | 12 ++++++------ .../_4paradigm/openmldb/ecosystem/tmp/TestKafka.java | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml index be270cfa650..b73f87ba99b 100644 --- a/cases/function/long_window/test_count_where.yaml +++ b/cases/function/long_window/test_count_where.yaml @@ -24,13 +24,13 @@ cases: columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] rows: - - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,2,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,3,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,4,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,5,34,1.5,2.5,1590738990004,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c3<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] 
diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java index 9fd00e5cf72..5ebb51a0fdb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java @@ -30,7 +30,7 @@ public void test(){ //创建生产者对象 KafkaProducer producer = new KafkaProducer<>(properties); // String message = "{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"int16\",\"optional\":true,\"field\":\"c1_int16\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c2_int32\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c3_int64\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c4_float\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c5_double\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c6_boolean\"},{\"type\":\"string\",\"optional\":true,\"field\":\"c7_string\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c8_date\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c9_timestamp\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1_int16\":1,\"c2_int32\":2,\"c3_int64\":3,\"c4_float\":4.4,\"c5_double\":5.555,\"c6_boolean\":true,\"c7_string\":\"c77777\",\"c8_date\":19109,\"c9_timestamp\":1651051906000}}"; - String message = 
"{\"data\":[{\"ID\":11,\"UUID\":\"11\",\"PID\":11,\"GID\":11,\"CID\":11}],\"database\":\"d1\",\"isDdl\":false,\"mysqlType\":{\"ID\":\"bigint\",\"UUID\":\"varchar\",\"PID\":\"int\",\"GID\":\"int\",\"CID\":\"int\"},\"old\":[{\"ID\":10,\"UUID\":\"10\",\"PID\":10,\"GID\":10,\"CID\":10}],\"pkNames\":[\"ID\"],\"sql\":\"\",\"table\":\"test_kafka\",\"ts\":1657611359356,\"type\":\"INSERT\"}"; + String message = "{\"data\":[{\"ID\":16,\"UUID\":\"11\",\"PID\":11,\"GID\":11,\"CID\":11}],\"database\":\"d1\",\"table\":\"test_kafka\",\"type\":\"INSERT\"}"; //发送数据 producer.send(new ProducerRecord("test_kafka",message)); // for (int i=0;i<10;i++){ From ce36bccc44527349633457e657ea50ecddc14209 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 3 Aug 2022 18:58:45 +0800 Subject: [PATCH 114/172] support kafka --- .../long_window/test_count_where.yaml | 2 +- .../openmldb/ecosystem/tmp/TestKafka.java | 46 ++++++++++++++++++- 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml index b73f87ba99b..84740eaa889 100644 --- a/cases/function/long_window/test_count_where.yaml +++ b/cases/function/long_window/test_count_where.yaml @@ -30,7 +30,7 @@ cases: - [4,"aa",4,4,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,5,34,1.5,2.5,1590738990004,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c3<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java 
b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java index 5ebb51a0fdb..f81124da66b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java @@ -1,9 +1,14 @@ package com._4paradigm.openmldb.ecosystem.tmp; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerRecord; import org.testng.annotations.Test; +import java.util.Collections; import java.util.Properties; public class TestKafka { @@ -30,9 +35,11 @@ public void test(){ //创建生产者对象 KafkaProducer producer = new KafkaProducer<>(properties); // String message = 
"{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"int16\",\"optional\":true,\"field\":\"c1_int16\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c2_int32\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c3_int64\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c4_float\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c5_double\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c6_boolean\"},{\"type\":\"string\",\"optional\":true,\"field\":\"c7_string\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c8_date\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c9_timestamp\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1_int16\":1,\"c2_int32\":2,\"c3_int64\":3,\"c4_float\":4.4,\"c5_double\":5.555,\"c6_boolean\":true,\"c7_string\":\"c77777\",\"c8_date\":19109,\"c9_timestamp\":1651051906000}}"; - String message = "{\"data\":[{\"ID\":16,\"UUID\":\"11\",\"PID\":11,\"GID\":11,\"CID\":11}],\"database\":\"d1\",\"table\":\"test_kafka\",\"type\":\"INSERT\"}"; +// String message = "{\"data\":[{\"ID\":20,\"UUID\":\"11\",\"PID\":11,\"GID\":11,\"CID\":11}],\"database\":\"d1\",\"table\":\"test_kafka\",\"type\":\"insert\"}"; + String message = "{\"data\":[{\"c1\":\"a11\",\"c2\":1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}],\"type\":\"delete\"}"; +// String message = 
"{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"string\",\"optional\":true,\"field\":\"c1\"},{\"type\":\"int16\",\"optional\":true,\"field\":\"c2\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c3\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c4\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c5\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c6\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c7\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c8\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c9\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1\":\"ee\",\"c2\":1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}}"; //发送数据 - producer.send(new ProducerRecord("test_kafka",message)); + producer.send(new ProducerRecord("m2",message)); // for (int i=0;i<10;i++){ // producer.send(new ProducerRecord("study","luzelong"+i)); // } @@ -40,4 +47,39 @@ public void test(){ //关闭资源 producer.close(); } + @Test + public void test1() {//自动提交 + //1.创建消费者配置信息 + Properties properties = new Properties(); + //链接的集群 + properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"172.24.4.55:39092"); + //开启自动提交 + properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,true); + //自动提交的延迟 + properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000"); + //key,value的反序列化 + properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringDeserializer"); + properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringDeserializer"); + //消费者组 + properties.put(ConsumerConfig.GROUP_ID_CONFIG,"test-consumer-group1"); + properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");//重置消费者offset的方法(达到重复消费的目的),设置该属性也只在两种情况下生效:1.上面设置的消费组还未消费(可以更改组名来消费)2.该offset已经过期 + + + //创建生产者 + KafkaConsumer consumer = new 
KafkaConsumer<>(properties); + consumer.subscribe(Collections.singletonList("test_kafka")); //Arrays.asList() + + while (true) { + //获取数据 + ConsumerRecords consumerRecords = consumer.poll(100); + + //解析并打印consumerRecords + for (ConsumerRecord consumerRecord : consumerRecords) { + System.out.println(consumerRecord.key() + "----" + consumerRecord.value()); + } + } + + //consumer无需close() + } + } From ec2de7154140395481fec68f756bccda99232154 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 4 Aug 2022 16:41:43 +0800 Subject: [PATCH 115/172] support kafka --- .../openmldb-ecosystem/src/main/resources/kafka.properties | 0 .../java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/resources/kafka.properties diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/resources/kafka.properties b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/resources/kafka.properties new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java index f81124da66b..f3381b5debf 100644 --- a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java @@ -36,7 +36,7 @@ public void test(){ KafkaProducer producer = new KafkaProducer<>(properties); // String message = 
"{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"int16\",\"optional\":true,\"field\":\"c1_int16\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c2_int32\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c3_int64\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c4_float\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c5_double\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c6_boolean\"},{\"type\":\"string\",\"optional\":true,\"field\":\"c7_string\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c8_date\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c9_timestamp\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1_int16\":1,\"c2_int32\":2,\"c3_int64\":3,\"c4_float\":4.4,\"c5_double\":5.555,\"c6_boolean\":true,\"c7_string\":\"c77777\",\"c8_date\":19109,\"c9_timestamp\":1651051906000}}"; // String message = "{\"data\":[{\"ID\":20,\"UUID\":\"11\",\"PID\":11,\"GID\":11,\"CID\":11}],\"database\":\"d1\",\"table\":\"test_kafka\",\"type\":\"insert\"}"; - String message = "{\"data\":[{\"c1\":\"a11\",\"c2\":1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}],\"type\":\"delete\"}"; + String message = "{\"data\":[{\"c1\":\"cc\",\"c2\":1.1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}],\"type\":\"insert\"}"; // String message = 
"{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"string\",\"optional\":true,\"field\":\"c1\"},{\"type\":\"int16\",\"optional\":true,\"field\":\"c2\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c3\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c4\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c5\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c6\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c7\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c8\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c9\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1\":\"ee\",\"c2\":1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}}"; //发送数据 producer.send(new ProducerRecord("m2",message)); From 7fdf034294df0e4cc65b0f404ab6cf4e4c49e7da Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 5 Aug 2022 16:25:46 +0800 Subject: [PATCH 116/172] support kafka --- .../cluster/test_cluster_batch.yaml | 199 ++ .../cluster/test_window_row.yaml | 216 ++ .../cluster/test_window_row_range.yaml | 172 ++ .../cluster/window_and_lastjoin.yaml | 620 ++++ .../data_expiration/test_data_expiration.yaml | 70 + cases/integration_test/ddl/test_create.yaml | 560 ++++ .../ddl/test_create_index.yaml | 768 +++++ .../ddl/test_create_no_index.yaml | 284 ++ cases/integration_test/ddl/test_options.yaml | 430 +++ cases/integration_test/ddl/test_ttl.yaml | 322 +++ .../deploy/test_create_deploy.yaml | 621 ++++ .../deploy/test_drop_deploy.yaml | 85 + .../deploy/test_show_deploy.yaml | 88 + .../disk_table/disk_table.yaml | 486 ++++ cases/integration_test/dml/multi_insert.yaml | 287 ++ cases/integration_test/dml/test_delete.yaml | 597 ++++ cases/integration_test/dml/test_insert.yaml | 207 ++ .../dml/test_insert_prepared.yaml | 280 ++ .../ecosystem/test_kafka.yaml | 25 + .../expression/test_arithmetic.yaml | 686 +++++ 
.../expression/test_condition.yaml | 400 +++ .../expression/test_like.yaml | 1138 ++++++++ .../expression/test_logic.yaml | 135 + .../expression/test_predicate.yaml | 778 +++++ .../expression/test_type.yaml | 674 +++++ .../function/test_calculate.yaml | 254 ++ .../integration_test/function/test_date.yaml | 144 + .../function/test_like_match.yaml | 840 ++++++ .../function/test_string.yaml | 290 ++ .../function/test_udaf_function.yaml | 2563 +++++++++++++++++ .../function/test_udf_function.yaml | 89 + cases/integration_test/fz_ddl/test_bank.yaml | 151 + cases/integration_test/fz_ddl/test_luoji.yaml | 293 ++ cases/integration_test/fz_ddl/test_myhug.yaml | 314 ++ .../join/test_lastjoin_complex.yaml | 1197 ++++++++ .../join/test_lastjoin_simple.yaml | 1068 +++++++ .../long_window/long_window.yaml | 357 +++ .../long_window/test_count_where.yaml | 540 ++++ .../test_multiple_databases.yaml | 383 +++ .../integration_test/out_in/test_out_in.yaml | 894 ++++++ .../select/test_select_sample.yaml | 307 ++ .../select/test_sub_select.yaml | 359 +++ cases/integration_test/select/test_where.yaml | 252 ++ .../spark/generate_yaml_case.py | 191 ++ cases/integration_test/spark/requirements.txt | 3 + cases/integration_test/spark/test_ads.yaml | 176 ++ cases/integration_test/spark/test_credit.yaml | 1012 +++++++ .../spark/test_fqz_studio.yaml | 363 +++ cases/integration_test/spark/test_jd.yaml | 307 ++ cases/integration_test/spark/test_news.yaml | 439 +++ .../integration_test/test_batch_request.yaml | 358 +++ .../test_feature_zero_function.yaml | 176 ++ cases/integration_test/test_fz_sql.yaml | 156 + .../test_index_optimized.yaml | 184 ++ .../test_performance_insensitive.yaml | 401 +++ .../tmp/test_current_time.yaml | 106 + .../ut_case/test_unique_expect.yaml | 56 + .../v040/test_execute_mode.yaml | 81 + cases/integration_test/v040/test_groupby.yaml | 560 ++++ cases/integration_test/v040/test_job.yaml | 176 ++ .../integration_test/v040/test_load_data.yaml | 467 +++ 
.../v040/test_out_in_offline.yaml | 894 ++++++ cases/integration_test/v040/test_udaf.yaml | 108 + .../integration_test/window/error_window.yaml | 303 ++ .../window/test_current_row.yaml | 1507 ++++++++++ .../integration_test/window/test_maxsize.yaml | 789 +++++ .../integration_test/window/test_window.yaml | 1223 ++++++++ .../test_window_exclude_current_time.yaml | 761 +++++ .../window/test_window_row.yaml | 920 ++++++ .../window/test_window_row_range.yaml | 1497 ++++++++++ .../window/test_window_union.yaml | 1152 ++++++++ .../test_window_union_cluster_thousand.yaml | 1044 +++++++ .../window/window_attributes.yaml | 535 ++++ .../openmldb/ecosystem/common/KafkaTest.java | 83 + .../src/main/resources/kafka.properties | 4 + .../openmldb/ecosystem/tmp/TestKafka.java | 2 +- .../java_sdk_test/common/JDBCTest.java | 2 + .../java_sdk_test/common/OpenMLDBConfig.java | 38 +- .../java_sdk_test/common/OpenMLDBTest.java | 1 + .../java_sdk_test/common/StandaloneTest.java | 1 + .../entity/OpenMLDBProcedureColumn2.java | 26 - .../entity/OpenmldbDeployment2.java | 14 - .../diff_test/DiffResultTest.java | 2 +- .../java_sdk_test/diff_test/MysqlTest.java | 2 +- .../java_sdk_test/diff_test/Sqlite3Test.java | 2 +- .../java_sdk_test/ut/UniqueExpectTest.java | 2 +- .../test_common}/common/BaseTest.java | 4 +- .../model}/OpenMLDBCaseFileList.java | 36 +- .../openmldb/test_common/model/SQLCase.java | 1 + .../openmldb/OpenMLDBGlobalVar.java | 8 + 90 files changed, 36511 insertions(+), 85 deletions(-) create mode 100644 cases/integration_test/cluster/test_cluster_batch.yaml create mode 100644 cases/integration_test/cluster/test_window_row.yaml create mode 100644 cases/integration_test/cluster/test_window_row_range.yaml create mode 100644 cases/integration_test/cluster/window_and_lastjoin.yaml create mode 100644 cases/integration_test/data_expiration/test_data_expiration.yaml create mode 100644 cases/integration_test/ddl/test_create.yaml create mode 100644 
cases/integration_test/ddl/test_create_index.yaml create mode 100644 cases/integration_test/ddl/test_create_no_index.yaml create mode 100644 cases/integration_test/ddl/test_options.yaml create mode 100644 cases/integration_test/ddl/test_ttl.yaml create mode 100644 cases/integration_test/deploy/test_create_deploy.yaml create mode 100644 cases/integration_test/deploy/test_drop_deploy.yaml create mode 100644 cases/integration_test/deploy/test_show_deploy.yaml create mode 100644 cases/integration_test/disk_table/disk_table.yaml create mode 100644 cases/integration_test/dml/multi_insert.yaml create mode 100644 cases/integration_test/dml/test_delete.yaml create mode 100644 cases/integration_test/dml/test_insert.yaml create mode 100644 cases/integration_test/dml/test_insert_prepared.yaml create mode 100644 cases/integration_test/ecosystem/test_kafka.yaml create mode 100644 cases/integration_test/expression/test_arithmetic.yaml create mode 100644 cases/integration_test/expression/test_condition.yaml create mode 100644 cases/integration_test/expression/test_like.yaml create mode 100644 cases/integration_test/expression/test_logic.yaml create mode 100644 cases/integration_test/expression/test_predicate.yaml create mode 100644 cases/integration_test/expression/test_type.yaml create mode 100644 cases/integration_test/function/test_calculate.yaml create mode 100644 cases/integration_test/function/test_date.yaml create mode 100644 cases/integration_test/function/test_like_match.yaml create mode 100644 cases/integration_test/function/test_string.yaml create mode 100644 cases/integration_test/function/test_udaf_function.yaml create mode 100644 cases/integration_test/function/test_udf_function.yaml create mode 100644 cases/integration_test/fz_ddl/test_bank.yaml create mode 100644 cases/integration_test/fz_ddl/test_luoji.yaml create mode 100644 cases/integration_test/fz_ddl/test_myhug.yaml create mode 100644 cases/integration_test/join/test_lastjoin_complex.yaml create mode 100644 
cases/integration_test/join/test_lastjoin_simple.yaml create mode 100644 cases/integration_test/long_window/long_window.yaml create mode 100644 cases/integration_test/long_window/test_count_where.yaml create mode 100644 cases/integration_test/multiple_databases/test_multiple_databases.yaml create mode 100644 cases/integration_test/out_in/test_out_in.yaml create mode 100644 cases/integration_test/select/test_select_sample.yaml create mode 100644 cases/integration_test/select/test_sub_select.yaml create mode 100644 cases/integration_test/select/test_where.yaml create mode 100755 cases/integration_test/spark/generate_yaml_case.py create mode 100644 cases/integration_test/spark/requirements.txt create mode 100644 cases/integration_test/spark/test_ads.yaml create mode 100644 cases/integration_test/spark/test_credit.yaml create mode 100644 cases/integration_test/spark/test_fqz_studio.yaml create mode 100644 cases/integration_test/spark/test_jd.yaml create mode 100644 cases/integration_test/spark/test_news.yaml create mode 100644 cases/integration_test/test_batch_request.yaml create mode 100644 cases/integration_test/test_feature_zero_function.yaml create mode 100644 cases/integration_test/test_fz_sql.yaml create mode 100644 cases/integration_test/test_index_optimized.yaml create mode 100644 cases/integration_test/test_performance_insensitive/test_performance_insensitive.yaml create mode 100644 cases/integration_test/tmp/test_current_time.yaml create mode 100644 cases/integration_test/ut_case/test_unique_expect.yaml create mode 100644 cases/integration_test/v040/test_execute_mode.yaml create mode 100644 cases/integration_test/v040/test_groupby.yaml create mode 100644 cases/integration_test/v040/test_job.yaml create mode 100644 cases/integration_test/v040/test_load_data.yaml create mode 100644 cases/integration_test/v040/test_out_in_offline.yaml create mode 100644 cases/integration_test/v040/test_udaf.yaml create mode 100644 cases/integration_test/window/error_window.yaml 
create mode 100644 cases/integration_test/window/test_current_row.yaml create mode 100644 cases/integration_test/window/test_maxsize.yaml create mode 100644 cases/integration_test/window/test_window.yaml create mode 100644 cases/integration_test/window/test_window_exclude_current_time.yaml create mode 100644 cases/integration_test/window/test_window_row.yaml create mode 100644 cases/integration_test/window/test_window_row_range.yaml create mode 100644 cases/integration_test/window/test_window_union.yaml create mode 100644 cases/integration_test/window/test_window_union_cluster_thousand.yaml create mode 100644 cases/integration_test/window/window_attributes.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBProcedureColumn2.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenmldbDeployment2.java rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common}/common/BaseTest.java (96%) rename test/integration-test/openmldb-test-java/{openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity => openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model}/OpenMLDBCaseFileList.java (57%) diff --git a/cases/integration_test/cluster/test_cluster_batch.yaml b/cases/integration_test/cluster/test_cluster_batch.yaml new file mode 100644 index 00000000000..329fc9d170d --- /dev/null +++ b/cases/integration_test/cluster/test_cluster_batch.yaml @@ -0,0 +1,199 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: SELECT columns + inputs: + - + columns: ["id int", "c1 string","c6 double","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1.0, 1590738990000] + - [2, "aa", 2.0, 1590738991000] + - [3, "aa", 3.0, 1590738992000] + - [4, "aa", 4.0, 1590738993000] + - [5, "bb", 5.0, 1590738994000] + - [6, "bb", 6.0, 1590738995000] + - [7, "bb", 7.0, 1590738996000] + - [8, "bb", 8.0, 1590738997000] + - [9, "bb", 9.0, 1590738998000] + - [10, "cc", 1.0, 1590738993000] + - [11, "cc", 2.0, 1590738994000 ] + - [12, "cc", 3.0, 1590738995000 ] + - [13, "cc", 4.0, 1590738996000 ] + - [14, "cc", 5.0, 1590738997000 ] + - [15, "dd", 6.0, 1590738998000 ] + - [16, "dd", 7.0, 1590738999000 ] + sql: | + SELECT id, c1, c6, c7 FROM {0}; + expect: + order: id + columns: ["id int", "c1 string", "c6 double", "c7 timestamp"] + rows: + - [ 1, "aa", 1.0, 1590738990000] + - [ 2, "aa", 2.0, 1590738991000] + - [ 3, "aa", 3.0, 1590738992000] + - [ 4, "aa", 4.0, 1590738993000] + - [ 5, "bb", 5.0, 1590738994000] + - [ 6, "bb", 6.0, 1590738995000] + - [ 7, "bb", 7.0, 1590738996000] + - [ 8, "bb", 8.0, 1590738997000] + - [ 9, "bb", 9.0, 1590738998000] + - [ 10, "cc", 1.0, 1590738993000] + - [ 11, "cc", 2.0, 1590738994000] + - [ 12, "cc", 3.0, 1590738995000] + - [ 13, "cc", 4.0, 1590738996000] + - [ 14, "cc", 5.0, 1590738997000] + - [ 15, "dd", 6.0, 1590738998000] + - [ 16, "dd", 7.0, 1590738999000] + + - + id: 1 + desc: SELECT columns, some tablet result set is empty + inputs: + - 
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1.0, 1590738990000] + - [2, "aa", 2.0, 1590738991000] + - [3, "aa", 3.0, 1590738992000] + - [4, "aa", 4.0, 1590738993000] + - [15, "dd", 6.0, 1590738998000 ] + - [16, "dd", 7.0, 1590738999000 ] + sql: | + SELECT id, c1, c6, c7 FROM {0}; + expect: + order: id + columns: ["id int", "c1 string", "c6 double", "c7 timestamp"] + rows: + - [ 1, "aa", 1.0, 1590738990000] + - [ 2, "aa", 2.0, 1590738991000] + - [ 3, "aa", 3.0, 1590738992000] + - [ 4, "aa", 4.0, 1590738993000] + - [ 15, "dd", 6.0, 1590738998000] + - [ 16, "dd", 7.0, 1590738999000] + - + id: 2 + desc: SELECT simple expression + inputs: + - + columns: ["id int", "c1 string","c6 double","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1.0, 1590738990000] + - [2, "aa", 2.0, 1590738991000] + - [3, "aa", 3.0, 1590738992000] + - [4, "aa", 4.0, 1590738993000] + - [5, "bb", 5.0, 1590738994000] + - [6, "bb", 6.0, 1590738995000] + - [7, "bb", 7.0, 1590738996000] + - [8, "bb", 8.0, 1590738997000] + - [9, "bb", 9.0, 1590738998000] + - [10, "cc", 1.0, 1590738993000] + - [11, "cc", 2.0, 1590738994000 ] + - [12, "cc", 3.0, 1590738995000 ] + - [13, "cc", 4.0, 1590738996000 ] + - [14, "cc", 5.0, 1590738997000 ] + - [15, "dd", 6.0, 1590738998000 ] + - [16, "dd", 7.0, 1590738999000 ] + sql: | + SELECT id, c1, c6+1.0 as f1, c7, year(c7) as f2 FROM {0}; + expect: + order: id + columns: ["id int", "c1 string", "f1 double", "c7 timestamp", "f2 int"] + rows: + - [ 1, "aa", 2.0, 1590738990000, 2020] + - [ 2, "aa", 3.0, 1590738991000, 2020] + - [ 3, "aa", 4.0, 1590738992000, 2020] + - [ 4, "aa", 5.0, 1590738993000, 2020] + - [ 5, "bb", 6.0, 1590738994000, 2020] + - [ 6, "bb", 7.0, 1590738995000, 2020] + - [ 7, "bb", 8.0, 1590738996000, 2020] + - [ 8, "bb", 9.0, 1590738997000, 2020] + - [ 9, "bb", 10.0, 1590738998000, 2020] + - [ 10, "cc", 2.0, 1590738993000, 2020] + - [ 11, "cc", 3.0, 1590738994000, 2020] + - [ 
12, "cc", 4.0, 1590738995000, 2020] + - [ 13, "cc", 5.0, 1590738996000, 2020] + - [ 14, "cc", 6.0, 1590738997000, 2020] + - [ 15, "dd", 7.0, 1590738998000, 2020] + - [ 16, "dd", 8.0, 1590738999000, 2020] + + - + id: 3 + desc: SELECT simple expression LIMIT 10 + mode: request-unsupport + inputs: + - + columns: ["id int", "c1 string","c6 double","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1.0, 1590738990000] + - [2, "aa", 2.0, 1590738991000] + - [3, "aa", 3.0, 1590738992000] + - [4, "aa", 4.0, 1590738993000] + - [5, "bb", 5.0, 1590738994000] + - [6, "bb", 6.0, 1590738995000] + - [7, "bb", 7.0, 1590738996000] + - [8, "bb", 8.0, 1590738997000] + - [9, "bb", 9.0, 1590738998000] + - [10, "cc", 1.0, 1590738993000] + - [11, "cc", 2.0, 1590738994000 ] + - [12, "cc", 3.0, 1590738995000 ] + - [13, "cc", 4.0, 1590738996000 ] + - [14, "cc", 5.0, 1590738997000 ] + - [15, "dd", 6.0, 1590738998000 ] + - [16, "dd", 7.0, 1590738999000 ] + sql: | + SELECT id, c1, c6+1.0 as f1, c7, year(c7) as f2 FROM {0} LIMIT 10; + expect: + order: id + columns: ["id int", "c1 string", "f1 double", "c7 timestamp", "f2 int"] + count: 10 + - + id: 4 + desc: SELECT simple expression LIMIT 3 + mode: request-unsupport + inputs: + - + columns: ["id int", "c1 string","c6 double","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1.0, 1590738990000] + - [2, "aa", 2.0, 1590738991000] + - [3, "aa", 3.0, 1590738992000] + - [4, "aa", 4.0, 1590738993000] + - [5, "bb", 5.0, 1590738994000] + - [6, "bb", 6.0, 1590738995000] + - [7, "bb", 7.0, 1590738996000] + - [8, "bb", 8.0, 1590738997000] + - [9, "bb", 9.0, 1590738998000] + - [10, "cc", 1.0, 1590738993000] + - [11, "cc", 2.0, 1590738994000 ] + - [12, "cc", 3.0, 1590738995000 ] + - [13, "cc", 4.0, 1590738996000 ] + - [14, "cc", 5.0, 1590738997000 ] + - [15, "dd", 6.0, 1590738998000 ] + - [16, "dd", 7.0, 1590738999000 ] + sql: | + SELECT id, c1, c6+1.0 as f1, c7, year(c7) as f2 FROM {0} LIMIT 3; + expect: + order: id + columns: 
["id int", "c1 string", "f1 double", "c7 timestamp", "f2 int"] + count: 3 \ No newline at end of file diff --git a/cases/integration_test/cluster/test_window_row.yaml b/cases/integration_test/cluster/test_window_row.yaml new file mode 100644 index 00000000000..35f200af520 --- /dev/null +++ b/cases/integration_test/cluster/test_window_row.yaml @@ -0,0 +1,216 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 简单rows window + inputs: + - + columns: ["id int", "c1 string","c6 double","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1.0, 1590738990000] + - [2, "aa", 2.0, 1590738991000] + - [3, "aa", 3.0, 1590738992000] + - [4, "aa", 4.0, 1590738993000] + - [5, "bb", 5.0, 1590738994000] + - [6, "bb", 6.0, 1590738995000] + - [7, "bb", 7.0, 1590738996000] + - [8, "bb", 8.0, 1590738997000] + - [9, "bb", 9.0, 1590738998000] + - [10, "cc", 1.0, 1590738993000] + - [11, "cc", 2.0, 1590738994000 ] + - [12, "cc", 3.0, 1590738995000 ] + - [13, "cc", 4.0, 1590738996000 ] + - [14, "cc", 5.0, 1590738997000 ] + - [15, "dd", 6.0, 1590738998000 ] + - [16, "dd", 7.0, 1590738999000 ] + sql: | + SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min 
double","w1_cnt bigint"] + rows: + - [ 1, "aa", 1.0, 1590738990000, 1.0, 1] + - [ 2, "aa", 2.0, 1590738991000, 1.0, 2] + - [ 3, "aa", 3.0, 1590738992000, 1.0, 3] + - [ 4, "aa", 4.0, 1590738993000, 2.0, 3] + - [ 5, "bb", 5.0, 1590738994000, 5.0, 1] + - [ 6, "bb", 6.0, 1590738995000, 5.0, 2] + - [ 7, "bb", 7.0, 1590738996000, 5.0, 3] + - [ 8, "bb", 8.0, 1590738997000, 6.0, 3] + - [ 9, "bb", 9.0, 1590738998000, 7.0, 3] + - [ 10, "cc", 1.0, 1590738993000, 1.0, 1] + - [ 11, "cc", 2.0, 1590738994000, 1.0, 2] + - [ 12, "cc", 3.0, 1590738995000, 1.0, 3] + - [ 13, "cc", 4.0, 1590738996000, 2.0, 3] + - [ 14, "cc", 5.0, 1590738997000, 3.0, 3] + - [ 15, "dd", 6.0, 1590738998000, 6.0, 1] + - [ 16, "dd", 7.0, 1590738999000, 6.0, 2] + - + id: 1 + desc: 简单rows window, union副表 + mode: cluster-unsupport + inputs: + - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1, "aa", 1.0, 1590738990000 ] + - [ 2, "aa", 4.0, 1590738993000 ] + - [ 3, "bb", 5.0, 1590738994000 ] + - [ 4, "bb", 9.0, 1590738998000 ] + - [ 5, "cc", 1.0, 1590738993000 ] + - [ 6, "cc", 5.0, 1590738997000 ] + - [ 7, "dd", 7.0, 1590738999000 ] + - + columns: ["x1 string","x6 double","x7 timestamp"] + indexs: ["index1:x1:x7"] + rows: + - ["aa", 2.0, 1590738991000] + - ["aa", 3.0, 1590738992000] + - ["bb", 6.0, 1590738995000] + - ["bb", 7.0, 1590738996000] + - ["bb", 8.0, 1590738997000] + - ["cc", 2.0, 1590738994000 ] + - ["cc", 3.0, 1590738995000 ] + - ["cc", 4.0, 1590738996000 ] + - ["dd", 6.0, 1590738998000 ] + sql: | + SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW + w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min double","w1_cnt bigint"] + rows: + - [ 1, "aa", 1.0, 1590738990000, 1.0, 1] + - [ 2, "aa", 4.0, 1590738993000, 
2.0, 3] + - [ 3, "bb", 5.0, 1590738994000, 5.0, 1] + - [ 4, "bb", 9.0, 1590738998000, 7.0, 3] + - [ 5, "cc", 1.0, 1590738993000, 1.0, 1] + - [ 6, "cc", 5.0, 1590738997000, 3.0, 3] + - [ 7, "dd", 7.0, 1590738999000, 6.0, 2] + - + id: 2 + desc: 2 window,pk不同 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c7 timestamp"] + indexs: ["index1:c1:c7", "index3:c3:c7"] + rows: + - [1,"aa",20,30,1590738990000] + - [2,"aa",20,31,1590738991000] + - [3,"bb",20,32,1590738992000] + - [4,"bb",20,33,1590738993000] + - [5,"cc",21,34,1590738994000] + - [6,"aa",21,35,1590738995000] + - [7,"aa",21,36,1590738996000] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"aa",20,61,2] + - [3,"bb",20,93,1] + - [4,"bb",20,96,2] + - [5,"cc",21,34,1] + - [6,"aa",21,69,3] + - [7,"aa",21,105,3] + - + id: 3 + desc: 3 window,pk不同 + inputs: + - + columns : ["id int","c1 string", "c2 string", "c3 int","c4 bigint","c7 timestamp"] + indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7"] + rows: + - [1,"aa", "1", 20,30,1590738990000] + - [2,"aa", "2", 20,31,1590738991000] + - [3,"bb", "1", 20,32,1590738992000] + - [4,"bb", "2", 20,33,1590738993000] + - [5,"cc", "1", 21,34,1590738994000] + - [6,"aa", "1", 21,35,1590738995000] + - [7,"aa", "1", 21,36,1590738996000] + sql: | + SELECT id, c1, c2, c3, + count(id) OVER w1 as w1_count, + count(id) OVER w2 as w2_count, + sum(c4) OVER w3 as w3_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY c2 ORDER BY c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW), + w3 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING 
AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string", "c2 string", "c3 int","w1_count bigint","w2_count bigint", "w3_c4_sum bigint"] + rows: + - [1,"aa", "1", 20, 1, 1, 30] + - [2,"aa", "2", 20, 2, 1, 61] + - [3,"bb", "1", 20, 1, 2, 93] + - [4,"bb", "2", 20, 2, 2, 96] + - [5,"cc", "1", 21, 1, 3, 34] + - [6,"aa", "1", 21, 3, 4, 69] + - [7,"aa", "1", 21, 3, 4, 105] + + - id: 4 + desc: 简单rows window, union副表, 主表不进入窗口 40w + tags: ["TODO", "@baoxinqi, batch request unsupport"] + inputs: + - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 2, "aa", 4.0, 1590738993000 ] + - columns: [ "x1 string","x6 double","x7 timestamp" ] + indexs: [ "index1:x1:x7" ] + repeat: 400 + rows: + - [ "aa", 2.0, 1590738991000 ] + + sql: | + SELECT id, c1, c6, c7, count(id) OVER w1 as w1_cnt, distinct_count(id) OVER w1 as w1_dis_cnt FROM {0} WINDOW + w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 400000 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int", "c1 string", "c6 double", "c7 timestamp", "w1_cnt bigint", "w1_dis_cnt bigint" ] + rows: + - [ 2, "aa", 4.0, 1590738993000, 400001, 2 ] + - id: 5 + desc: 简单rows window, union副表, 主表不进入窗口3 4w + mode: batch-request-unsupport, cli-unsupport + inputs: + - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 2, "aa", 4.0, 1590738993000 ] + - columns: [ "x1 string","x6 double","x7 timestamp" ] + indexs: [ "index1:x1:x7" ] + repeat: 400 + rows: + - [ "aa", 2.0, 1590738991000 ] + + sql: | + SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_min_c6, count(id) OVER w1 as w1_cnt FROM {0} WINDOW + w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 400000 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int", "c1 string", "c6 double", "c7 timestamp", "w1_min_c6 
double", "w1_cnt bigint" ] + rows: + - [ 2, "aa", 4.0, 1590738993000, 2.0, 401 ] diff --git a/cases/integration_test/cluster/test_window_row_range.yaml b/cases/integration_test/cluster/test_window_row_range.yaml new file mode 100644 index 00000000000..476336fe4c0 --- /dev/null +++ b/cases/integration_test/cluster/test_window_row_range.yaml @@ -0,0 +1,172 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 简单rows window + inputs: + - + columns: ["id int", "c1 string","c6 double","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1.0, 1590738990000] + - [2, "aa", 2.0, 1590738991000] + - [3, "aa", 3.0, 1590738992000] + - [4, "aa", 4.0, 1590738993000] + - [5, "bb", 5.0, 1590738994000] + - [6, "bb", 6.0, 1590738995000] + - [7, "bb", 7.0, 1590738996000] + - [8, "bb", 8.0, 1590738997000] + - [9, "bb", 9.0, 1590738998000] + - [10, "cc", 1.0, 1590738993000] + - [11, "cc", 2.0, 1590738994000 ] + - [12, "cc", 3.0, 1590738995000 ] + - [13, "cc", 4.0, 1590738996000 ] + - [14, "cc", 5.0, 1590738997000 ] + - [15, "dd", 6.0, 1590738998000 ] + - [16, "dd", 7.0, 1590738999000 ] + sql: | + SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min 
double","w1_cnt bigint"] + rows: + - [ 1, "aa", 1.0, 1590738990000, 1.0, 1] + - [ 2, "aa", 2.0, 1590738991000, 1.0, 2] + - [ 3, "aa", 3.0, 1590738992000, 1.0, 3] + - [ 4, "aa", 4.0, 1590738993000, 2.0, 3] + - [ 5, "bb", 5.0, 1590738994000, 5.0, 1] + - [ 6, "bb", 6.0, 1590738995000, 5.0, 2] + - [ 7, "bb", 7.0, 1590738996000, 5.0, 3] + - [ 8, "bb", 8.0, 1590738997000, 6.0, 3] + - [ 9, "bb", 9.0, 1590738998000, 7.0, 3] + - [ 10, "cc", 1.0, 1590738993000, 1.0, 1] + - [ 11, "cc", 2.0, 1590738994000, 1.0, 2] + - [ 12, "cc", 3.0, 1590738995000, 1.0, 3] + - [ 13, "cc", 4.0, 1590738996000, 2.0, 3] + - [ 14, "cc", 5.0, 1590738997000, 3.0, 3] + - [ 15, "dd", 6.0, 1590738998000, 6.0, 1] + - [ 16, "dd", 7.0, 1590738999000, 6.0, 2] + - + id: 1 + desc: 简单rows window, union副表, 主表进入窗口 + mode: cluster-unsupport + inputs: + - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1, "aa", 1.0, 1590738990000 ] + - [ 2, "aa", 4.0, 1590738993000 ] + - [ 3, "bb", 5.0, 1590738994000 ] + - [ 4, "bb", 9.0, 1590738998000 ] + - [ 5, "cc", 1.0, 1590738993000 ] + - [ 6, "cc", 5.0, 1590738997000 ] + - [ 7, "dd", 7.0, 1590738999000 ] + - + columns: ["x1 string","x6 double","x7 timestamp"] + indexs: ["index1:x1:x7"] + rows: + - ["aa", 2.0, 1590738991000] + - ["aa", 3.0, 1590738992000] + - ["bb", 6.0, 1590738995000] + - ["bb", 7.0, 1590738996000] + - ["bb", 8.0, 1590738997000] + - ["cc", 2.0, 1590738994000 ] + - ["cc", 3.0, 1590738995000 ] + - ["cc", 4.0, 1590738996000 ] + - ["dd", 6.0, 1590738998000 ] + sql: | + SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW + w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS_RANGE + BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min double","w1_cnt bigint"] + rows: + - [ 1, "aa", 1.0, 1590738990000, 1.0, 1] + - [ 2, "aa", 
4.0, 1590738993000, 2.0, 3] + - [ 3, "bb", 5.0, 1590738994000, 5.0, 1] + - [ 4, "bb", 9.0, 1590738998000, 7.0, 3] + - [ 5, "cc", 1.0, 1590738993000, 1.0, 1] + - [ 6, "cc", 5.0, 1590738997000, 3.0, 3] + - [ 7, "dd", 7.0, 1590738999000, 6.0, 2] + - + id: 2 + desc: 2 window,pk不同 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c7 timestamp"] + indexs: ["index1:c1:c7", "index3:c3:c7"] + rows: + - [1,"aa",20,30,1590738990000] + - [2,"aa",20,31,1590738991000] + - [3,"bb",20,32,1590738992000] + - [4,"bb",20,33,1590738993000] + - [5,"cc",21,34,1590738994000] + - [6,"aa",21,35,1590738995000] + - [7,"aa",21,36,1590738996000] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY c3 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY c1 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"aa",20,61,2] + - [3,"bb",20,93,1] + - [4,"bb",20,96,2] + - [5,"cc",21,34,1] + - [6,"aa",21,69,1] + - [7,"aa",21,105,2] + - + id: 3 + desc: 3 window,pk不同 + inputs: + - + columns : ["id int","c1 string", "c2 string", "c3 int","c4 bigint","c7 timestamp"] + indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7"] + rows: + - [1,"aa", "1", 20,30,1590738990000] + - [2,"aa", "2", 20,31,1590738991000] + - [3,"bb", "1", 20,32,1590738992000] + - [4,"bb", "2", 20,33,1590738993000] + - [5,"cc", "1", 21,34,1590738994000] + - [6,"aa", "1", 21,35,1590738995000] + - [7,"aa", "1", 21,36,1590738996000] + sql: | + SELECT id, c1, c2, c3, + count(id) OVER w1 as w1_count, + count(id) OVER w2 as w2_count, + sum(c4) OVER w3 as w3_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY c1 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY c2 ORDER BY c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW), + w3 AS 
(PARTITION BY c3 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string", "c2 string", "c3 int","w1_count bigint","w2_count bigint", "w3_c4_sum bigint"] + rows: + - [1,"aa", "1", 20, 1, 1, 30] + - [2,"aa", "2", 20, 2, 1, 61] + - [3,"bb", "1", 20, 1, 2, 93] + - [4,"bb", "2", 20, 2, 2, 96] + - [5,"cc", "1", 21, 1, 2, 34] + - [6,"aa", "1", 21, 1, 3, 69] + - [7,"aa", "1", 21, 2, 3, 105] diff --git a/cases/integration_test/cluster/window_and_lastjoin.yaml b/cases/integration_test/cluster/window_and_lastjoin.yaml new file mode 100644 index 00000000000..c20e6e070ee --- /dev/null +++ b/cases/integration_test/cluster/window_and_lastjoin.yaml @@ -0,0 +1,620 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 简单拼表 + inputs: + - + columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, 1590738990000, 2.2] + - [3, "bb",2, 1590738990000, 3.3] + - [4, "cc",3, 1590738990000, 4.0] + - [5, "cc",3, 1590738991000, 5.0] + - [6, "cc",3, 1590738992000, 6.0] + - [7, "cc",2, 1590738993000, 7.0] + - + columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int"] + indexs: ["index2:merchant_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, 1] + - [1590738990000, 1] + - [1590738991000, 2] + - [1590738989000, 3] + - [1590738992000, 3] + sql: | + select id, card_no, merchant_id, trx_time, crd_lst_isu_dte, merchant_nbr from {0} + last join {1} order by {1}.crd_lst_isu_dte on {0}.merchant_id = {1}.merchant_nbr and {0}.trx_time >= {1}.crd_lst_isu_dte; + expect: + columns: ["id int", "card_no string", "merchant_id int", "trx_time timestamp", + "crd_lst_isu_dte timestamp", "merchant_nbr int"] + order: id + rows: + - [1, "aaaaaaaaaa", 1, 1590738989000, 1590738988000, 1] + - [2, "aaaaaaaaaa", 1, 1590738990000, 1590738990000, 1] + - [3, "bb", 2, 1590738990000, null, null] + - [4, "cc", 3, 1590738990000, 1590738989000, 3] + - [5, "cc", 3, 1590738991000, 1590738989000, 3] + - [6, "cc", 3, 1590738992000, 1590738992000, 3] + - [7, "cc", 2, 1590738993000, 1590738991000, 2] + - + id: 1 + desc: 三表拼表 + inputs: + - + columns : ["id int", "card_no string","merchant_id int", "user string", "trx_time timestamp", "trx_amt float"] + + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, "user1", 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, "user2", 1590738990000, 2.2] + - [3, "bb",2, "user3", 1590738990000, 3.3] + - [4, "cc",3, "user4", 1590738990000, 4.0] + - [5, "cc",3, "user5", 1590738991000, 5.0] + - [6, "cc",3, "user6", 1590738992000, 6.0] + - [7, "cc",2, "user7", 
1590738993000, 7.0] + - + columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int"] + indexs: ["index2:merchant_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, 1] + - [1590738990000, 1] + - [1590738991000, 2] + - [1590738989000, 3] + - [1590738992000, 3] + - columns: [ "std_ts timestamp", "username string" ] + indexs: [ "index2:username:std_ts" ] + rows: + - [ 1590738988000, "user1"] + - [ 1590738990000, "user1"] + - [ 1590738991000, "user2"] + - [ 1590738989000, "user2"] + - [ 1590738992000, "user3" ] + sql: | + select id, card_no, merchant_id, user, trx_time, crd_lst_isu_dte, merchant_nbr, std_ts, username from {0} + last join {1} order by {1}.crd_lst_isu_dte on {0}.merchant_id = {1}.merchant_nbr and {0}.trx_time >= {1}.crd_lst_isu_dte + last join {2} order by {2}.std_ts on {0}.user = {2}.username; + expect: + columns: ["id int", "card_no string", "merchant_id int", "user string", "trx_time timestamp", + "crd_lst_isu_dte timestamp", "merchant_nbr int", "std_ts timestamp", "username string"] + order: id + rows: + - [1, "aaaaaaaaaa", 1, "user1", 1590738989000, 1590738988000, 1, 1590738990000, "user1"] + - [2, "aaaaaaaaaa", 1, "user2", 1590738990000, 1590738990000, 1, 1590738991000, "user2"] + - [3, "bb", 2, "user3", 1590738990000, null, null, 1590738992000, "user3", ] + - [4, "cc", 3, "user4", 1590738990000, 1590738989000, 3, null, null] + - [5, "cc", 3, "user5", 1590738991000, 1590738989000, 3, null, null] + - [6, "cc", 3, "user6", 1590738992000, 1590738992000, 3, null, null] + - [7, "cc", 2, "user7", 1590738993000, 1590738991000, 2, null, null] + - + id: 2 + desc: 三表拼表2 + inputs: + - + columns : ["id int", "card_no string","merchant_id int", "user string", "trx_time timestamp", "trx_amt float"] + + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, "user1", 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, "user2", 1590738990000, 2.2] + - [3, "bb",2, "user3", 1590738990000, 3.3] + - [4, "cc",3, "user4", 1590738990000, 4.0] + - [5, "cc",3, "user5", 
1590738991000, 5.0] + - [6, "cc",3, "user6", 1590738992000, 6.0] + - [7, "cc",2, "user7", 1590738993000, 7.0] + - + columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int", "product_nbr bigint"] + indexs: ["index2:merchant_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, 1, 1001] + - [1590738990000, 1, 1002] + - [1590738991000, 2, 1003] + - [1590738989000, 3, 1004] + - [1590738992000, 3, 1005] + - columns: [ "std_ts timestamp", "product_id bigint" ] + indexs: [ "index2:product_id:std_ts" ] + rows: + - [ 1590738988000, 1001] + - [ 1590738990000, 1001] + - [ 1590738991000, 1001] + - [ 1590738989000, 1002] + - [ 1590738992000, 1002] + - [ 1590738993000, 1005] + sql: | + select id, card_no, merchant_id, user, trx_time, crd_lst_isu_dte, merchant_nbr, product_nbr, std_ts, product_id from {0} + last join {1} order by {1}.crd_lst_isu_dte on {0}.merchant_id = {1}.merchant_nbr and {0}.trx_time >= {1}.crd_lst_isu_dte + last join {2} order by {2}.std_ts on {1}.product_nbr = {2}.product_id; + expect: + columns: ["id int", "card_no string", "merchant_id int", "user string", "trx_time timestamp", + "crd_lst_isu_dte timestamp", "merchant_nbr int", "product_nbr bigint", "std_ts timestamp", "product_id bigint"] + order: id + rows: + - [1, "aaaaaaaaaa", 1, "user1", 1590738989000, 1590738988000, 1, 1001, 1590738991000, 1001] + - [2, "aaaaaaaaaa", 1, "user2", 1590738990000, 1590738990000, 1, 1002, 1590738992000, 1002] + - [3, "bb", 2, "user3", 1590738990000, null, null, null, null, null] + - [4, "cc", 3, "user4", 1590738990000, 1590738989000, 3, 1004, null, null] + - [5, "cc", 3, "user5", 1590738991000, 1590738989000, 3, 1004, null, null] + - [6, "cc", 3, "user6", 1590738992000, 1590738992000, 3, 1005, 1590738993000, 1005] + - [7, "cc", 2, "user7", 1590738993000, 1590738991000, 2, 1003, null, null] + - + id: 3 + desc: 窗口特征拼接副表 + inputs: + - + columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + + indexs: ["index1:card_no:trx_time"] + 
rows: + - [1, "aaaaaaaaaa",1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, 1590738990000, 2.2] + - [3, "bb",2, 1590738990000, 3.3] + - [4, "cc",3, 1590738990000, 4.0] + - [5, "cc",3, 1590738991000, 5.0] + - [6, "cc",3, 1590738992000, 6.0] + - [7, "cc",2, 1590738993000, 7.0] + - + columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int"] + indexs: ["index2:merchant_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, 1] + - [1590738990000, 1] + - [1590738991000, 2] + - [1590738989000, 3] + - [1590738992000, 3] + sql: select * from + (select + id, + card_no, + merchant_id, + trx_time, + sum(trx_amt) over w30d as sum_trx_amt, + count(merchant_id) over w10d as count_merchant_id + from {0} + window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW), + w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe + last join {1} order by {1}.crd_lst_isu_dte on trx_fe.merchant_id = {1}.merchant_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte; + expect: + columns: ["id int", "card_no string", "merchant_id int", "trx_time timestamp", + "sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp", + "merchant_nbr int"] + order: id + rows: + - [1, "aaaaaaaaaa", 1, 1590738989000, 1.1, 1, 1590738988000, 1] + - [2, "aaaaaaaaaa", 1, 1590738990000, 3.3, 2, 1590738990000, 1] + - [3, "bb", 2, 1590738990000, 3.3, 1, null, null] + - [4, "cc", 3, 1590738990000, 4.0, 1, 1590738989000, 3] + - [5, "cc", 3, 1590738991000, 9.0, 2, 1590738989000, 3] + - [6, "cc", 3, 1590738992000, 15.0, 3, 1590738992000, 3] + - [7, "cc", 2, 1590738993000, 22.0, 4, 1590738991000, 2] + - + id: 4 + desc: 3组窗口特征ID拼接 + inputs: + - + columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"] + indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7", "index4:c4:c7"] + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000] + - [ 2, "a", "aa", 
"aaa", "aaaa", "1.0", 1590738991000] + - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000] + - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000] + - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000] + - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000] + - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ] + - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ] + - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590739998000 ] + - [10, "b", "bb", "bbb", "bbbb", "1.0", 1590739999000 ] + sql: | + select * from + ( + select id as out1_id, c1, c6 from {0} + ) as out1 last join + ( + select id as out2_id, c2, sum(c6) over w2 as w2_sum_c6 from {0} + window w2 as (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as out2 on out1_id=out2_id last join + ( + select id as out3_id, c3, sum(c6) over w3 as w3_sum_c6 from {0} + window w3 as (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as out3 on out1_id=out3_id last join + ( + select id as out4_id, c4, sum(c6) over w4 as w4_sum_c6 from {0} + window w4 as (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as out4 on out1_id=out4_id; + expect: + columns: ["out1_id int", "c1 string", "c6 double", + "out2_id int", "c2 string", "w2_sum_c6 double", + "out3_id int", "c3 string", "w3_sum_c6 double", + "out4_id int", "c4 string", "w4_sum_c6 double",] + order: out1_id + rows: + - [ 1, "a", 1.0, 1, "aa", 1.0, 1, "aaa", 1.0, 1, "aaaa", 1.0] + - [ 2, "a", 1.0, 2, "aa", 2.0, 2, "aaa", 2.0, 2, "aaaa", 2.0] + - [ 3, "a", 1.0, 3, "aa", 3.0, 3, "aaa", 3.0, 3, "bbbb", 1.0] + - [ 4, "a", 1.0, 4, "aa", 4.0, 4, "aaa", 4.0, 4, "bbbb", 2.0] + - [ 5, "a", 1.0, 5, "aa", 5.0, 5, "bbb", 1.0, 5, "bbbb", 3.0] + - [ 6, "a", 1.0, 6, "aa", 6.0, 6, "bbb", 2.0, 6, "bbbb", 4.0] + - [ 7, "a", 1.0, 7, "bb", 1.0, 7, "bbb", 3.0, 7, "bbbb", 5.0] + - [ 8, "a", 1.0, 8, "bb", 2.0, 8, "bbb", 4.0, 8, "bbbb", 6.0] + - [ 9, "b", 1.0, 9, "bb", 
3.0, 9, "bbb", 5.0, 9, "bbbb", 7.0] + - [10, "b", 1.0, 10, "bb", 4.0, 10, "bbb", 6.0, 10, "bbbb", 8.0] + - + id: 5 + desc: 4组窗口特征ID拼接 + inputs: + - + columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"] + indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7", "index4:c4:c7"] + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000] + - [ 2, "a", "aa", "aaa", "aaaa", "1.0", 1590738991000] + - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000] + - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000] + - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000] + - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000] + - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ] + - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ] + - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590739998000 ] + - [10, "b", "bb", "bbb", "bbbb", "1.0", 1590739999000 ] + sql: | + select * from + ( + select id as out1_id, c1, sum(c6) over w1 as w1_sum_c6 from {0} + window w1 as (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as out1 last join + ( + select id as out2_id, c2, sum(c6) over w2 as w2_sum_c6 from {0} + window w2 as (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as out2 on out1_id=out2_id last join + ( + select id as out3_id, c3, sum(c6) over w3 as w3_sum_c6 from {0} + window w3 as (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as out3 on out1_id=out3_id last join + ( + select id as out4_id, c4, sum(c6) over w4 as w4_sum_c6 from {0} + window w4 as (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as out4 on out1_id=out4_id; + request_plan: | + SIMPLE_PROJECT(sources=(out1_id, c1, w1_sum_c6, out2_id, c2, w2_sum_c6, out3_id, c3, w3_sum_c6, out4.out4_id, out4.c4, out4.w4_sum_c6)) + REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out4_id), 
index_keys=) + REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out3_id), index_keys=) + REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out2_id), index_keys=) + RENAME(name=out1) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + RENAME(name=out2) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c2)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index2) + RENAME(name=out3) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c3)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index3) + RENAME(name=out4) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c4)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index4) + + cluster_request_plan: | + SIMPLE_PROJECT(sources=(out1_id, c1, w1_sum_c6, out2_id, c2, w2_sum_c6, out3_id, c3, w3_sum_c6, out4.out4_id, out4.c4, out4.w4_sum_c6)) + REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out4_id), index_keys=) + REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out3_id), index_keys=) + REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out2_id), index_keys=) + RENAME(name=out1) + SIMPLE_PROJECT(sources=(out1_id, c1, w1_sum_c6)) + REQUEST_JOIN(type=kJoinTypeConcat) + SIMPLE_PROJECT(sources=(id -> out1_id, c1)) + DATA_PROVIDER(request=auto_t0) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), 
orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + RENAME(name=out2) + SIMPLE_PROJECT(sources=(out2_id, c2, w2_sum_c6)) + REQUEST_JOIN(type=kJoinTypeConcat) + SIMPLE_PROJECT(sources=(id -> out2_id, c2)) + DATA_PROVIDER(request=auto_t0) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c2)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index2) + RENAME(name=out3) + SIMPLE_PROJECT(sources=(out3_id, c3, w3_sum_c6)) + REQUEST_JOIN(type=kJoinTypeConcat) + SIMPLE_PROJECT(sources=(id -> out3_id, c3)) + DATA_PROVIDER(request=auto_t0) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c3)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index3) + RENAME(name=out4) + SIMPLE_PROJECT(sources=(out4_id, c4, w4_sum_c6)) + REQUEST_JOIN(type=kJoinTypeConcat) + SIMPLE_PROJECT(sources=(id -> out4_id, c4)) + DATA_PROVIDER(request=auto_t0) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c4)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index4) + expect: + columns: ["out1_id int", "c1 string", "w1_sum_c6 double", + "out2_id int", "c2 string", "w2_sum_c6 double", + "out3_id int", "c3 string", "w3_sum_c6 double", + "out4_id int", "c4 string", "w4_sum_c6 double",] + order: out1_id + rows: + - [ 1, "a", 1.0, 1, "aa", 1.0, 1, "aaa", 1.0, 1, "aaaa", 1.0] + - [ 2, "a", 2.0, 2, "aa", 2.0, 2, "aaa", 2.0, 2, "aaaa", 2.0] + - [ 3, "a", 3.0, 3, "aa", 3.0, 3, "aaa", 3.0, 3, "bbbb", 1.0] + - [ 4, "a", 4.0, 4, "aa", 4.0, 4, "aaa", 4.0, 4, "bbbb", 2.0] + - [ 
5, "a", 5.0, 5, "aa", 5.0, 5, "bbb", 1.0, 5, "bbbb", 3.0] + - [ 6, "a", 6.0, 6, "aa", 6.0, 6, "bbb", 2.0, 6, "bbbb", 4.0] + - [ 7, "a", 7.0, 7, "bb", 1.0, 7, "bbb", 3.0, 7, "bbbb", 5.0] + - [ 8, "a", 8.0, 8, "bb", 2.0, 8, "bbb", 4.0, 8, "bbbb", 6.0] + - [ 9, "b", 1.0, 9, "bb", 3.0, 9, "bbb", 5.0, 9, "bbbb", 7.0] + - [10, "b", 2.0, 10, "bb", 4.0, 10, "bbb", 6.0, 10, "bbbb", 8.0] + - + id: 6 + desc: 窗口特征拼接多张副表, last join 条件表达式1 + inputs: + - + columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000] + - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000] + - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000] + - [ 4, "a", "aa", "aaa", "bbbb", "4.0", 1590738993000] + - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000] + - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000] + - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000 ] + - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000 ] + - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000 ] + - [10, "b", "bb", "bbb", "bbbb", "10.0", 1590738999000 ] + - columns: ["rid int", "x1 string", "x2 string", "x3 string", "x4 string", "x6 double", "x7 timestamp"] + indexs: ["index1:x1:x7", "index2:x2:x7", "index3:x3:x7", "index4:x4:x7", ] + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000 ] + - [ 2, "a", "aa", "aaa", "aaaa", "1.0", 1590738991000 ] + - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000 ] + - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000 ] + - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000 ] + - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000 ] + - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ] + - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ] + - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590738998000 ] + - [ 10, "b", "bb", "bbb", "bbbb", "1.0",1590738999000 ] + sql: | + select id, c1, c2, c3, c4, c6, c7, cur_hour, today + , 
w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6 + , t1.rid as t1_rid, t2.rid as t2_rid, t3.rid as t3_rid, t4.rid as t4_rid + from + ( + select id, c1, c2, c3, c4, c6, c7, hour(c7) as cur_hour, day(c7) as today + , sum(c6) over w1 as w1_sum_c6 + , max(c6) over w1 as w1_max_c6 + , min(c6) over w1 as w1_min_c6 + , avg(c6) over w1 as w1_avg_c6 + , count(c6) over w1 as w1_cnt_c6 + from {0} + window w1 as (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as w_out last join {1} as t1 order by t1.x7 on c1 = t1.x1 and c7 - 1000 >= t1.x7 + last join {1} as t2 order by t2.x7 on c2 = t2.x2 and c7 - 2000 >= t2.x7 + last join {1} as t3 order by t3.x7 on c3 = t3.x3 and c7 - 3000 >= t3.x7 + last join {1} as t4 order by t4.x7 on c4 = t4.x4 and c7 - 4000 >= t4.x7; + request_plan: | + SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 4000 >= t4.x7, left_keys=(), right_keys=(), index_keys=(c4)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 3000 >= t3.x7, left_keys=(), right_keys=(), index_keys=(c3)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 2000 >= t2.x7, left_keys=(), right_keys=(), index_keys=(c2)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 1000 >= t1.x7, left_keys=(), right_keys=(), index_keys=(c1)) + RENAME(name=w_out) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index1) + RENAME(name=t2) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index2) + RENAME(name=t3) + DATA_PROVIDER(type=Partition, 
table=auto_t1, index=index3) + RENAME(name=t4) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index4) + cluster_request_plan: | + SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid)) + REQUEST_JOIN(type=kJoinTypeConcat) + REQUEST_JOIN(type=kJoinTypeConcat) + REQUEST_JOIN(type=kJoinTypeConcat) + REQUEST_JOIN(type=kJoinTypeConcat) + RENAME(name=w_out) + SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=RowProject) + DATA_PROVIDER(request=auto_t0) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 1000 >= #35, left_keys=(), right_keys=(), index_keys=(#9)) + SIMPLE_PROJECT(sources=(#9 -> c1, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index1) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 2000 >= #35, left_keys=(), right_keys=(), index_keys=(#10)) + SIMPLE_PROJECT(sources=(#10 -> c2, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t2) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index2) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 3000 >= #35, left_keys=(), right_keys=(), index_keys=(#11)) + SIMPLE_PROJECT(sources=(#11 -> c3, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t3) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index3) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 4000 >= #35, left_keys=(), 
right_keys=(), index_keys=(#12)) + SIMPLE_PROJECT(sources=(#12 -> c4, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t4) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index4) + expect: + columns: ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp", + "cur_hour int32", "today int32", "w1_sum_c6 double", "w1_max_c6 double", + "w1_min_c6 double", "w1_avg_c6 double", "w1_cnt_c6 bigint", + "t1_rid int32", "t2_rid int32", "t3_rid int32", "t4_rid int32"] + order: id + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000, 15, 29, 1.0, 1.0, 1.0, 1.0, 1, NULL, NULL, NULL, NULL] + - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000, 15, 29, 3.0, 2.0, 1.0, 1.5, 2, 1, NULL, NULL, NULL ] + - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000, 15, 29, 6.0, 3.0, 1.0, 2.0, 3, 2 , 1, NULL, NULL] + - [ 4, "a", "aa", "aaa", "bbbb", "4.0", 1590738993000, 15, 29, 10.0, 4.0, 1.0, 2.5, 4, 3 , 2, 1, NULL] + - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000, 15, 29, 15.0, 5.0, 1.0, 3.0, 5, 4 , 3, NULL, NULL] + - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000, 15, 29, 21.0, 6.0, 1.0, 3.5, 6, 5 , 4, NULL, NULL] + - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000, 15, 29, 28.0, 7.0, 1.0, 4.0, 7, 6 , NULL, NULL, 3] + - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000, 15, 29, 36.0, 8.0, 1.0, 4.5, 8, 7 , NULL, 5, 4] + - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000, 15, 29, 9.0, 9.0, 9.0, 9.0, 1, NULL , 7, 6, 5] + - [ 10, "b", "bb", "bbb", "bbbb", "10.0",1590738999000,15, 29, 19.0, 10.0, 9.0, 9.5, 2, 9, 8, 7, 6] + + + - + id: 7 + desc: 窗口特征拼接多张副表, last join 条件表达式2 + inputs: + - + columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000] + - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000] + - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000] + - [ 4, "a", "aa", 
"aaa", "bbbb", "4.0", 1590738993000] + - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000] + - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000] + - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000 ] + - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000 ] + - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000 ] + - [10, "b", "bb", "bbb", "bbbb", "10.0", 1590738999000 ] + - columns: ["rid int", "x1 string", "x2 string", "x3 string", "x4 string", "x6 double", "x7 timestamp"] + indexs: ["index1:x1:x7", "index2:x2:x7", "index3:x3:x7", "index4:x4:x7", ] + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000 ] + - [ 2, "a", "aa", "aaa", "aaaa", "1.0", 1590738991000 ] + - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000 ] + - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000 ] + - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000 ] + - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000 ] + - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ] + - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ] + - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590738998000 ] + - [ 10, "b", "bb", "bbb", "bbbb", "1.0",1590738999000 ] + sql: | + select id, c1, c2, c3, c4, c6, c7, cur_hour, today + , w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6 + , t1.rid as t1_rid, t2.rid as t2_rid, t3.rid as t3_rid, t4.rid as t4_rid + from + ( + select id, c1, c2, c3, c4, c6, c7, hour(c7) as cur_hour, day(c7) as today + , sum(c6) over w1 as w1_sum_c6 + , max(c6) over w1 as w1_max_c6 + , min(c6) over w1 as w1_min_c6 + , avg(c6) over w1 as w1_avg_c6 + , count(c6) over w1 as w1_cnt_c6 + from {0} + window w1 as (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) as w_out last join {1} as t1 order by t1.x7 on c1 = t1.x1 and c7 - 1000 >= t1.x7 + last join {1} as t2 order by t2.x7 on w_out.c2 = t2.x2 and c7 - 2000 >= t2.x7 + last join {1} as t3 order by t3.x7 on w_out.c3 = t3.x3 and c7 - 3000 >= t3.x7 + last join {1} as t4 order by t4.x7 
on w_out.c4 = t4.x4 and c7 - 4000 >= t4.x7; + request_plan: | + SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 4000 >= t4.x7, left_keys=(), right_keys=(), index_keys=(w_out.c4)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 3000 >= t3.x7, left_keys=(), right_keys=(), index_keys=(w_out.c3)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 2000 >= t2.x7, left_keys=(), right_keys=(), index_keys=(w_out.c2)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 1000 >= t1.x7, left_keys=(), right_keys=(), index_keys=(c1)) + RENAME(name=w_out) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index1) + RENAME(name=t2) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index2) + RENAME(name=t3) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index3) + RENAME(name=t4) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index4) + cluster_request_plan: | + SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid)) + REQUEST_JOIN(type=kJoinTypeConcat) + REQUEST_JOIN(type=kJoinTypeConcat) + REQUEST_JOIN(type=kJoinTypeConcat) + REQUEST_JOIN(type=kJoinTypeConcat) + RENAME(name=w_out) + SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=RowProject) + DATA_PROVIDER(request=auto_t0) + 
PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 1000 >= #35, left_keys=(), right_keys=(), index_keys=(#9)) + SIMPLE_PROJECT(sources=(#9 -> c1, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index1) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 2000 >= #35, left_keys=(), right_keys=(), index_keys=(#10)) + SIMPLE_PROJECT(sources=(#10 -> w_out.c2, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t2) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index2) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 3000 >= #35, left_keys=(), right_keys=(), index_keys=(#11)) + SIMPLE_PROJECT(sources=(#11 -> w_out.c3, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t3) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index3) + REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 4000 >= #35, left_keys=(), right_keys=(), index_keys=(#12)) + SIMPLE_PROJECT(sources=(#12 -> w_out.c4, #14 -> c7)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t4) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index4) + expect: + columns: ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp", + "cur_hour int32", "today int32", "w1_sum_c6 double", "w1_max_c6 double", + "w1_min_c6 double", "w1_avg_c6 double", "w1_cnt_c6 bigint", + "t1_rid int32", "t2_rid int32", "t3_rid int32", "t4_rid int32"] + order: id + rows: + - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000, 15, 29, 1.0, 1.0, 1.0, 1.0, 1, NULL, NULL, NULL, NULL] + - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000, 15, 29, 3.0, 
2.0, 1.0, 1.5, 2, 1, NULL, NULL, NULL ] + - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000, 15, 29, 6.0, 3.0, 1.0, 2.0, 3, 2 , 1, NULL, NULL] + - [ 4, "a", "aa", "aaa", "bbbb", "4.0", 1590738993000, 15, 29, 10.0, 4.0, 1.0, 2.5, 4, 3 , 2, 1, NULL] + - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000, 15, 29, 15.0, 5.0, 1.0, 3.0, 5, 4 , 3, NULL, NULL] + - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000, 15, 29, 21.0, 6.0, 1.0, 3.5, 6, 5 , 4, NULL, NULL] + - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000, 15, 29, 28.0, 7.0, 1.0, 4.0, 7, 6 , NULL, NULL, 3] + - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000, 15, 29, 36.0, 8.0, 1.0, 4.5, 8, 7 , NULL, 5, 4] + - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000, 15, 29, 9.0, 9.0, 9.0, 9.0, 1, NULL , 7, 6, 5] + - [ 10, "b", "bb", "bbb", "bbbb", "10.0",1590738999000,15, 29, 19.0, 10.0, 9.0, 9.5, 2, 9, 8, 7, 6] diff --git a/cases/integration_test/data_expiration/test_data_expiration.yaml b/cases/integration_test/data_expiration/test_data_expiration.yaml new file mode 100644 index 00000000000..d686692bd92 --- /dev/null +++ b/cases/integration_test/data_expiration/test_data_expiration.yaml @@ -0,0 +1,70 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: +- id: 0 + desc: ttl_type=latest,ttl=4,insert 10 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:4:latest"] + rows: + - ["bb", 2, 3, 1590738989000] + - ["bb", 4, 5, 1590738990000] + - ["bb", 6, 7, 1590738991000] + - ["bb", 8, 9, 1590738992000] + - ["bb", 10, 11, 1590738993000] + - ["bb", 12, 13, 1590738994000] + - ["bb", 14, 15, 1590738995000] + - ["bb", 16, 17, 1590738996000] + - ["bb", 18, 19, 1590738997000] + - ["bb", 20, 21, 1590738998000] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 20, 21] + - ["bb", 18, 19] + - ["bb", 16, 17] + - ["bb", 14, 15] + +- id: 16 + desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 4, 5, "{currentTime}-200"] + - ["bb", 6, 7, "{currentTime}-599000"] + - ["bb", 8, 9, "{currentTime}-600000"] + - ["bb", 10, 11, "{currentTime}-600005"] + - ["bb", 12, 13, "{currentTime}-600006"] + - ["bb", 14, 15, "{currentTime}-600007"] + - ["bb", 16, 17, "{currentTime}-600008"] + - ["bb", 18, 19, "{currentTime}-600009"] + - ["bb", 20, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 3] + - ["bb", 4, 5] + - ["bb", 6, 7] \ No newline at end of file diff --git a/cases/integration_test/ddl/test_create.yaml b/cases/integration_test/ddl/test_create.yaml new file mode 100644 index 00000000000..7319230b3ac --- /dev/null +++ b/cases/integration_test/ddl/test_create.yaml @@ -0,0 +1,560 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 创建所有类型的表 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 1 + desc: 创建两个相同时间列的索引的表 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( + c1 string, + c2 int, + c3 timestamp, + c4 timestamp, + index(key=(c1),ts=c4), + index(key=(c2),ts=c4)); + insert: | + insert into {0} values ("aa", 1, 1590738990000, 1590738989000); + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + rows: + - ["aa", 1, 1590738990000, 1590738989000] + - + id: 2 + desc: 创建两个不同时间列的索引的表 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4","index2:c2:c3"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + rows: + - [aa,1,1590738990000,1590738989000] + - + id: 3 + desc: 创建一个联合索引的表 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1|c2:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 
timestamp","c4 timestamp"] + rows: + - [aa,1,1590738990000,1590738989000] + - + id: 4 + desc: NotNull的列为index + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["c1 string NOT NULL","c2 int","c3 timestamp","c4 timestamp"] + create: | + create table {0} ( + c1 string NOT NULL, + c2 int, + c3 timestamp, + c4 timestamp, + index(key=(c1),ts=c4)); + insert: | + insert into {0} values ("aa", 1, 1590738990000, 1590738989000); + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + rows: + - [aa,1,1590738990000,1590738989000] + - + id: 5 + desc: 表名以数字开头 + sqlDialect: ["HybridSQL"] + sql: create table 1aaa(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 6-1 + desc: 表名为保留关键字 + sqlDialect: ["HybridSQL"] + sql: create table order(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 6-2 + desc: 表名为非保留关键字 + sqlDialect: ["HybridSQL"] + inputs: + - name: table + sql: create table table(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: true + - + id: 7 + desc: 列名以数字开头 + sqlDialect: ["HybridSQL"] + sql: create table {auto}(1c string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 8 + desc: 列名为保留关键字 + sqlDialect: ["HybridSQL"] + sql: create table {auto}(use string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 9 + desc: 语句缺少分号 + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: true + - + id: 10 + desc: 列的类型不存在 + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 varchar2 NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 11 + desc: index指定的col不存在 + 
sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c5),ts=c4,ttl=0m)); + expect: + success: false + - + id: 12 + desc: index指定的ts不存在 + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c5,ttl=0m)); + expect: + success: false + - + id: 13 + desc: 创建的index不指定ts + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1))); + expect: + success: true + - + id: 14 + desc: 创建index不指定col + mode: standalone-unsupport + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(ts=c4,ttl=0m)); + expect: + success: true + - + id: 15 + desc: ts边界-指定的ts为string + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 string,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + expect: + success: false + - + id: 16 + desc: ts边界-指定的ts为int + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + expect: + success: false + - + id: 17 + desc: ts边界-指定的ts为smallint + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 smallint,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + expect: + success: false + - + id: 18 + desc: ts边界-指定的ts为date + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 date,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + expect: + success: false + - + id: 19 + desc: ts边界-指定的ts为float + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 float,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + expect: + success: false + - + id: 20 + desc: ts边界-指定的ts为double + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 double,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + expect: + success: 
false + - + id: 21 + desc: ts边界-指定的ts为bool + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1 string NOT NULL,c2 bool,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + expect: + success: false + - + id: 22 + desc: 表名使用特殊字符 + sqlDialect: ["HybridSQL"] + sql: create table auto$#kJKytImk(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 23 + desc: 列名使用特殊字符 + sqlDialect: ["HybridSQL"] + sql: create table {auto}(c1$# string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 24 + desc: 指定的ts为bigint + inputs: + - + columns : ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c2"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"] + - + id: 25 + desc: 指定的ts为bigint+ttl + sqlDialect: ["HybridSQL"] + inputs: + - + create: create table {0} (c1 string NOT NULL,c2 bigint,c3 timestamp, c4 timestamp,index(key=(c1),ts=c2,ttl=0m)); + insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000); + sql: select * from {0}; + expect: + columns: ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"] + - + id: 26 + desc: 创建已经存在的表 + inputs: + - + columns : ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + sql: create table {0}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: false + - + id: 27 + desc: key边界-bigint为索引列 + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c4:c7"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + - + id: 28 + desc: 
key边界-int为索引列 + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + - + id: 29 + desc: key边界-timestamp为索引列 + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c7:c4"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + - + id: 30 + desc: key边界-date为索引列 + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c8),ts=c7)); + expect: + success: true + - + id: 31 + desc: key边界-float为索引列 + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c5),ts=c7)); + expect: + success: false + - + id: 32 + desc: key边界-double为索引列 + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c6),ts=c7)); + expect: + success: false + - + id: 33 + desc: key边界-smallint为索引列 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c2:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 34 
+ desc: key边界-bool类型为索引列 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c9:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 35 + desc: key边界-key和ts为同一列 + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c4)); + expect: + success: true + - id: 36 + desc: create col with __prefix + sqlDialect: ["HybridSQL"] + tags: ["TODO", "@chenjing create with __prefix"] + sql: | + create table {auto} ( + __c1 string, __c3 int, __ts bigint, + index(key=__c1, ts=__ts)); + expect: + columns: ["__c1 string","__c3 int", "__ts bigint"] + - + id: 37 + desc: create with replica num + sqlDialect: ["HybridSQL"] + mode: standalone-unsupport + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3), ts=c4)) + options ( + replicanum = 2 + ); + expect: + success: true + - + id: 38 + desc: create with replica num and distribution + mode: standalone-unsupport + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3),ts=c4)) + options ( + replicanum = 2, + distribution = [ ('{tb_endpoint_0}', ['{tb_endpoint_1}'])] + ); + expect: + success: true + - + id: 39 + desc: create with replica num and distribution + mode: standalone-unsupport + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3),ts=c4)) + options ( + replicanum = 3, + distribution = [ ('{tb_endpoint_0}', ['{tb_endpoint_1}'])] 
+ ); + expect: + success: false + - + id: 40 + desc: create with replica num and distribution + mode: standalone-unsupport + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3),ts=c4)) + options ( + replicanum = 2, + distribution = [ ('{tb_endpoint_0}', ['{tb_endpoint_0}'])] + ); + expect: + success: false + - + id: 41 + desc: create with partition num + sqlDialect: ["HybridSQL"] +# mode: standalone-unsupport + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3), ts=c4)) + options ( + partitionnum = 8 + ); + expect: + success: true + - + id: 42 + desc: create with partition num + sqlDialect: ["HybridSQL"] + mode: standalone-unsupport + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3), ts=c4)) + options ( + replicanum = 2, + partitionnum = 8 + ); + expect: + success: true + - + id: 43 + desc: no index + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date); + expect: + success: true + - + id: 44 + desc: bool-insert-1 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",1] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 45 + desc: create with two no ts index + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3), ttl=(10m,10), ttl_type=absorlat), + index(key=(c4), ttl=(10m,10), ttl_type=absorlat)); + 
expect: + success: true + - + id: 46 + desc: one has ts and another has not + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3), ttl=(10m,10), ttl_type=absorlat), + index(key=(c4), ts=c4, ttl=(10m,10), ttl_type=absorlat)); + expect: + success: true + - + id: 47 + desc: create with only key + sqlDialect: ["HybridSQL"] + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c3)), + index(key=(c4))); + expect: + success: true + - + id: 48 + desc: insert min int and max int + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( + id int64, + order_0_timestamp timestamp, + c_2_int32 int32, + index(key=(id),ts=order_0_timestamp)); + insert: | + insert into {0} values + (0,1538443518561,-2147483648); + sql: select * from {0}; + expect: + success: true diff --git a/cases/integration_test/ddl/test_create_index.yaml b/cases/integration_test/ddl/test_create_index.yaml new file mode 100644 index 00000000000..5549a5db039 --- /dev/null +++ b/cases/integration_test/ddl/test_create_index.yaml @@ -0,0 +1,768 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 冒烟测试 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=100m, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 100min + ttlType: kAbsoluteTime + - + id: 1 + desc: 指定多个列 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c1,c2) OPTIONS (ts=c4, ttl=100m, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c1","c2"] + ts: "c4" + ttl: 100min + ttlType: kAbsoluteTime + - + id: 2 + desc: 不指定ts + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c1,c2) OPTIONS (ttl=100, ttl_type=absolute); + expect: + success: false + - + id: 3 + desc: 不指定ttl + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 4 + desc: 不指定ttl_type + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=100m); + - desc {0}; + expect: + success: true + idxs: + - 
+ keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 100min + ttlType: kAbsoluteTime + - + id: 5 + desc: ttl_type=latest,ttl=1d + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1d, ttl_type=latest); + expect: + success: false + - + id: 6 + desc: ttl_type=absolute,ttl=1d + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1d, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 1440min + ttlType: kAbsoluteTime + - + id: 7 + desc: ttl_type=absolute,ttl=1h + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1h, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 60min + ttlType: kAbsoluteTime + - + id: 8 + desc: ttl_type=absolute,ttl=1m + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1m, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 1min + ttlType: kAbsoluteTime + - + id: 9 + desc: ttl_type=absolute,ttl=1s + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + 
rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1s, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 1min + ttlType: kAbsoluteTime + - + id: 10 + desc: ttl_type=absolute,ttl=1 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1, ttl_type=absolute); + expect: + success: false + - + id: 11 + desc: ttl_type=absolute,ttl=0 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=0, ttl_type=absolute); + expect: + success: false + - + id: 12 + desc: ttl_type=absolute,ttl=0m + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=0m, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 13 + desc: ttl_type=latest,ttl=0 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=0, ttl_type=latest); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 0 + ttlType: kLatestTime + - + id: 14 + desc: ttl_type=latest,ttl=100 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: 
["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=100, ttl_type=latest); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 100 + ttlType: kLatestTime + - + id: 15 + desc: ttl_type=absandlat,ttl=(10m,10) + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,10), ttl_type=absandlat); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 10min&&10 + ttlType: kAbsAndLat + - + id: 16 + desc: ttl_type=absorlat,ttl=(10m,10) + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,10), ttl_type=absorlat); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c4" + ttl: 10min||10 + ttlType: kAbsOrLat + - + id: 17 + desc: ttl_type=absandlat,ttl=(10,10m) + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10,10m), ttl_type=absandlat); + expect: + success: false + - + id: 18 + desc: ttl_type=absorlat,ttl=(10,10m) + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10,10m), ttl_type=absorlat); + expect: + success: false + - + id: 19 + desc: ttl_type为其他字符 + inputs: + - + columns 
: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1, ttl_type=test); + expect: + success: false + - + id: 20 + desc: ttl为字符 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=aaa, ttl_type=absolute); + expect: + success: false + - + id: 21 + desc: 指定ttl_type=absolute,数据过期 + mode: standalone-unsupport + tags: ["单机版bug,添加索引后,select结果错误,@denglong,https://github.com/4paradigm/OpenMLDB/issues/708"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 3, 1590738990000,"{currentTime}-60"] + - [4,"aa", 1, 1590738990000,"{currentTime}"] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=10m, ttl_type=absolute); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [4,"aa", 1, 1590738990000] + - + id: 22 + desc: 指定ttl_type=latest,部分数据过期 + tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,1590738990000] + - [2,"aa", 1, 1590738990000,1590738991000] + - [3,"aa", 3, 1590738990000,1590738992000] + - [4,"aa", 1, 1590738990000,1590738993000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=2, ttl_type=latest); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [2,"aa", 1, 1590738990000] + - [4,"aa", 1, 1590738990000] + - + id: 23 + desc: 
指定ttl_type=absandlat,部分数据过期 + tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,1590738990000] + - [2,"aa", 1, 1590738990000,1590738991000] + - [3,"aa", 1, 1590738990000,1590738992000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absandlat); + - select * from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp","c4 timestamp"] + order: id + rows: + - [2,"aa", 1, 1590738990000,1590738990000] + - [3,"aa", 1, 1590738990000,1590738990000] + - + id: 24 + desc: 指定ttl_type=absorlat,部分数据过期 + inputs: + - + columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:(10m,2):absorlat"] + rows: + - ["aa", 1, 1590738990000,1590738990000] + - ["aa", 1, 1590738990000,1590738990000] + - ["aa", 1, 1590738990000,1590738990000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat); + - select * from {0} where c2 = 1; + expect: + count: 0 + - + id: 25 + desc: 指定ttl_type=absandlat,部分数据过期-边界 + tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [4,"aa", 1, 1590738990000,"{currentTime}"] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absandlat); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [3,"aa", 1, 1590738990000] + - [4,"aa", 1, 1590738990000] + - + id: 26 + desc: 指定ttl_type=absandlat,部分数据过期-边界2 + mode: standalone-unsupport + tags: 
["单机版bug,添加索引后,select结果错误,@denglong,https://github.com/4paradigm/OpenMLDB/issues/708"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-500000"] + - [3,"aa", 1, 1590738990000,"{currentTime}-500000"] + - [4,"aa", 1, 1590738990000,"{currentTime}"] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,1), ttl_type=absandlat); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [2,"aa", 1, 1590738990000] + - [3,"aa", 1, 1590738990000] + - [4,"aa", 1, 1590738990000] + - + id: 27 + desc: 指定ttl_type=absorlat,部分数据过期-边界 + mode: standalone-unsupport + tags: ["单机版bug,添加索引后,select结果错误,@denglong,https://github.com/4paradigm/OpenMLDB/issues/708"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 1, 1590738990000,"{currentTime}-500000"] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [3,"aa", 1, 1590738990000] + - + id: 28 + desc: 指定ttl_type=absorlat,部分数据过期-边界2 + tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 1, 1590738990000,"{currentTime}-500000"] + - [4,"aa", 1, 1590738990000,"{currentTime}-400000"] + - [5,"aa", 1, 1590738990000,"{currentTime}"] + sqls: + - CREATE INDEX index1 ON {0} 
(c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [4,"aa", 1, 1590738990000] + - [5,"aa", 1, 1590738990000] + - + id: 29 + desc: 先创建索引,在插入数据,测试过期-absolute + mode: standalone-unsupport + tags: ["单机版bug,添加索引后,select结果错误,@denglong,https://github.com/4paradigm/OpenMLDB/issues/708"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 3, 1590738990000,"{currentTime}-60"] + - [4,"aa", 1, 1590738990000,"{currentTime}"] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=10m, ttl_type=absolute); + - insert into {0} values (5,'aa',1,1590738990000L,1590738990000L); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [4,"aa", 1, 1590738990000] + - + id: 30 + desc: 先创建索引,在插入数据,测试过期-latest + tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,1590738990000] + - [2,"aa", 1, 1590738990000,1590738991000] + - [3,"aa", 3, 1590738990000,1590738992000] + - [4,"aa", 1, 1590738990000,1590738993000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=2, ttl_type=latest); + - insert into {0} values (5,'aa',1,1590738990000L,1590738994000L); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [4,"aa", 1, 1590738990000] + - [5,"aa", 1, 1590738990000] + - + id: 31 + desc: 先创建索引,在插入数据,测试过期-absandlat + mode: standalone-unsupport + tags: 
["单机版bug,添加索引后,select结果错误,@denglong,https://github.com/4paradigm/OpenMLDB/issues/708"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-500000"] + - [3,"aa", 1, 1590738990000,"{currentTime}-500000"] + - [4,"aa", 1, 1590738990000,"{currentTime}"] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,1), ttl_type=absandlat); + - insert into {0} values (5,'aa',1,1590738990000L,1590738994000L); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [2,"aa", 1, 1590738990000] + - [3,"aa", 1, 1590738990000] + - [4,"aa", 1, 1590738990000] + - + id: 32 + desc: 先创建索引,在插入数据,测试过期-absorlat + mode: standalone-unsupport + tags: ["单机版bug,添加索引后,select结果错误,@denglong,https://github.com/4paradigm/OpenMLDB/issues/708"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 1, 1590738990000,"{currentTime}-500000"] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat); + - insert into {0} values (5,'aa',1,1590738990000L,1590738994000L); + - select id,c1,c2,c3 from {0} where c2 = 1; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [3,"aa", 1, 1590738990000] + - + id: 33 + desc: key和ts相同 + tags: ["TODO","key和ts相同,认为是相同的索引要添加失败,目前添加成功,@denglong"] + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c1) OPTIONS (ts=c4, ttl=100m, ttl_type=absolute); + - desc {0}; + expect: + success: false + - + id: 34 + 
desc: 创建索引,ts为一个新的列 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c3, ttl=100m, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "c3" + ttl: 100min + ttlType: kAbsoluteTime + - + id: 35 + desc: 创建一个没有ts的索引 + inputs: + - + columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [aa,1,1590738990000,1590738989000] + sqls: + - CREATE INDEX index1 ON {0} (c2) OPTIONS (ttl=100m, ttl_type=absolute); + - desc {0}; + expect: + success: true + idxs: + - + keys: ["c1"] + ts: "c4" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c2"] + ts: "-" + ttl: 100min + ttlType: kAbsoluteTime \ No newline at end of file diff --git a/cases/integration_test/ddl/test_create_no_index.yaml b/cases/integration_test/ddl/test_create_no_index.yaml new file mode 100644 index 00000000000..f29afdf4717 --- /dev/null +++ b/cases/integration_test/ddl/test_create_no_index.yaml @@ -0,0 +1,284 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 创建表不指定索引 + inputs: + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] + sql: desc {0}; + expect: + idxs: + - + keys: ["id"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 1 + desc: 第一列为smallint + inputs: + - + create: | + create table {0} ( + c2 smallint not null, + c3 float not null, + c4 double not null, + c5 bigint not null, + c6 string not null, + c7 timestamp not null, + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c2"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 2 + desc: 第一列为int + inputs: + - + create: | + create table {0} ( + c1 int not null, + c2 smallint not null, + c3 float not null, + c4 double not null, + c5 bigint not null, + c6 string not null, + c7 timestamp not null, + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c1"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 3 + desc: 第一列为long + inputs: + - + create: | + create table {0} ( + c5 bigint not null, + c6 string not null, + c7 timestamp not null, + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c5"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 4 + desc: 第一列为float + inputs: + - + create: | + create table {0} ( + c3 float not null, + c4 double not null, + c5 bigint not null, + c6 string not null, + c7 timestamp not null, + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c5"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 5 + desc: 第一列为double + inputs: + - + create: | + create table {0} ( + c4 double not null, + c5 bigint not null, + c6 string not null, + c7 
timestamp not null, + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c5"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 6 + desc: 第一列为string + inputs: + - + create: | + create table {0} ( + c6 string not null, + c7 timestamp not null, + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c6"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 7 + desc: 第一列为timestamp + inputs: + - + create: | + create table {0} ( + c7 timestamp not null, + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c7"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 8 + desc: 第一列为date + inputs: + - + create: | + create table {0} ( + c8 date not null, + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c8"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 9 + desc: 第一列为bool + inputs: + - + create: | + create table {0} ( + c9 bool not null + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c9"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 10 + desc: 只有一列 + inputs: + - + create: | + create table {0} ( + c7 timestamp + ); + sql: desc {0}; + expect: + idxs: + - + keys: ["c7"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + id: 11 + desc: 不指定索引创建表,然后增加索引 + tags: ["TODO","还不支持增加索引时指定新的ts列"] + inputs: + - + create: | + create table {0} ( + c5 bigint not null, + c6 string not null, + c7 timestamp not null, + c8 date not null, + c9 bool not null + ); + sqls: + - "CREATE INDEX index1 ON {0} (c6) OPTIONS (ts=c7, ttl=100m, ttl_type=absolute);" + - "desc {0};" + expect: + idxs: + - + keys: ["c5"] + ts: "-" + ttl: 0min + ttlType: kAbsoluteTime + - + keys: ["c6"] + ts: "c7" + ttl: 100min + ttlType: kAbsoluteTime + - + id: 16 + desc: 创建表指定索引,没有默认索引 + inputs: + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not 
null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] + indexs: ["index1:c1:c5"] + sql: desc {0}; + expect: + idxs: + - + keys: ["c1"] + ts: "c5" + ttl: 0min + ttlType: kAbsoluteTime diff --git a/cases/integration_test/ddl/test_options.yaml b/cases/integration_test/ddl/test_options.yaml new file mode 100644 index 00000000000..1c8ed43ad7d --- /dev/null +++ b/cases/integration_test/ddl/test_options.yaml @@ -0,0 +1,430 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 创建表时没有options + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + expect: + success: true + - + id: 1 + desc: 冒烟测试 + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 3, + distribution = [ ('{tb_endpoint_1}', [ '{tb_endpoint_0}','{tb_endpoint_2}' ])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 3 + - + id: 2 + desc: 创建表时没有partitionnum + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + replicanum = 1, + distribution = [ ('{tb_endpoint_0}',[])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 8 + replicaNum: 1 + - + id: 3 + desc: 创建表时没有replicanum + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + distribution = [ ('{tb_endpoint_0}',['{tb_endpoint_1}','{tb_endpoint_2}'])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 3 + - + id: 4 + desc: 创建表时没有distribution + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 3 + ); + expect: + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 3 + - + id: 5 + desc: distribution多个 + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 2, + replicanum = 3, + distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' 
]),('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] + ); + expect: + success: false + - + id: 6 + desc: partitionnum=0,指定distribution + tags: ["TODO","bug修复后验证"] + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 0, + replicanum = 3, + distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 3 + - + id: 7 + desc: partitionnum=10 + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 10, + replicanum = 3 + ); + expect: + name: t3 + success: true + options: + partitionNum: 10 + replicaNum: 3 + - + id: 8 + desc: replicanum=0 + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 0, + distribution = [ ('{tb_endpoint_0}',[])] + ); + expect: + success: false + - + id: 9 + desc: replicanum=1 + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 1, + distribution = [ ('{tb_endpoint_0}',[])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 1 + - + id: 10 + desc: replicanum=4 + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 4, + distribution = [ ('{tb_endpoint_0}',[])] + ); + expect: + success: false + - + id: 11 + desc: distribution没有指定follower + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 
timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 1, + distribution = [ ('{tb_endpoint_0}')] + ); + expect: + success: false + - + id: 12 + desc: distribution的个数和replicanum对不上 + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 1, + distribution = [ ('{tb_endpoint_0}',['{tb_endpoint_1}'])] + ); + expect: + success: false + - + id: 13 + desc: distribution的个数和partitionnum对不上 + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 1, + distribution = [ ('{tb_endpoint_0}',[]),('{tb_endpoint_1}',[])] + ); + expect: + success: false + - + id: 14 + desc: distribution=[] + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 1, + distribution = [] + ); + expect: + success: false + - + id: 15 + desc: partitionnum为字符 + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = a, + replicanum = 1, + distribution = [ ('{tb_endpoint_0}',[])] + ); + expect: + success: false + - + id: 16 + desc: replicanum为字符 + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = a, + distribution = [ ('{tb_endpoint_0}',[])] + ); + expect: + success: false + - + id: 17 + desc: 只有partitionnum + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1 + ); + expect: + name: t3 + success: true + options: + 
partitionNum: 1 + replicaNum: 3 + - + id: 18 + desc: 只有replicanum + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + replicanum = 1 + ); + expect: + name: t3 + success: true + options: + partitionNum: 8 + replicaNum: 1 + - + id: 19 + desc: 没有replicaNum,distribution的个数和tablet数量不一致 + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + distribution = [ ('{tb_endpoint_0}', [])] + ); + expect: + success: false + - + id: 20 + desc: distribution指定的tablet不存在 + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 1, + distribution = [ ('{tb_endpoint_0}1',[])] + ); + expect: + success: false + - + id: 21 + desc: partitionnum大于distribution的个数 + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 4, + replicanum = 1, + distribution = [ ('{tb_endpoint_0}',[])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 4 + replicaNum: 1 + - + id: 22 + desc: test-case + mode: standalone-unsupport + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + replicaNum: 3 + partitionNum: 1 + distribution: + - leader: "{tb_endpoint_1}" + followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + sql: select * from {0}; + expect: + name: "{0}" + success: true + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + options: + partitionNum: 1 + 
replicaNum: 3 + - + id: 23 + tags: ["TODO","bug修复后验证"] + desc: partitionnum=0,没有指定distribution + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 0, + replicanum = 3 + ); + expect: + success: false + - + id: 24 + desc: 没有partitionnum和replicanum,指定distribution + tags: ["TODO","bug修复后验证"] + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] + ); + expect: + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 3 + + + + + + + + + + + + + + diff --git a/cases/integration_test/ddl/test_ttl.yaml b/cases/integration_test/ddl/test_ttl.yaml new file mode 100644 index 00000000000..ba2456856c1 --- /dev/null +++ b/cases/integration_test/ddl/test_ttl.yaml @@ -0,0 +1,322 @@ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 指定ttl-单位d + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650d)); + expect: + success: true + - + id: 1 + desc: 指定ttl-单位h + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650h)); + expect: + success: true + - + id: 2 + desc: 指定ttl-单位m + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650m)); + expect: + success: true + - + id: 3 + desc: 指定ttl-没有单位 + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650)); + expect: + success: false + - + id: 4 + desc: ttl_type=absolute-没有单位 + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650,ttl_type=absolute)); + expect: + success: false + - + id: 5 + desc: 
ttl_type=latest-带单位 + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650m,ttl_type=latest)); + expect: + success: false + - + id: 6 + desc: ttl_type=absolute-ttl=(3650m) + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=(3650m),ttl_type=absolute)); + expect: + success: true + - + id: 7 + desc: ttl_type=latest-ttl=(3650) + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=(3650),ttl_type=latest)); + expect: + success: false + - + id: 8 + desc: ttl=0m + inputs: + - + create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=0m)); + insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000); + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + rows: + - ["aa",1,1590738990000,1590738989000] + - + id: 9 + desc: ttl为字符 + sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=aa)); + expect: + success: false + - + id: 10 + desc: 指定ttl_type=absolute + inputs: + - + create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=0m, ttl_type=absolute)); + insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000); + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + rows: + - ["aa",1,1590738990000,1590738989000] + - + id: 11 + desc: 指定ttl_type=latest + inputs: + - + create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=0, ttl_type=latest)); + insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000); + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + rows: + - ["aa",1,1590738990000,1590738989000] + - + id: 12 + desc: 指定ttl_type为其他字符 + sql: create 
table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0,ttl_type=aaa)); + expect: + success: false + - + id: 13 + desc: ttl_type=absorlat + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=absorlat)); + expect: + success: true + - + id: 14 + desc: ttl_type=absorlat,ttl=(10,10m) + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c7, ttl=(10,10m), ttl_type=absandlat)); + expect: + success: false + - + id: 15 + desc: ttl_type=absandlat + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=absandlat)); + expect: + success: true + - + id: 16 + desc: ttl_type=absandlat,ttl=(10,10m) + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c7, ttl=(10,10m), ttl_type=absandlat)); + expect: + success: false + - + id: 17 + desc: ttl_type=latest,ttl带单位 + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c7, ttl=10m, ttl_type=latest)); + expect: + success: false + - + id: 18 + desc: ttl_type=latest,ttl=(10m,10) + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=latest)); + expect: + success: false + - + id: 19 + desc: ttl_type=absolute,ttl=(10m,10) + sql: | + create table {auto} ( + c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=absolute)); + expect: + success: false + - + id: 20 + desc: 指定ttl_type=absolute,数据过期 + inputs: + - + create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=10m, 
ttl_type=absolute)); + insert: insert into {0} values ("aa", 1, 1590738990000, 1614672180000); + sql: select * from {0}; + expect: + count: 0 + - + id: 21 + desc: 指定ttl_type=latest,部分数据过期 + inputs: + - + columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:1:latest"] + rows: + - ["aa", 1, 1590738990000,1590738990000] + - ["aa", 2, 1590738990000,1590738990000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"] + rows: + - ["aa", 2, 1590738990000,1590738990000] + - + id: 22 + desc: 指定ttl_type=absandlat,部分数据过期 + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:(10m,2):absandlat"] + rows: + - [1,"aa", 1, 1590738990000,1590738990000] + - [2,"aa", 2, 1590738990000,1590738991000] + - [3,"aa", 3, 1590738990000,1590738992000] + sql: select * from {0}; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp","c4 timestamp"] + order: id + rows: + - [2,"aa", 2, 1590738990000,1590738991000] + - [3,"aa", 3, 1590738990000,1590738992000] + - + id: 23 + desc: 指定ttl_type=absorlat,部分数据过期 + inputs: + - + columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:(10m,2):absorlat"] + rows: + - ["aa", 1, 1590738990000,1590738990000] + - ["aa", 1, 1590738990000,1590738990000] + - ["aa", 1, 1590738990000,1590738990000] + sql: select * from {0}; + expect: + count: 0 + - + id: 24 + desc: 指定ttl_type=absolute,部分数据过期 + tags: ["TODO","边界case,待离线和c++支持后,在使用"] + inputs: + - + columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:10m:absolute"] + rows: + - ["aa", 1, 1590738990000, "{currentTime}-600001"] + - ["bb", 1, 1590738990000, "{currentTime}"] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 timestamp"] + rows: + - ["bb", 1, 1590738990000] + - + id: 25 + desc: 指定ttl_type=absandlat,部分数据过期-边界 + tags: ["TODO","边界case,待离线和c++支持后,在使用"] + inputs: + - + 
columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:(10m,2):absandlat"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 2, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 3, 1590738990000,"{currentTime}-600001"] + - [4,"aa", 4, 1590738990000,"{currentTime}"] + sql: select id,c1,c2,c3 from {0}; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [3,"aa", 3, 1590738990000] + - [4,"aa", 4, 1590738990000] + - + id: 26 + desc: 指定ttl_type=absandlat,部分数据过期-边界2 + tags: ["TODO","边界case,待离线和c++支持后,在使用"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:(10m,2):absandlat"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 2, 1590738990000,"{currentTime}-500000"] + - [3,"aa", 3, 1590738990000,"{currentTime}-500000"] + - [4,"aa", 4, 1590738990000,"{currentTime}"] + sql: select id,c1,c2,c3 from {0}; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [2,"aa", 2, 1590738990000] + - [3,"aa", 3, 1590738990000] + - [4,"aa", 4, 1590738990000] + - + id: 27 + desc: 指定ttl_type=absorlat,部分数据过期-边界 + tags: ["TODO","边界case,待离线和c++支持后,在使用"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:(10m,2):absorlat"] + rows: + - [1,"aa", 1, 1590738990000,"{currentTime}-600001"] + - [2,"aa", 2, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 3, 1590738990000,"{currentTime}-500000"] + sql: select id,c1,c2,c3 from {0}; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [3,"aa", 3, 1590738990000] + - + id: 28 + desc: 指定ttl_type=absorlat,部分数据过期-边界2 + tags: ["TODO","边界case,待离线和c++支持后,在使用"] + inputs: + - + columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] + indexs: ["index1:c1:c4:(10m,2):absorlat"] + rows: + - [1,"aa", 1, 
1590738990000,"{currentTime}-600001"] + - [2,"aa", 2, 1590738990000,"{currentTime}-600001"] + - [3,"aa", 3, 1590738990000,"{currentTime}-500000"] + - [4,"aa", 4, 1590738990000,"{currentTime}-400000"] + - [5,"aa", 5, 1590738990000,"{currentTime}"] + sql: select id,c1,c2,c3 from {0}; + expect: + columns: ["id int","c1 string","c2 int","c3 timestamp"] + order: id + rows: + - [4,"aa", 4, 1590738990000] + - [5,"aa", 5, 1590738990000] + - + id: 29 + desc: ttl_type=latest-ttl=(10) + sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=(10),ttl_type=latest)); + expect: + success: true diff --git a/cases/integration_test/deploy/test_create_deploy.yaml b/cases/integration_test/deploy/test_create_deploy.yaml new file mode 100644 index 00000000000..bc90cdaccf2 --- /dev/null +++ b/cases/integration_test/deploy/test_create_deploy.yaml @@ -0,0 +1,621 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: 冒烟测试-正常deploy + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sqls: + - deploy deploy_{0} select * from {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY deploy_{0} SELECT + * + FROM + {0} + ; + inColumns: + - 1,c1,kVarchar,NO + - 2,c2,kInt16,NO + - 3,c3,kInt32,NO + - 4,c4,kInt64,NO + - 5,c5,kFloat,NO + - 6,c6,kDouble,NO + - 7,c7,kTimestamp,NO + - 8,c8,kDate,NO + outColumns: + - 1,c1,kVarchar,NO + - 2,c2,kInt16,NO + - 3,c3,kInt32,NO + - 4,c4,kInt64,NO + - 5,c5,kFloat,NO + - 6,c6,kDouble,NO + - 7,c7,kTimestamp,NO + - 8,c8,kDate,NO + + - id: 1 + desc: deploy一个lastjoin + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + sqls: + - deploy deploy_{0} select {0}.c1,{0}.c2,{1}.c4,{2}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} order by {2}.c4 on {0}.c1={2}.c1; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY deploy_{0} SELECT + {0}.c1, + {0}.c2, + {1}.c4, + {2}.c4 + FROM + {0} + LAST JOIN + {1} + ORDER BY {1}.c4 + ON {0}.c1 = {1}.c1 + LAST JOIN + {2} + ORDER BY {2}.c4 + ON {0}.c1 = {2}.c1 + ; + inColumns: + - 1,c1,kVarchar,NO + - 2,c2,kInt32,NO + - 3,c3,kInt64,NO + - 4,c4,kTimestamp,NO + outColumns: + - 1,c1,kVarchar,NO + - 2,c2,kInt32,NO + - 3,c4,kTimestamp,NO + - 4,c4,kTimestamp,NO + - + id: 2 + desc: deploy一个window-ROWS + inputs: + - + columns : ["id int","c1 string","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY deploy_{0} SELECT + id, + c1, + sum(c4) OVER (w1) AS w1_c4_sum + FROM + {0} + WINDOW w1 AS (PARTITION BY {0}.c1 + ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) + ; + inColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,c3,kInt32,NO + - 4,c4,kInt64,NO + - 5,c5,kFloat,NO + - 6,c6,kDouble,NO + - 7,c7,kTimestamp,NO + - 8,c8,kDate,NO + outColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,w1_c4_sum,kInt64,NO + - + id: 3 + desc: deploy一个window-ROWS_RANGE + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING); + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY deploy_{0} SELECT + id, + c1, + sum(c4) OVER (w1) AS w1_c4_sum 
+ FROM + {0} + WINDOW w1 AS (PARTITION BY {0}.c1 + ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) + ; + inColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,c3,kInt32,NO + - 4,c4,kInt64,NO + - 5,c5,kFloat,NO + - 6,c6,kDouble,NO + - 7,c7,kTimestamp,NO + - 8,c8,kDate,NO + outColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,w1_c4_sum,kInt64,NO + - + id: 4 + desc: deploy一个子查询 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sqls: + - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY deploy_{0} SELECT + v2, + v3 + FROM + ( + SELECT + c2 + 1 AS v2, + c3 + 1 AS v3 + FROM + {0} + ) AS t + ; + inColumns: + - 1,c1,kVarchar,NO + - 2,c2,kInt32,NO + - 3,c3,kInt64,NO + - 4,c4,kTimestamp,NO + outColumns: + - 1,v2,kInt32,NO + - 2,v3,kInt64,NO + - + id: 5 + desc: deploy一个子查询、window、lastjoin + inputs: + - + columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, 1590738990000, 2.2] + - [3, "bb",10, 1590738990000, 3.3] + - + columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"] + indexs: ["index2:crd_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, "aaaaaaaaaa"] + - [1590738990000, "aaaaaaaaaa"] + - [1590738989000, "cc"] + - [1590738992000, "cc"] + sqls: + - deploy deploy_{0} select * from(select + id,card_no,trx_time,substr(card_no, 1, 6) as card_no_prefix,sum(trx_amt) over w30d as sum_trx_amt,count(merchant_id) over w10d as count_merchant_id from {0} + window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT 
ROW), + w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe + last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY deploy_{0} SELECT + * + FROM + ( + SELECT + id, + card_no, + trx_time, + substr(card_no, 1, 6) AS card_no_prefix, + sum(trx_amt) OVER (w30d) AS sum_trx_amt, + count(merchant_id) OVER (w10d) AS count_merchant_id + FROM + {0} + WINDOW w30d AS (PARTITION BY {0}.card_no + ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW), w10d AS (PARTITION BY {0}.card_no + ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW) + ) AS trx_fe + LAST JOIN + {1} + ORDER BY {1}.crd_lst_isu_dte + ON trx_fe.card_no = {1}.crd_nbr AND trx_fe.trx_time >= {1}.crd_lst_isu_dte + ; + inColumns: + - 1,id,kInt32,NO + - 2,card_no,kVarchar,NO + - 3,merchant_id,kInt32,NO + - 4,trx_time,kTimestamp,NO + - 5,trx_amt,kFloat,NO + outColumns: + - 1,id,kInt32,NO + - 2,card_no,kVarchar,NO + - 3,trx_time,kTimestamp,NO + - 4,card_no_prefix,kVarchar,NO + - 5,sum_trx_amt,kFloat,NO + - 6,count_merchant_id,kInt64,NO + - 7,crd_lst_isu_dte,kTimestamp,NO + - 8,crd_nbr,kVarchar,NO + - + id: 6 + desc: deploy的sql中指定其他库 + db: db + inputs: + - db: db1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - db: db2 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sqls: + - deploy deploy_{0} select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on 
db1.{0}.c1=db2.{1}.c1; + expect: + success: false + - + id: 7 + desc: deploy sql错误 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: deploy deploy_{0} select * from {0}11; + expect: + success: false + - + id: 8 + desc: deploy 同名service + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sqls: + - deploy deploy_{0} select * from {0}; + - deploy deploy_{0} select * from {0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: false + - + id: 9 + desc: deploy 语法错误 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: deploy deployment deploy_{0} select * from {0}; + expect: + success: false + - + id: 10 + desc: deploy 一个insert + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: deploy deploy_{0} insert into {0} values('aa',1,2,3,1.1,2.1,1590738989000,'2020-05-01'); + expect: + success: false + - + id: 11 + desc: deploy 和表名重复 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sqls: + - deploy {0} select * from {0}; + - show deployment {0}; + tearDown: + - drop deployment {0}; + expect: + success: true + - + id: 12 + desc: 表没有索引,deploy一个window + inputs: + - + create: | + create table {0} ( + id int not null, + c1 int not null, + c7 timestamp not null + ); + 
dataProvider: + - ["ROWS","ROWS_RANGE"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c1) OVER w1 as w1_c1_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING); + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 13 + desc: 表已经有索引,deploy一个window,使用另一个索引,列和ts都不同 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c4"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING); + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 14 + desc: 表已经有索引,deploy一个window,索引的column相同,ts不同 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c4"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING); + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 15 + desc: 表已经有索引,deploy一个window,索引的column不同,ts相同,ROWS + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 16 + desc: 表已经有索引,deploy一个window,索引的column不同,ts相同,ROWS_RANGE + inputs: + - + columns : ["id int","c1 string","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 17 + desc: 表已经有索引,deploy一个window,索引的column不同,ts相同,rows_range带单位 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 rows_range BETWEEN 2h PRECEDING AND 1h PRECEDING); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 18 + desc: deploy的sql中指定其他库,其中一个表使用默认库 + db: db1 + inputs: + - db: db1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - db: db2 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sqls: + - deploy deploy_{0} select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1; + expect: + success: false + - + id: 19 + desc: 多个window + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c4:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY c1 
ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 20 + desc: 组合索引-ROWS + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 21 + desc: 组合索引-ROWS_RANGE + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 22 + desc: 表已经有索引,deploy一个window,索引的column相同和ts都相同 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 23 + desc: 组合索引-索引相同 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - 
[4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING); + - desc {0}; + - show deployment deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true + - + id: 24 + desc: 表有数据,deploy创建新索引 + tags: ["TODO","cicd大概率失败,@denglong,https://github.com/4paradigm/OpenMLDB/issues/1116"] + mode: standalone-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + expect: + success: false \ No newline at end of file diff --git a/cases/integration_test/deploy/test_drop_deploy.yaml b/cases/integration_test/deploy/test_drop_deploy.yaml new file mode 100644 index 00000000000..7e40d4214df --- /dev/null +++ b/cases/integration_test/deploy/test_drop_deploy.yaml @@ -0,0 +1,85 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: 正常删除service + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + - drop deployment deploy_{0}; + - show deployments; + expect: + deploymentCount: 0 + - + id: 1 + desc: service不存在 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sqls: + - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + - drop deployment deploy_{0}11; + tearDown: + - drop deployment deploy_{0}; + expect: + success: false + - + id: 2 + desc: 语法错误 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sqls: + - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + - drop deployments deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: false + - + id: 3 + desc: 删除其他库的service + db: db + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sqls: + - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + - drop deployment db.deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: false \ No newline at end of file diff --git a/cases/integration_test/deploy/test_show_deploy.yaml 
b/cases/integration_test/deploy/test_show_deploy.yaml new file mode 100644 index 00000000000..32d3c27d89f --- /dev/null +++ b/cases/integration_test/deploy/test_show_deploy.yaml @@ -0,0 +1,88 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: 查看所有deployment + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + sqls: + - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + - deploy {0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING); + - show deployments; + tearDown: + - drop deployment deploy_{0}; + - drop deployment {0}; + expect: + deploymentCount: 2 + - + id: 1 + desc: service不存在 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sqls: + - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + - show deployment deploy_{0}11; + tearDown: + - drop deployment deploy_{0}; + expect: + success: false + - + id: 2 + desc: 语法错误 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: 
["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sqls: + - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + - show deployments deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: false + - + id: 3 + desc: 查看其他库的service + db: db + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sqls: + - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + - show deployment db.deploy_{0}; + tearDown: + - drop deployment deploy_{0}; + expect: + success: true \ No newline at end of file diff --git a/cases/integration_test/disk_table/disk_table.yaml b/cases/integration_test/disk_table/disk_table.yaml new file mode 100644 index 00000000000..33c0b45e0be --- /dev/null +++ b/cases/integration_test/disk_table/disk_table.yaml @@ -0,0 +1,486 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 创建SSD表,插入多条数据,查询 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 1 + desc: 创建HDD表,插入多条数据,查询 + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + + - + id: 2 + desc: ssd和内存表,join + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: memory + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 3 + desc: hdd和内存表,join + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 
1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: memory + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 4 + desc: 内存表和ssd,join + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: memory + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 5 + desc: 内存表和hdd,join + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: memory + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - 
["cc", 41, 51, 1590738991000] + - + id: 6 + desc: hdd和ssd,join + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + id: 7 + desc: hdd和ssd,join + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: memory + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + sql: select {1}.c1,{1}.c2,{2}.c3,{0}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} on {0}.c1 = {2}.c1; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - ["bb", 21, 31, 1590738990000] + - ["cc", 41, 51, 1590738991000] + + - id: 8 + desc: ssd union 内存表 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: SSD + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: memory + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 9 + desc: hdd union 内存表 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: HDD + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: memory + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 10 + desc: 内存表 union ssd + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: memory + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + 
- [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: SSD + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 11 + desc: 内存表 union hdd + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: memory + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + storage: HDD + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 12 + desc: SSD 插入索引和ts 一样的数据 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: SSD + rows: + - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 
1590738989000] + - id: 13 + desc: HDD 插入索引和ts 一样的数据 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + storage: HDD + rows: + - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] + - ["aa", 2, 3, 1590738989000] + sql: select * from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa", 2, 3, 1590738989000] + - id: 14 + desc: storage_mode=其他字符 + mode: request-unsupport + sql: | + create table auto_MDYewbTv( + c1 string, + c2 int, + c3 bigint, + c4 timestamp, + index(key=(c1),ts=c4))options(partitionnum=1,replicanum=1,storage_mode="hdp"); + expect: + success: false + + - id: 15 + desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:4:latest"] + storage: SSD + rows: + - ["bb", 2, 3, 1590738989000] + - ["bb", 4, 5, 1590738990000] + - ["bb", 6, 7, 1590738991000] + - ["bb", 8, 9, 1590738992000] + - ["bb", 10, 11, 1590738993000] + - ["bb", 12, 13, 1590738994000] + - ["bb", 14, 15, 1590738995000] + - ["bb", 16, 17, 1590738996000] + - ["bb", 18, 19, 1590738997000] + - ["bb", 20, 21, 1590738998000] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 20, 21] + - ["bb", 18, 19] + - ["bb", 16, 17] + - ["bb", 14, 15] + + - id: 16 + desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 4, 5, "{currentTime}-200"] + - ["bb", 6, 7, "{currentTime}-599000"] + - ["bb", 8, 9, "{currentTime}-600000"] + - ["bb", 10, 11, "{currentTime}-600005"] + - ["bb", 12, 13, "{currentTime}-600006"] + - ["bb", 14, 15, "{currentTime}-600007"] + - ["bb", 16, 17, "{currentTime}-600008"] + - ["bb", 18, 
19, "{currentTime}-600009"] + - ["bb", 20, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 3] + - ["bb", 4, 5] + - ["bb", 6, 7] + + - id: 17 + desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 2, 5, "{currentTime}-200"] + - ["bb", 2, 7, "{currentTime}-59"] + - ["bb", 2, 9, "{currentTime}-600"] + - ["bb", 2, 11, "{currentTime}-602"] + - ["bb", 2, 13, "{currentTime}-600006"] + - ["bb", 2, 15, "{currentTime}-600007"] + - ["bb", 2, 17, "{currentTime}-600008"] + - ["bb", 2, 19, "{currentTime}-600009"] + - ["bb", 2, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0}; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 7] + - ["bb", 2, 3] + - ["bb", 2, 5] + - ["bb", 2, 9] + + - id: 18 + desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10 ,where条件 + mode: request-unsupport + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"] + storage: hdd + rows: + - ["bb", 2, 3, "{currentTime}-100"] + - ["bb", 2, 5, "{currentTime}-200"] + - ["bb", 2, 7, "{currentTime}-59"] + - ["bb", 2, 9, "{currentTime}-600"] + - ["bb", 2, 11, "{currentTime}-602"] + - ["bb", 2, 13, "{currentTime}-600006"] + - ["bb", 2, 15, "{currentTime}-600007"] + - ["bb", 2, 17, "{currentTime}-600008"] + - ["bb", 2, 19, "{currentTime}-600009"] + - ["bb", 2, 21, "{currentTime}-600010"] + sql: select c1,c2,c3 from {0} where c1 = "bb"; + expect: + columns: ["c1 string","c2 int","c3 bigint"] + rows: + - ["bb", 2, 7] + - ["bb", 2, 3] + - ["bb", 2, 5] + - ["bb", 2, 9] + - ["bb", 2, 11] diff --git a/cases/integration_test/dml/multi_insert.yaml b/cases/integration_test/dml/multi_insert.yaml 
new file mode 100644 index 00000000000..1f606089abe --- /dev/null +++ b/cases/integration_test/dml/multi_insert.yaml @@ -0,0 +1,287 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +db: multi_insert_db +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: 简单INSERT + inputs: + - + create: | + create table {0} ( + col0 string not null, + col1 int not null, + col2 smallint not null, + col3 float not null, + col4 double not null, + col5 bigint not null, + col6 string not null, + col7 timestamp not null, + col8 date not null, + col9 bool not null, + index(key=(col2), ts=col5) + ); + insert: insert into {0} values("hello", 1, 2, 3.3f, 4.4, 5L, "world", 12345678L, "2020-05-21", true); + sql: select * from {0}; + expect: + columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", + "col6 string", "col7 timestamp", "col8 date", "col9 bool"] + order: col1 + rows: + - [hello, 1, 2, 3.3, 4.4, 5, world, 12345678, "2020-05-21", true] + - id: 1 + desc: 简单INSERT 多行 + inputs: + - + create: | + create table {0} ( + col0 string not null, + col1 int not null, + col2 smallint not null, + col3 float not null, + col4 double not null, + col5 bigint not null, + col6 string not null, + index(key=(col2), ts=col5) + ); + insert: | + insert into {0} values + ("hello", 1, 2, 3.3, 4.4, 5, "world"), + ("hello", 11, 22, 33.3, 44.4, 55, "fesql"); + + sql: select * from {0}; + expect: + columns: [ "col0 string", "col1 int32", "col2 
int16", "col3 float", "col4 double", "col5 int64", + "col6 string"] + order: col1 + rows: + - [hello, 1, 2, 3.3, 4.4, 5, world] + - [hello, 11, 22, 33.3, 44.4, 55, fesql] + + - id: 2 + desc: 简单INSERT timestamp + inputs: + - create: | + create table {0} ( + col1 int not null, + col5 bigint not null, + std_ts timestamp not null, + index(key=(col1), ts=col5) + ); + insert: | + insert into {0} values + (1, 10000L, 1590738987000L), + (2, 20000L, 1590738988000L); + sql: select * from {0}; + expect: + columns: ["col1 int", "col5 bigint", "std_ts timestamp"] + order: col1 + rows: + - [1, 10000, 1590738987000] + - [2, 20000, 1590738988000] + + - id: 3 + desc: insert 指定列,其他列默认为NULL + inputs: + - + create: | + create table {0} ( + col1 int not null, + col2 smallint, + col3 float, + col4 double, + col5 bigint not null, + std_ts timestamp not null, + str string, + index(key=(col1), ts=col5) + ); + insert: | + insert into {0} (col1, col5, std_ts) values + (1, 10000L, 1590738987000L), + (2, 20000L, 1590738988000L); + sql: select * from {0}; + expect: + columns: ["col1 int", "col2 int16", "col3 float", "col4 double", "col5 bigint", "std_ts timestamp", "str string"] + order: col1 + rows: + - [1, NULL, NULL, NULL, 10000, 1590738987000, NULL] + - [2, NULL, NULL, NULL, 20000, 1590738988000, NULL] + - id: 4 + desc: Insert date + inputs: + - create: | + create table {0} ( + col1 int not null, + col2 smallint, + col3 float, + col4 double, + col5 bigint not null, + std_date date not null, + str string, + index(key=(col1), ts=col5) + ); + insert: | + insert into {0} (col1, col5, std_date) values + (1, 10000L, '2020-05-27'), + (2, 20000L, '2020-05-28'); + + sql: select * from {0}; + expect: + columns: [ "col1 int", "col2 int16", "col3 float", "col4 double", "col5 bigint", "std_date date", "str string" ] + order: col1 + rows: + - [1, NULL, NULL, NULL, 10000, "2020-05-27", NULL] + - [2, NULL, NULL, NULL, 20000, "2020-05-28", NULL] + - id: 5 + desc: 简单INSERT NULL value + inputs: + - + create: 
| + create table {0} ( + col0 string not null, + col1 int not null, + col2 smallint, + col3 float not null, + col4 double not null, + col5 bigint not null, + col6 string not null, + index(key=(col2), ts=col5) + ); + insert: | + insert into {0} values ("hello", 1, NULL, 3.3f, 4.4, 5L, "world"), + ("hi", 2, NULL, 33.3f, 44.4, 55L, "db"); + sql: select * from {0}; + expect: + columns: [ "col0 string", "col1 int", "col2 int16", "col3 float", "col4 double", "col5 bigint", "col6 string" ] + order: col1 + rows: + - [hello, 1, NULL, 3.3, 4.4, 5, world] + - [hi, 2, NULL, 33.3, 44.4, 55, db] + - + id: 6 + desc: 所有列插入多条 + inputs: + - + create: | + create table {0} ( + id int not null, + c1 int not null, + c2 smallint not null, + c3 float not null, + c4 double not null, + c5 bigint not null, + c6 string not null, + c7 timestamp not null, + c8 date not null, + c9 bool not null, + index(key=(c1), ts=c5) + ); + insert: | + insert into {0} values + (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true), + (2, 10, 20, 3.31f, 4.41, 50L, "bb", 12345679L, "2020-05-22", false); + sql: select * from {0}; + expect: + columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true] + - [2,10,20,3.31,4.41,50,"bb",12345679,"2020-05-22",false] + - + id: 7 + desc: 其中一条数据类型不兼容 + inputs: + - + create: | + create table {0} ( + id int, + c1 int, + c2 smallint, + c3 float, + c5 bigint, + index(key=(c1), ts=c5) + ); + sql: | + insert into {0} (id,c3,c5)values + (1, 3.3,12345678), + (2, "aa",12345679); + expect: + success: false + - + id: 8 + desc: 插入多条空串 + mode: cli-unsupport + inputs: + - + create: | + create table {0} ( + id int, + c1 int, + c2 string, + c3 float, + c5 bigint, + index(key=(c1), ts=c5) + ); + insert: | + insert into {0} (id,c2,c3,c5)values + (1, "",null,12345678), + (2, "",null,12345679); + sql: select * from {0}; + expect: + columns : ["id 
int","c1 int","c2 string","c3 float","c5 bigint"] + order: id + rows: + - [1,null,"",null,12345678] + - [2,null,"",null,12345679] + - + id: 9 + desc: 插入数据和列的数量不匹配 + inputs: + - + create: | + create table {0} ( + id int, + c1 int, + c2 smallint, + c3 float, + c5 bigint, + index(key=(c1), ts=c5) + ); + sql: | + insert into {0} (id,c3,c5)values + (1,12345678), + (2,12345679); + expect: + success: false + - + id: 10 + desc: 其中一条数据少一列 + inputs: + - + create: | + create table {0} ( + id int, + c1 int, + c2 smallint, + c3 float, + c5 bigint, + index(key=(c1), ts=c5) + ); + sql: | + insert into {0} (id,c3,c5)values + (1, 3.3,12345678), + (2,12345679); + expect: + success: false \ No newline at end of file diff --git a/cases/integration_test/dml/test_delete.yaml b/cases/integration_test/dml/test_delete.yaml new file mode 100644 index 00000000000..51e0a39736f --- /dev/null +++ b/cases/integration_test/dml/test_delete.yaml @@ -0,0 +1,597 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: delete 一个key + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 1 + desc: delete 组合索引 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1|c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa' and c2=1; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - + id: 2 + desc: delete 一个索引的两个key + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa' or c1='cc'; + 
expect: + success: false + msg: fail + - + id: 3 + desc: delete 两个索引的两个key + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c2:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa' or c2=1; + expect: + success: false + msg: fail + - + id: 4 + desc: 两个索引 delete 其中一个 + mode: cluster-unsupport + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"aa",1,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c2=2; + sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + columns: ["id int","c2 smallint","w1_c4_count bigint"] + order: id + rows: + - [1,1,1] + - [2,1,2] + - [4,1,3] + - + id: 5 + desc: delete 不是索引列 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + expect: + success: false + msg: fail + - + id: 6 + desc: delete key不存在 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 
bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='cc'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 7 + desc: delete null + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,null,1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1=null; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 8 + desc: delete 空串 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1=''; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 10 + 
desc: delete int + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,3,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c3=3; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 11 + desc: delete smallint + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 12 + desc: delete bigint + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c4:c7"] + rows: + - [1,"aa",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c4=4; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - 
[3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 13 + desc: delete date + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c8:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-02",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c8='2020-05-02'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 14 + desc: delete timestamp + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c7:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c7=1590738989000; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 15 + desc: delete bool + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c9:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c9=true; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 
float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false] + - + id: 16 + desc: 两次delete相同index 不同的key + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - delete from {0} where c1='cc'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 17 + desc: 两次delete 不同的index + mode: cluster-unsupport + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - delete from {0} where c2=2; + sql: | + SELECT id, c2, count(c4) OVER w1 as w1_c4_count, count(c5) OVER w2 as w2_c5_count FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + columns: ["id int","c2 smallint","w1_c4_count bigint","w2_c5_count bigint"] + order: id + rows: + - [1,1,1,1] + - [2,1,1,2] + - + id: 18 + desc: delete过期数据 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: 
["index1:c1:c7:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 19 + desc: delete表不存在 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sql: delete from {0}1 where c1='aa'; + expect: + success: false + msg: fail + - + id: 20 + desc: delete列不存在 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c11=1; + expect: + success: false + msg: fail + - + id: 21 + desc: delete 其他库的数据 + inputs: + - + db: d1 + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from d1.{0} where c1='aa'; + - select * from d1.{0}; + expect: + columns: ["id int","c1 
string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 22 + desc: 两个index中key相同 delete 一个key + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7","index2:c1:c4:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + - + id: 23 + desc: delete全部数据 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + - select * from {0}; + expect: + count: 0 + - + id: 24 + desc: 两个索引,一个索引数据过期,删除另一个索引 + mode: cluster-unsupport + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest","index2:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - 
[3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true] + sqls: + - delete from {0} where c2=1; + sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + columns: ["id int","c2 smallint","w1_c4_count bigint"] + order: id + rows: + - [4,2,1] + - [5,2,2] + - + id: 25 + desc: 数据过期,delete其他pk + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:1:latest"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='bb'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 26 + desc: 不等式删除 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1!='cc'; + expect: + success: false + msg: fail + - + id: 27 + desc: 比较运算符删除 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c2:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + 
sqls: + - delete from {0} where c2>=2; + expect: + success: false + msg: fail + - + id: 28 + desc: 表名为job delete + inputs: + - + name: job + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='aa'; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 29 + desc: delete空表 + inputs: + - + name: job + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - delete from {0} where c1='aa'; + expect: + success: true + - + id: 30 + desc: 组合key有一个是null + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1|c2:c7"] + rows: + - [1,null,2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1=null and c2=2; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + - + id: 31 + desc: 组合key有一个是空串 + inputs: + - + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1|c2:c7"] + rows: + - 
[1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [3,"",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] + sqls: + - delete from {0} where c1='' and c2=2; + - select * from {0}; + expect: + columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + diff --git a/cases/integration_test/dml/test_insert.yaml b/cases/integration_test/dml/test_insert.yaml new file mode 100644 index 00000000000..36ae56ca82b --- /dev/null +++ b/cases/integration_test/dml/test_insert.yaml @@ -0,0 +1,207 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 插入所有类型的数据 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - + id: 1 + desc: 插入所有列的数据 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c1), ts=c7)); + insert: insert into {0} values('aa',2,3,1.1,2.1,1590738989000L,'2020-05-01'); + sql: select * from {0}; + expect: + columns : ["c1 string", "c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + - + id: 2 + desc: 插入部分列数据 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c1), ts=c7)); + insert: insert into {0} (c1,c4,c7) values('aa',2,1590738989000L); + sql: select * from {0}; + expect: + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",null,2,null,null,1590738989000,null] + - + id: 3 + desc: 没有指定NotNull的列插入null + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c1), ts=c7)); + insert: insert into {0} (c1,c3,c4,c5,c6,c7,c8) values('aa',2,NULL,NULL,NULL,1590738989000L,NULL); + sql: select * from {0}; + expect: + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,null,null,null,1590738989000,null] + - + id: 4 + desc: NotNull的列插入null + sqlDialect: ["HybridSQL"] + 
inputs: + - + create: | + create table {0} ( c1 string NOT NULL, c2 timestamp, + index(key=(c1), ts=c2)); + sql: insert into {0} (c1,c2,c3) values(NULL,1590738989000L); + expect: + success: false + - + id: 5 + desc: 字符串类型插入空串 + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( c1 string NOT NULL, c2 timestamp, + index(key=(c1), ts=c2)); + insert: insert into {0} (c1,c2) values('',1590738989000L); + sql: select * from {0}; + expect: + columns : ["c1 string","c2 timestamp"] + rows: + - ["",1590738989000] + - + id: 6 + desc: 表名不存在 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( c1 string NOT NULL, c2 timestamp, + index(key=(c1), ts=c2)); + sql: insert into {0}1 (c1,c2) values('aa',1590738989000L); + expect: + success: false + - + id: 7 + desc: 列名不存在 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( c1 string NOT NULL, c2 timestamp, + index(key=(c1), ts=c2)); + sql: insert into {0} (c1,c3) values('aa',1590738989000L); + expect: + success: false + - + id: 8 + desc: 没有指定NotNull的列 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( c1 string, c2 timestamp, c3 string NOT NULL, + index(key=(c1), ts=c2)); + sql: insert into {0} (c1,c2) values('aa',1590738989000L); + expect: + success: false + - + id: 9 + desc: 插入的字符串没有引号 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( c1 string, c2 timestamp, + index(key=(c1), ts=c2)); + sql: insert into {0} (c1,c2) values(aa,1590738989000L); + expect: + success: false + - + id: 10 + desc: 相同时间戳数据 + mode: disk-unsupport + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - 
["aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + order: c2 + rows: + - [ "aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ 
"aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01" ] diff --git a/cases/integration_test/dml/test_insert_prepared.yaml b/cases/integration_test/dml/test_insert_prepared.yaml new file mode 100644 index 00000000000..f43f5662094 --- /dev/null +++ b/cases/integration_test/dml/test_insert_prepared.yaml @@ -0,0 +1,280 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 插入所有类型的数据 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - + id: 1 + desc: 插入所有列的数据 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c1), ts=c7)); + insert: insert into {0} values(?,?,?,?,?,?,?); + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string", "c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + - + id: 2 + desc: 插入部分列数据 + sqlDialect: 
["HybridSQL"] + inputs: + - + create: | + create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c1), ts=c7)); + insert: insert into {0} (c1,c4,c7) values(?,?,?); + rows: + - ["aa",2,1590738989000] + sql: select * from {0}; + expect: + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",null,2,null,null,1590738989000,null] + - + id: 3 + desc: 没有指定NotNull的列插入null + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date, + index(key=(c1), ts=c7)); + insert: insert into {0} (c1,c3,c4,c5,c6,c7,c8) values(?,?,?,?,?,?,?); + rows: + - ["aa",2,null,null,null,1590738989000,null] + sql: select * from {0}; + expect: + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,null,null,null,1590738989000,null] + - + id: 4 + desc: 字符串类型插入空串 + sqlDialect: ["HybridSQL"] + inputs: + - + create: | + create table {0} ( c1 string NOT NULL, c2 timestamp, + index(key=(c1), ts=c2)); + insert: insert into {0} (c1,c2) values(?,?); + rows: + - ["",1590738989000] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 timestamp"] + rows: + - ["",1590738989000] + - + id: 5 + desc: 相同时间戳数据 + mode: disk-unsupport + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - 
["aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01"] + - ["aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + order: c2 + rows: + - [ "aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - [ "aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01" ] + - + id: 6 + desc: 时间年初 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
["aa",1,2,1590738989000,"2020-01-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"2020-01-01"] + - + id: 7 + desc: 时间年末 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"2020-12-31"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"2020-12-31"] + - + id: 8 + desc: 时间月初 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"2020-12-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"2020-12-01"] + - + id: 9 + desc: 时间月末 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"2020-11-30"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"2020-11-30"] + - + id: 10 + desc: 时间2月末 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"2020-02-28"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"2020-02-28"] + - + id: 11 + desc: 时间3月初 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"2020-03-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"2020-03-01"] + - + id: 12 + desc: 时间1970-01-01 + inputs: + - 
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"1970-01-01"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"1970-01-01"] + - + id: 13 + desc: 时间1969-12-31 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"1969-12-31"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"1969-12-31"] + - + id: 14 + desc: 时间-0330 + inputs: + - + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",1,2,1590738989000,"2020-03-30"] + sql: select * from {0}; + expect: + columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"] + rows: + - ["aa",1,2,1590738989000,"2020-03-30"] diff --git a/cases/integration_test/ecosystem/test_kafka.yaml b/cases/integration_test/ecosystem/test_kafka.yaml new file mode 100644 index 00000000000..a4852ae1938 --- /dev/null +++ b/cases/integration_test/ecosystem/test_kafka.yaml @@ -0,0 +1,25 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + json: {"data":[{"c1":"aa","c2":1,"c3":2,"c4":3,"c5":1.1,"c6":2.2,"c7":1590738989000,"c8":1659512628000,"c9":true}],"type":"INSERT"} + sql: select * from {table} + expect: + columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - ["aa",1,2,3,1.1,2.1,1590738989000,"2022-08-03",true] \ No newline at end of file diff --git a/cases/integration_test/expression/test_arithmetic.yaml b/cases/integration_test/expression/test_arithmetic.yaml new file mode 100644 index 00000000000..d90c7422c60 --- /dev/null +++ b/cases/integration_test/expression/test_arithmetic.yaml @@ -0,0 +1,686 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +sqlDialect: ["HybridSQL"] +cases: + - id: 0 + desc: "smallint_[%/MOD/*]_整型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["%","MOD","*","-","/"] + sql: select {0}.c2 d[0] {1}.c2 as b2,{0}.c2 d[0] {1}.c3 as b3,{0}.c2 d[0] {1}.c4 as b4,{0}.c2 d[0] {1}.c5 as b5,{0}.c2 d[0] {1}.c6 as b6,{0}.c2 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b9 smallint"] + expectProvider: + 0: + rows: + - [0,10,0,7.8,5.8,0] + 1: + rows: + - [0,10,0,7.8,5.8,0] + 2: + rows: + - [0,600,900,333,363,30] + 3: + rows: + - [30,10,0,18.9,17.9,29] + 4: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + rows: + - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0] + - id: 1 + desc: "int_算术运算_整型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["%","MOD","*","-","/"] + sql: select {0}.c3 d[0] {1}.c2 as b2,{0}.c3 d[0] {1}.c3 as b3,{0}.c3 d[0] {1}.c4 as b4,{0}.c3 d[0] {1}.c5 as b5,{0}.c3 d[0] {1}.c6 as b6,{0}.c3 
d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b2 int","b3 int","b4 bigint","b5 float","b6 double","b9 int"] + expectProvider: + 0: + rows: + - [0,10,0,7.8,5.8,0] + 1: + rows: + - [0,10,0,7.8,5.8,0] + 2: + rows: + - [0,600,900,333,363,30] + 3: + rows: + - [30,10,0,18.9,17.9,29] + 4: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + rows: + - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0] + - id: 2 + desc: "bigint_算术运算_整型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["%","MOD","*","-","/"] + sql: select {0}.c4 d[0] {1}.c2 as b2,{0}.c4 d[0] {1}.c3 as b3,{0}.c4 d[0] {1}.c4 as b4,{0}.c4 d[0] {1}.c5 as b5,{0}.c4 d[0] {1}.c6 as b6,{0}.c4 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b2 bigint","b3 bigint","b4 bigint","b5 float","b6 double","b9 bigint"] + expectProvider: + 0: + rows: + - [0,10,0,7.8,5.8,0] + 1: + rows: + - [0,10,0,7.8,5.8,0] + 2: + rows: + - [0,600,900,333,363,30] + 3: + rows: + - [30,10,0,18.9,17.9,29] + 4: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + rows: + - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0] + - id: 3 + desc: "float_算术运算_整型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : 
["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["%","MOD","*","-","/"] + sql: select {0}.c5 d[0] {1}.c2 as b2,{0}.c5 d[0] {1}.c3 as b3,{0}.c5 d[0] {1}.c4 as b4,{0}.c5 d[0] {1}.c5 as b5,{0}.c5 d[0] {1}.c6 as b6,{0}.c5 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b2 float","b3 float","b4 float","b5 float","b6 double","b9 float"] + expectProvider: + 0: + rows: + - [NAN,10,0,7.8,5.8,0] + 1: + rows: + - [NAN,10,0,7.8,5.8,0] + 2: + rows: + - [0,600,900,333,363,30] + 3: + rows: + - [30,10,0,18.9,17.9,29] + 4: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + rows: + - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0] + - id: 4 + desc: "double_算术运算_整型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["%","MOD","*","-","/"] + sql: select {0}.c6 d[0] {1}.c2 as b2,{0}.c6 d[0] {1}.c3 as b3,{0}.c6 d[0] {1}.c4 as b4,{0}.c6 d[0] {1}.c5 as b5,{0}.c6 d[0] {1}.c6 as b6,{0}.c6 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + expectProvider: + 0: + rows: + - [NAN,10,0,7.7999992370605469,5.8,0] + 1: + rows: + - [NAN,10,0,7.7999992370605469,5.8,0] + 2: + rows: + - [0,600,900,333.0000114440918,363,30] + 3: + rows: + - 
[30,10,0,18.899999618530273,17.9,29] + 4: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + rows: + - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0] + - id: 5 + desc: "+_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c9"] + sql: select d[0] + {1}.c2 as b2,d[0] + {1}.c3 as b3,d[0] + {1}.c4 as b4,d[0] + {1}.c5 as b5,d[0] + {1}.c6 as b6,d[0] + {1}.c7 as b7,d[0] + {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expectProvider: + 0: + columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 smallint"] + rows: + - [30,50,60,41.1,42.1,1590738989031,31] + 1: + columns: ["b2 int","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 int"] + rows: + - [30,50,60,41.1,42.1,1590738989031,31] + 2: + columns: ["b2 bigint","b3 bigint","b4 bigint","b5 float","b6 double","b7 timestamp","b9 bigint"] + rows: + - [30,50,60,41.1,42.1,1590738989031,31] + 3: + columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 bool"] + rows: + - [0,20,30,11.1,12.1,1590738989001,true] + - id: 6 + desc: "浮点型+_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 
bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["{0}.c5","{0}.c6"] + sql: select d[0] + {1}.c2 as b2,d[0] + {1}.c3 as b3,d[0] + {1}.c4 as b4,d[0] + {1}.c5 as b5,d[0] + {1}.c6 as b6,d[0] + {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expectProvider: + 0: + columns: ["b2 float","b3 float","b4 float","b5 float","b6 double","b9 float"] + rows: + - [30,50,60,41.100000381469727,42.1,31] + 1: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + rows: + - [30,50,60,41.100000381469727,42.1,31] + - id: 7 + desc: "timestamp+_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["{0}.c7"] + sql: select d[0] + {1}.c2 as b2,d[0] + {1}.c3 as b3,d[0] + {1}.c4 as b4,d[0] + {1}.c7 as b7,d[0] + {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expectProvider: + 0: + columns: ["b2 timestamp","b3 timestamp","b4 timestamp","b7 timestamp","b9 timestamp"] + rows: + - [1590738989000,1590738989020,1590738989030,3181477978001,1590738989001] + - id: 8 + desc: "timestamp_-_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - 
[1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["-"] + sql: select {0}.c7 d[0] {1}.c2 as b2,{0}.c7 d[0] {1}.c3 as b3,{0}.c7 d[0] {1}.c4 as b4,{0}.c7 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expectProvider: + 0: + columns: ["b2 timestamp","b3 timestamp","b4 timestamp","b9 timestamp"] + rows: + - [1590738989000,1590738988980,1590738988970,1590738988999] + - id: 9 + desc: "整型_[%MOD*-/]_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["%","MOD","*","-","/"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"] + - ["{1}.c7","{1}.c8","{1}.c1"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 10 + desc: "整型_+_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["+"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"] + - ["{1}.c8","{1}.c1"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 11 + desc: 
"各种类型_[%MOD*/]_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["%","MOD","*","/"] + - ["{0}.c7","{0}.c8","{0}.c1"] + - ["{1}.c1","{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 12 + desc: "timetamp_-_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["-"] + - ["{0}.c7"] + - ["{1}.c1","{1}.c7","{1}.c8"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 13 + desc: "timetamp_+_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - 
[1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["+"] + - ["{0}.c7"] + - ["{1}.c1","{1}.c8"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 14 + desc: "date/string_[+-]_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["+","-"] + - ["{0}.c8","{0}.c1"] + - ["{1}.c1","{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 15 + desc: "-_整型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true] + - [2,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",false] + sql: select id, - {0}.c2 as b2,- {0}.c3 as b3,- {0}.c4 as b4,- {0}.c5 as b5,- {0}.c6 as b6,- {0}.c9 as b9 from {0}; + expect: + order: id + columns: ["id bigint", "b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b9 bool"] + rows: + - [1,-30,30,-30,-30,-30,true] + - [2,-30,30,-30,-30,-30,false] + - id: 16 + desc: "-_其他类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true] + - 
[1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",false] + sql: select - d[0] as b2 from {0}; + dataProvider: + - ["{0}.c7","{0}.c8","{0}.c1"] + expect: + success: false + - id: 17 + desc: "int_DIV_int_正确" + tags: ["TODO","bug,@baoxinqi,DIV 0有问题"] + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",false] + dataProvider: + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c9"] + sql: select d[0] DIV {1}.c2 as b2,d[0] DIV {1}.c3 as b3,d[0] DIV {1}.c4 as b4,d[0] DIV {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expectProvider: + 0: + columns: ["b2 smallint","b3 int","b4 bigint","b9 smallint"] + rows: + - [Infinity,1,1,Infinity] + 1: + columns: ["b2 int","b3 int","b4 bigint","b9 int"] + rows: + - [Infinity,1,1,Infinity] + 2: + columns: ["b2 bigint","b3 bigint","b4 bigint","b9 bigint"] + rows: + - [Infinity,0,0,Infinity] + 3: + columns: ["b2 smallint","b3 int","b4 bigint","b9 smallint"] + rows: + - [Infinity,1,1,Infinity] + - id: 18 + desc: "int_DIV_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["DIV"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c9"] + - 
["{1}.c1","{1}.c5","{1}.c6","{1}.c7","{1}.c8"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 19 + desc: "各种类型_DIV_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["DIV"] + - ["{1}.c1","{1}.c5","{1}.c6","{1}.c7","{1}.c8"] + - ["{1}.c1","{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9"] + sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + + - id: 19 + desc: 算数表达式操作数为null时返回null + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", + "c4 float","c5 double", "c6 timestamp", "c7 bool", "nullcol int32"] + indexs: ["index1:c3:c6"] + rows: + - [1, 911, 1024, 3.14, 0.99, 1590738989000, true, NULL] + sql: select + c1 + nullcol as r1, c1 - nullcol as r2, c1 * nullcol as r3, c1 / nullcol as r4, c1 % nullcol as r5, c1 DIV nullcol as r6, + c2 + nullcol as r7, c2 - nullcol as r8, c2 * nullcol as r9, c2 / nullcol as r10, c2 % nullcol as r11, c2 DIV nullcol as r12, + c3 + nullcol as r13, c3 - nullcol as r14, c3 * nullcol as r15, c3 / nullcol as r16, c3 % nullcol as r17, c3 DIV nullcol as r18, + c4 + nullcol as r19, c4 - nullcol as r20, c4 * nullcol as r21, c4 / nullcol as r22, c4 % nullcol as r23, + c5 + nullcol as r25, c5 - nullcol as r26, c5 * nullcol as r27, c5 / nullcol as r28, c5 % nullcol as r29, + year(c6) + nullcol as r31, year(c6) - nullcol as r32, year(c6) * nullcol as r33, year(c6) / nullcol as r34, year(c6) % nullcol as r35, year(c6) 
DIV nullcol as r36, + -nullcol as r37, + c7 + nullcol as r38, c7 - nullcol as r39, c7 * nullcol as r40, c7 / nullcol as r41, c7 % nullcol as r42 + from {0}; + expect: + columns: ["r1 int32", "r2 int32", "r3 int32", "r4 double", "r5 int32", "r6 int32", + "r7 int32", "r8 int32", "r9 int32", "r10 double", "r11 int32", "r12 int32", + "r13 bigint", "r14 bigint", "r15 bigint", "r16 double", "r17 bigint", "r18 bigint", + "r19 float", "r20 float", "r21 float", "r22 double", "r23 float", + "r25 double", "r26 double", "r27 double", "r28 double", "r29 double", + "r31 int32", "r32 int32", "r33 int32", "r34 double", "r35 int32", "r36 int32", "r37 int32", + "r38 int32", "r39 int32", "r40 int32","r41 double","r42 int32"] + rows: + - [NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL] + - id: 20 + desc: 算数表达式操作数为const null时返回null-left + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", + "c4 float","c5 double", "c6 timestamp", "c7 bool", "colnull int32"] + indexs: ["index1:c3:c6"] + rows: + - [1, 911, 1024, 3.14, 0.99, 1590738989000, true, NULL] + sql: select + NULL + c1 as r1, NULL - c1 as r2, NULL * c1 as r3, NULL / c1 as r4, NULL % c1 as r5, NULL DIV c1 as r6, + NULL + c2 as r7, NULL - c2 as r8, NULL * c2 as r9, NULL / c2 as r10, NULL % c2 as r11, NULL DIV c2 as r12, + NULL + c3 as r13, NULL - c3 as r14, NULL * c3 as r15, NULL / c3 as r16, NULL % c3 as r17, NULL DIV c3 as r18, + NULL + c4 as r19, NULL - c4 as r20, NULL * c4 as r21, NULL / c4 as r22, NULL % c4 as r23, + NULL + c5 as r25, NULL - c5 as r26, NULL * c5 as r27, NULL / c5 as r28, NULL % c5 as r29, + year(c6) + NULL as r31, year(c6) - NULL as r32, year(c6) * NULL as r33, year(c6) / NULL as r34, year(c6) % NULL as r35, year(c6) DIV NULL as r36, + NULL as r37, + c7 + NULL as r38, c7 - NULL as r39, c7 * NULL as r40, 
c7 / NULL as r41, c7 % NULL as r42 + from {0}; + expect: + columns: ["r1 int16", "r2 int16", "r3 int16", "r4 double", "r5 int16", "r6 int16", + "r7 int32", "r8 int32", "r9 int32", "r10 double", "r11 int32", "r12 int32", + "r13 bigint", "r14 bigint", "r15 bigint", "r16 double", "r17 bigint", "r18 bigint", + "r19 float", "r20 float", "r21 float", "r22 double", "r23 float", + "r25 double", "r26 double", "r27 double", "r28 double", "r29 double", + "r31 int32", "r32 int32", "r33 int32", "r34 double", "r35 int32", "r36 int32", "r37 bool", + "r38 bool", "r39 bool", "r40 bool","r41 double","r42 bool"] + rows: + - [NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL] + - id: bitwise_operators + desc: bitwise and/or/xor + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", "c6 timestamp"] + indexs: ["index1:c3:c6"] + rows: + - [3, 6, 12, 1590738989000] + dataProvider: + - ['&', '|', '^'] + sql: | + select c1 d[0] c1 as r11, c1 d[0] c2 as r12, c1 d[0] c3 as r13, c2 d[0] c2 as r22, c2 d[0] c3 as r23, c3 d[0] c3 as r33 from {0}; + expect: + columns: [ 'r11 int16', 'r12 int32', 'r13 bigint', 'r22 int32', 'r23 bigint', 'r33 bigint' ] + expectProvider: + 0: + rows: + - [ 3, 2, 0, 6, 4, 12 ] + 1: + rows: + - [ 3, 7, 15, 6, 14, 12 ] + 2: + rows: + - [ 0, 5, 15, 0, 10, 0 ] + - id: bitwise_operators_fail + desc: bitwise and/or/xor, fail on non-integral operands + inputs: + - columns: [ "c0 int", "c1 bool", "c2 float", "c3 double", "c4 string", "c5 date", "c6 timestamp" ] + indexs: ["index1:c0:c6"] + rows: + - [1, true, 1.0, 2.0, "abc", "2012-8-11", 1590738989000] + sql: | + select d[1] d[0] 10 as r1 from {0}; + dataProvider: + - ['&', '|', '^'] + - [ '{0}.c1', '{0}.c2', '{0}.c3', '{0}.c4', '{0}.c5', '{0}.c6' ] + expect: + success: false + - id: bitwise_operators_not + desc: bitwise 
not + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", "c6 timestamp"] + indexs: ["index1:c3:c6"] + rows: + - [3, 6, 12, 1590738989000] + sql: | + select ~c1 as r1, ~c2 as r2, ~c3 as r3 from {0}; + expect: + columns: [ 'r1 int16', 'r2 int32', 'r3 bigint'] + rows: + - [ -4, -7, -13 ] + - id: bitwise_not_fail + desc: bitwise not, fail on non-integral operand + inputs: + - columns: [ "c0 int", "c1 bool", "c2 float", "c3 double", "c4 string", "c5 date", "c6 timestamp" ] + indexs: ["index1:c0:c6"] + rows: + - [1, true, 1.0, 2.0, "abc", "2012-8-11", 1590738989000] + sql: | + select d[0] d[1] as r1 from {0}; + dataProvider: + - ['~'] + - [ '{0}.c1', '{0}.c2', '{0}.c3', '{0}.c4', '{0}.c5', '{0}.c6' ] + expect: + success: false + - id: bitwise_null_operands + desc: bitwise operation return null if any of operands is null + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", "c4 int16", "c6 timestamp"] + indexs: ["index1:c3:c6"] + rows: + - [3, 6, 12, NULL, 1590738989000] + sql: | + select {0}.c1 & {0}.c4 as r1, {0}.c2 | {0}.c4 as r2, {0}.c3 ^ {0}.c4 as r3, ~ {0}.c4 as r4 from {0}; + expect: + columns: [ 'r1 int16', 'r2 int32', 'r3 int64', 'r4 int16' ] + rows: + - [ NULL, NULL, NULL, NULL ] + - id: bitwise_const_null_operands + desc: bitwise operation return null if any of operands is null + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", "c4 int", "c6 timestamp"] + indexs: ["index1:c3:c6"] + rows: + - [3, 6, 12, NULL, 1590738989000] + sql: | + select {0}.c1 & NULL as r1, {0}.c2 | NULL as r2, {0}.c3 ^ NULL as r3, ~ NULL as r4 from {0}; + expect: + columns: [ 'r1 int16', 'r2 int32', 'r3 int64', 'r4 bool' ] + rows: + - [ NULL, NULL, NULL, NULL ] diff --git a/cases/integration_test/expression/test_condition.yaml b/cases/integration_test/expression/test_condition.yaml new file mode 100644 index 00000000000..54d1dd4ad4d --- /dev/null +++ b/cases/integration_test/expression/test_condition.yaml @@ -0,0 +1,400 @@ +# Copyright 2021 4Paradigm +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: SIMPLE CASE WHEN 表达式 + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, "cc",1590738989000] + - [3, "bb",1590738989000] + - [4, "dd",1590738989000] + sql: | + select col1, col2, case col2 + when 'aa' then 'apple' + else 'nothing' + end as case_f1 from {0}; + expect: + columns: ["col1 int", "col2 string", "case_f1 string"] + order: col1 + rows: + - [1, "aa", "apple"] + - [2, "cc", "nothing"] + - [3, "bb", "nothing"] + - [4, "dd", "nothing"] + - id: 1 + desc: SIMPLE CASE WHEN 表达式无ELSE + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, "cc",1590738989000] + - [3, "bb",1590738989000] + - [4, "dd",1590738989000] + sql: | + select col1, col2, case col2 + when 'aa' then 'apple' + end as case_f1 from {0}; + expect: + columns: ["col1 int", "col2 string", "case_f1 string"] + order: col1 + rows: + - [1, "aa", "apple"] + - [2, "cc", null] + - [3, "bb", null] + - [4, "dd", null] + - id: 2 + desc: SIMPLE CASE WHEN 表达式 ELSE NULL + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, "cc",1590738989000] + - [3, "bb",1590738989000] + - [4, "dd",1590738989000] + sql: | + select col1, col2, case col2 + when 'aa' then 'apple' + 
else NULL + end as case_f1 from {0}; + expect: + columns: ["col1 int", "col2 string", "case_f1 string"] + order: col1 + rows: + - [1, "aa", "apple"] + - [2, "cc", null] + - [3, "bb", null] + - [4, "dd", null] + - id: 3 + desc: SIMPLE CASE WHEN 表达式 THEN NULL + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, "cc",1590738989000] + - [3, "bb",1590738989000] + - [4, "dd",1590738989000] + - [5, null ,1590738989000] + sql: | + select col1, col2, case col2 + when 'aa' then 'apple' + when 'bb' then NULL + when 'cc' then 'cake' + else 'nothing' + end as case_f1 from {0}; + expect: + columns: ["col1 int", "col2 string", "case_f1 string"] + order: col1 + rows: + - [1, "aa", "apple"] + - [2, "cc", "cake"] + - [3, "bb", null] + - [4, "dd", "nothing"] + - [5, null, "nothing"] + - id: 4 + desc: SEARCHED CASE WHEN 表达式 + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, "cc",1590738989000] + - [3, "bb",1590738989000] + - [4, "dd",1590738989000] + sql: | + select col1, col2, case + when col2='aa' then 'apple' + when col2='bb' then 'banana' + when col2='cc' then 'cake' + else 'nothing' + end as case_f1 from {0}; + expect: + columns: ["col1 int", "col2 string", "case_f1 string"] + order: col1 + rows: + - [1, "aa", "apple"] + - [2, "cc", "cake"] + - [3, "bb", "banana"] + - [4, "dd", "nothing"] + - id: 5 + desc: SEARCHED CASE WHEN 表达式无 ELSE + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, "cc",1590738989000] + - [3, "bb",1590738989000] + - [4, "dd",1590738989000] + sql: | + select col1, col2, case + when col2='aa' then 'apple' + end as case_f1 from {0}; + expect: + columns: ["col1 int", "col2 string", "case_f1 string"] + order: col1 + rows: + - [1, "aa", "apple"] + - [2, "cc", null] + - [3, "bb", null] + - [4, 
"dd", null] + - id: 6 + desc: SEARCHED CASE WHEN 表达式 ELSE + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, "cc",1590738989000] + - [3, "bb",1590738989000] + - [4, "dd",1590738989000] + sql: | + select col1, col2, case + when col2='aa' then 'apple' + when col2='bb' then 'banana' + when col2='cc' then 'cake' + else 'nothing' + end as case_f1 from {0}; + expect: + columns: ["col1 int", "col2 string", "case_f1 string"] + order: col1 + rows: + - [1, "aa", "apple"] + - [2, "cc", "cake"] + - [3, "bb", "banana"] + - [4, "dd", "nothing"] + - id: 7 + desc: 条件表达式null测试 + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["id int64", "c1 bool", "c2 string", "c3 string"] + indexs: ["index1:c1:id"] + rows: + - [1, true, "xxx", "aaa"] + - [2, true, "xxx", NULL] + - [3, true, NULL, "aaa"] + - [4, true, NULL, NULL] + - [5, false, "xxx", "aaa"] + - [6, false, "xxx", NULL] + - [7, false, NULL, "aaa"] + - [8, false, NULL, NULL] + - [9, NULL, "xxx", "aaa"] + - [10, NULL, "xxx", NULL] + - [11, NULL, NULL, "aaa"] + - [12, NULL, NULL, NULL] + sql: select id, case when c1 then c2 else c3 end as result from {0}; + expect: + columns: ["id int64", "result string"] + order: id + rows: + - [1, "xxx"] + - [2, "xxx"] + - [3, NULL] + - [4, NULL] + - [5, "aaa"] + - [6, NULL] + - [7, "aaa"] + - [8, NULL] + - [9, "aaa"] + - [10, NULL] + - [11, "aaa"] + - [12, NULL] + - id: 8 + desc: IFNULL + sqlDialect: ["HybridSQL"] + mode: cli-unsupport + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, null,1590738989000] + - [3, "",1590738989000] + sql: | + select col1,ifnull(col2,"abc") as e1 from {0}; + expect: + columns: ["col1 int", "e1 string"] + order: col1 + rows: + - [1, "aa"] + - [2, "abc"] + - [3, ""] + - id: 9 + desc: IFNULL-不同类型 + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["col1 int","col2 int", "col4 timestamp"] 
+ indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,ifnull(col2,"abc") as e1 from {0}; + expect: + success: false + - id: 10 + desc: IFNULL-表达式 + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["col1 int","col2 int", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,ifnull(col2,100) as e1,ifnull(col2+1,100) as e2 from {0}; + expect: + columns: ["col1 int", "e1 int", "e2 int"] + order: col1 + rows: + - [1, 0,1] + - [2, 100,100] + - [3, 1,2] + - id: 11-1 + desc: IFNULL-表达式-/0 + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["col1 int","col2 int", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,ifnull(col2 /0 ,100) as e3 from {0}; + expect: + success: false + - id: 11-2 + mode: cli-unsupport + desc: NVL is synonyms to ifnull + inputs: + - columns: ["col1 int","col2 string", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, "aa",1590738989000] + - [2, null,1590738989000] + - [3, "",1590738989000] + sql: | + select col1,nvl(col2,"abc") as e1 from {0}; + expect: + columns: ["col1 int", "e1 string"] + order: col1 + rows: + - [1, "aa"] + - [2, "abc"] + - [3, ""] + - id: 11-3 + desc: NVL-表达式-/0 + inputs: + - columns: ["col1 int","col2 int", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,nvl(col2 /0 ,100) as e3 from {0}; + expect: + success: false + - id: 12 + desc: IFNULL-兼容类型 + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["col1 int","col2 bigint", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,ifnull(col2,100) as e1 from {0}; + 
expect: + success: false + - id: 13 + desc: IFNULL-浮点型 + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["col1 int","col2 bigint", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,ifnull(col2,1.1) as e2 from {0}; + expect: + success: false + + - id: NVL2-1 + desc: NVL2 + inputs: + - columns: ["col1 int","col2 int", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,nvl2(col2, "abc", "def") as e1 from {0}; + expect: + columns: ["col1 int", "e1 string"] + order: col1 + rows: + - [1, "abc"] + - [2, "def"] + - [3, "abc"] + + - id: NVL2-2 + desc: NVL2, type not match + inputs: + - columns: ["col1 int","col2 int", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1,nvl2(col2, "abc", col1 + 1) as e1 from {0}; + expect: + success: false + + - id: NVL2-3 + desc: NVL2, sub expression + inputs: + - columns: ["col1 int","col2 int", "col4 timestamp"] + indexs: ["index1:col1:col4"] + rows: + - [1, 0,1590738989000] + - [2, null,1590738989000] + - [3, 1,1590738989000] + sql: | + select col1, nvl2(col2, col1 * col1, col1 + 1) as e1 from {0}; + expect: + columns: ["col1 int", "e1 int"] + order: col1 + rows: + - [1, 1] + - [2, 3] + - [3, 9] \ No newline at end of file diff --git a/cases/integration_test/expression/test_like.yaml b/cases/integration_test/expression/test_like.yaml new file mode 100644 index 00000000000..d47bb57b616 --- /dev/null +++ b/cases/integration_test/expression/test_like.yaml @@ -0,0 +1,1138 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +sqlDialect: ["HybridSQL"] +version: 0.5.0 +cases: + - id: 0 + desc: "使用_" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"a_b",30,30,30,30.0,30.0,1590738990000,"2020-05-01",false] + - [2,"aab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false] + - [3,"a%b",30,30,30,30.0,30.0,1590738992000,"2020-05-01",false] + - [4,"b_c",30,30,30,30.0,30.0,1590738993000,"2020-05-01",false] + - [5,"abc",30,30,30,30.0,30.0,1590738994000,"2020-05-01",false] + - [6,"A0b",30,30,30,30.0,30.0,1590738995000,"2020-05-01",false] + - [7,"a#B",30,30,30,30.0,30.0,1590738996000,"2020-05-01",false] + - [8,"aaab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] 'a_b' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"a_b",true] + - [2,"aab",true] + - [3,"a%b",true] + - [4,"b_c",false] + - [5,"abc",false] + - [6,"A0b",false] + - [7,"a#B",false] + - [8,"aaab",false] + 1: + rows: + - [1,"a_b",false] + - [2,"aab",false] + - [3,"a%b",false] + - [4,"b_c",true] + - [5,"abc",true] + - [6,"A0b",true] + - [7,"a#B",true] + - [8,"aaab",true] + 2: + rows: + - [1,"a_b",true] + - [2,"aab",true] + - [3,"a%b",true] + - [4,"b_c",false] + - [5,"abc",false] + - [6,"A0b",true] + - [7,"a#B",true] + - [8,"aaab",false] + 3: + rows: + - [1,"a_b",false] + - [2,"aab",false] + - 
[3,"a%b",false] + - [4,"b_c",true] + - [5,"abc",true] + - [6,"A0b",false] + - [7,"a#B",false] + - [8,"aaab",true] + - id: 1 + desc: "使用%" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"a%_b",1590738992000] + - [4,"b_c",1590738993000] + - [5,"abc",1590738994000] + - [6,"A0b",1590738995000] + - [7,"a#B",1590738996000] + - [8,"aaab",1590738997000] + - [9,"ab",1590738998000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] 'a%b' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b",true] + - [4,"b_c",false] + - [5,"abc",false] + - [6,"A0b",false] + - [7,"a#B",false] + - [8,"aaab",true] + - [9,"ab",true] + 1: + rows: + - [1,"a_b",false] + - [2,"aabb",false] + - [3,"a%_b",false] + - [4,"b_c",true] + - [5,"abc",true] + - [6,"A0b",true] + - [7,"a#B",true] + - [8,"aaab",false] + - [9,"ab",false] + 2: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b",true] + - [4,"b_c",false] + - [5,"abc",false] + - [6,"A0b",true] + - [7,"a#B",true] + - [8,"aaab",true] + - [9,"ab",true] + 3: + rows: + - [1,"a_b",false] + - [2,"aabb",false] + - [3,"a%_b",false] + - [4,"b_c",true] + - [5,"abc",true] + - [6,"A0b",false] + - [7,"a#B",false] + - [8,"aaab",false] + - [9,"ab",false] + - id: 2 + desc: "同时使用%和_" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '_a%b' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + 
- [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"aa#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"aa#0B",true] + 2: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",true] + - [7,"aa#0B",true] + 3: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",false] + - [7,"aa#0B",false] + - id: 3 + desc: "使用默认的escape" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] "\\_a%b" ESCAPE "\\" as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",true] + 2: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",true] + 3: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",false] + - id: 4 + desc: "指定escape为#" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - 
[3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '#_a%b' ESCAPE '#' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",true] + 2: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",true] + 3: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",false] + - id: 5 + desc: "指定escape为_" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '__a%b' ESCAPE '_' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",true] + 2: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - 
[7,"_a#0B",true] + 3: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",false] + - id: 6 + desc: "指定escape为%" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA%b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '_a%%b' ESCAPE '%' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA%b",true] + - [7,"_a#0B",true] + 2: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",true] + - [7,"_a#0B",false] + 3: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA%b",false] + - [7,"_a#0B",true] + - id: 7 + desc: "escape不指定" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] "\\_a%b" as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + 
- [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",true] + 2: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",true] + 3: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"_a#0B",false] + - id: 8 + desc: "escape为空串,使用\\" + mode: cluster-unsupport + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,'\\\%a_b',1590738990000] + - [2,'\\\aabb',1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,'\\\bA0b',1590738995000] + - [7,'\\\_a#0B',1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] "\\_a%b" escape "" as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,'\%a_b',true] + - [2,'\aabb',true] + - [3,'_a%_b',false] + - [4,'ba_c',false] + - [5,"abb",false] + - [6,'\bA0b',false] + - [7,'\_a#0B',false] + 1: + rows: + - [1,'\%a_b',false] + - [2,'\aabb',false] + - [3,"_a%_b",true] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,'\bA0b',true] + - [7,'\_a#0B',true] + 2: + rows: + - [1,'\%a_b',true] + - [2,'\aabb',true] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,'\bA0b',true] + - [7,'\_a#0B',true] + 3: + rows: + - [1,'\%a_b',false] + - [2,'\aabb',false] + - [3,"_a%_b",true] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,'\bA0b',false] + - [7,'\_a#0B',false] + - id: 9 + desc: "使用两个%" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"a%_b%0",1590738992000] + - [4,"b_c",1590738993000] + - 
[5,"abc",1590738994000] + - [6,"A0b",1590738995000] + - [7,"a#Bb",1590738996000] + - [8,"aaabbcc",1590738991000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] 'a%b%' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b%0",true] + - [4,"b_c",false] + - [5,"abc",true] + - [6,"A0b",false] + - [7,"a#Bb",true] + - [8,"aaabbcc",true] + 1: + rows: + - [1,"a_b",false] + - [2,"aabb",false] + - [3,"a%_b%0",false] + - [4,"b_c",true] + - [5,"abc",false] + - [6,"A0b",true] + - [7,"a#Bb",false] + - [8,"aaabbcc",false] + 2: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b%0",true] + - [4,"b_c",false] + - [5,"abc",true] + - [6,"A0b",true] + - [7,"a#Bb",true] + - [8,"aaabbcc",true] + 3: + rows: + - [1,"a_b",false] + - [2,"aabb",false] + - [3,"a%_b%0",false] + - [4,"b_c",true] + - [5,"abc",false] + - [6,"A0b",false] + - [7,"a#Bb",false] + - [8,"aaabbcc",false] + - id: 10 + desc: "使用两个_" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '_a_b' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"aa#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",true] + - [7,"aa#0B",true] + 2: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - 
[6,"bA0b",true] + - [7,"aa#0B",false] + 3: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA0b",false] + - [7,"aa#0B",true] + - id: 11 + desc: "使用两个%,其中一个被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"a_b",1590738990000] + - [2,"aab%",1590738991000] + - [3,"a%_b%0",1590738992000] + - [4,"b_c",1590738993000] + - [5,"ab%",1590738994000] + - [6,"A0b",1590738995000] + - [7,"a#B%",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] 'a%b#%' escape '#' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"a_b",false] + - [2,"aab%",true] + - [3,"a%_b%0",false] + - [4,"b_c",false] + - [5,"ab%",true] + - [6,"A0b",false] + - [7,"a#B%",false] + 1: + rows: + - [1,"a_b",true] + - [2,"aab%",false] + - [3,"a%_b%0",true] + - [4,"b_c",true] + - [5,"ab%",false] + - [6,"A0b",true] + - [7,"a#B%",true] + 2: + rows: + - [1,"a_b",false] + - [2,"aab%",true] + - [3,"a%_b%0",false] + - [4,"b_c",false] + - [5,"ab%",true] + - [6,"A0b",false] + - [7,"a#B%",true] + 3: + rows: + - [1,"a_b",true] + - [2,"aab%",false] + - [3,"a%_b%0",true] + - [4,"b_c",true] + - [5,"ab%",false] + - [6,"A0b",true] + - [7,"a#B%",false] + - id: 12 + desc: "使用两个_,其中一个被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"_A0b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '#_a_b' escape '#' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - 
[4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",false] + - [7,"aa#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"_A0b",true] + - [7,"aa#0B",true] + 2: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",true] + - [7,"aa#0B",false] + 3: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"_A0b",false] + - [7,"aa#0B",true] + - id: 13 + desc: "同时使用%和_,其中_被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"_A0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '#_a%b' escape '#' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"_A0b",true] + - [7,"_a#0B",true] + 2: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",true] + - [7,"_a#0B",true] + 3: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"_A0b",false] + - [7,"_a#0B",false] + - id: 14 + desc: "同时使用%和_,其中%被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a%b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - 
[5,"abb",1590738994000] + - [6,"bA%b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1,c1 d[0] '_a#%b' escape '#' as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a%b",true] + - [2,"aabb",false] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",false] + - [7,"aa#0B",false] + 1: + rows: + - [1,"%a%b",false] + - [2,"aabb",true] + - [3,"_a%_b",true] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA%b",true] + - [7,"aa#0B",true] + 2: + rows: + - [1,"%a%b",true] + - [2,"aabb",false] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",true] + - [7,"aa#0B",false] + 3: + rows: + - [1,"%a%b",false] + - [2,"aabb",true] + - [3,"_a%_b",true] + - [4,"ba_c",true] + - [5,"abb",true] + - [6,"bA%b",false] + - [7,"aa#0B",true] + - id: 15 + desc: "列中有null和空串" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,null,1590738991000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] 'a%b' as v1 from {0}; + expect: + order: id + columns: ["id bigint","v1 bool"] + rows: + - [1,false] + - [2,null] + expectProvider: + 1: + rows: + - [1,true] + - [2,null] + 3: + rows: + - [1,true] + - [2,null] + - id: 16 + desc: "使用空串" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,"aa",1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '' as v1 from {0}; + expect: + order: id + columns: ["id bigint","v1 bool"] + rows: + - [1,true] + - [2,false] + expectProvider: + 1: + rows: + - [1,false] + - [2,true] + 3: + rows: + - [1,false] + - [2,true] + - id: 17 + desc: "使用null" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] 
+ rows: + - [1,"",1590738990000] + - [2,"aa",1590738991000] + - [3,null,1590738992000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] null as v1 from {0}; + expect: + success: false + - id: 18 + desc: "escape使用null" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,"aa",1590738991000] + - [3,null,1590738992000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] 'a%' escape null as v1 from {0}; + expect: + success: false + - id: 19 + desc: "int类型" + inputs: + - + columns : ["id bigint","c1 int","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12,1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 20 + desc: "bigint类型" + inputs: + - + columns : ["id bigint","c1 bigint","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12,1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 21 + desc: "smallint类型" + inputs: + - + columns : ["id bigint","c1 smallint","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12,1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 22 + desc: "float类型" + inputs: + - + columns : ["id bigint","c1 float","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12.0,1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 23 + desc: "double类型" + inputs: + - + columns : ["id bigint","c1 double","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12.0,1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from 
{0}; + expect: + success: false + - id: 24 + desc: "timestamp类型" + inputs: + - + columns : ["id bigint","c1 timestamp","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12,1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 25 + desc: "date类型" + inputs: + - + columns : ["id bigint","c1 date","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"2012-05-01",1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 26 + desc: "bool类型" + inputs: + - + columns : ["id bigint","c1 bool","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,true,1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 27 + desc: "列不存在" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",1590738990000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c2 d[0] '1%' as v1 from {0}; + expect: + success: false + - id: 28 + desc: "escape为多个字符" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,"aa",1590738991000] + - [3,null,1590738992000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] 'a%' escape '<>' as v1 from {0}; + expect: + success: false + - id: 29 + desc: "pattern以escape character结尾" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"ab#",1590738990000] + - [2,"aa",1590738991000] + dataProvider: + - ["like","not like","ilike","not ilike"] + sql: select id,c1 d[0] 'a%#' escape '#' as v1 from {0}; + expect: + success: true + columns : ["id bigint","v1 bool"] + rows: + - [1,false] + - [2,false] + expectProvider: + 
1: + rows: + - [1,true] + - [2,true] + 3: + rows: + - [1,true] + - [2,true] diff --git a/cases/integration_test/expression/test_logic.yaml b/cases/integration_test/expression/test_logic.yaml new file mode 100644 index 00000000000..d1ce41b7825 --- /dev/null +++ b/cases/integration_test/expression/test_logic.yaml @@ -0,0 +1,135 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +sqlDialect: ["HybridSQL"] +cases: + - id: 0 + desc: "各种类型_逻辑运算_各种类型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",true] + - [2,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",true] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false] + - [2,"",0,0,0,0.0,0.0,0,null,true] + dataProvider: + - ["AND","OR","XOR"] + - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"] + sql: select d[1] d[0] {1}.c1 as b1,d[1] d[0] {1}.c2 as b2,d[1] d[0] {1}.c3 as b3,d[1] d[0] {1}.c4 as b4,d[1] d[0] {1}.c5 as b5,d[1] d[0] {1}.c6 as b6,d[1] d[0] {1}.c7 as b7,d[1] d[0] {1}.c8 as b8,d[1] d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on 
{0}.id={1}.id; + expect: + columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool","b7 bool","b8 bool","b9 bool"] + expectProvider: + 0: + rows: + - [true,true,true,true,true,true,true,true,false] + - [false,false,false,false,false,false,false,null,true] + 1: + rows: + - [true,true,true,true,true,true,true,true,true] + - [true,true,true,true,true,true,true,true,true] + 2: + rows: + - [false,false,false,false,false,false,false,false,true] + - [true,true,true,true,true,true,true,null,false] + - id: 1 + desc: "各种类型_逻辑非_各种类型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false] + - [2,"",0,0,0,0.0,0.0,0,null,true] + dataProvider: + - ["NOT","!"] + sql: select d[0] {0}.c1 as b1,d[0] {0}.c2 as b2,d[0] {0}.c3 as b3,d[0] {0}.c4 as b4,d[0] {0}.c5 as b5,d[0] {0}.c6 as b6,d[0] {0}.c7 as b7,d[0] {0}.c8 as b8,d[0] {0}.c9 as b9 from {0}; + expect: + columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool","b7 bool","b8 bool","b9 bool"] + rows: + - [false,false,false,false,false,false,false,false,true] + - [true,true,true,true,true,true,true,null,false] + - id: 2 + desc: 三值bool逻辑 + inputs: + - columns: ["id int64", "tt int64", "c1 bool", "c2 bool"] + indexs: ["index1:id:tt"] + rows: + - [1, 1, true, true] + - [2, 2, true, false] + - [3, 3, true, NULL] + - [4, 4, false, true] + - [5, 5, false, false] + - [6, 6, false, NULL] + - [7, 7, NULL, true] + - [8, 8, NULL, false] + - [9, 9, NULL, NULL] + sql: select id, c1, c2, c1 and c2 as c_and, c1 or c2 as c_or, c1 xor c2 as c_xor, not c1 as c_not from {0}; + expect: + order: id + columns: ["id int64", "c1 bool", "c2 bool", "c_and bool", "c_or bool", "c_xor bool", "c_not bool"] + rows: + - [1, true, true, true, true, false, false] + - [2, true, false, false, true, true, false] + - [3, true, NULL, NULL, true, NULL, 
false] + - [4, false, true, false, true, true, true] + - [5, false, false, false, false, false, true] + - [6, false, NULL, false, NULL, NULL, true] + - [7, NULL, true, NULL, true, NULL, NULL] + - [8, NULL, false, false, NULL, NULL, NULL] + - [9, NULL, NULL, NULL, NULL, NULL, NULL] + - id: 3 + desc: 逻辑表达式不使用布尔表达式 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp","c5 date"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000,"2020-05-01"] + sql: select c2=2 and (c2-1) as f1 from {0}; + expect: + columns: ["f1 bool"] + rows: + - [true] + - id: 4 + desc: 逻辑表达式不使用布尔表达式! + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp","c5 date"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000,"2020-05-01"] + sql: select !c2 as not_c2 from {0}; + expect: + columns: ["not_c2 bool"] + rows: + - [false] + - id: 5 + desc: 逻辑表达式不使用布尔表达式-常量 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp","c5 date"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000,"2020-05-01"] + sql: select c2==2 and false as flag1,!true as flag2 from {0}; + expect: + columns: ["flag1 bool", "flag2 bool"] + rows: + - [false,false] diff --git a/cases/integration_test/expression/test_predicate.yaml b/cases/integration_test/expression/test_predicate.yaml new file mode 100644 index 00000000000..db183a878e7 --- /dev/null +++ b/cases/integration_test/expression/test_predicate.yaml @@ -0,0 +1,778 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +sqlDialect: ["HybridSQL"] +cases: + - id: 0 + desc: "string_比较运算_各种类型" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false] + dataProvider: + - [">",">=","<","<=","<>","!=","=","=="] + sql: select {0}.c1 d[0] {1}.c1 as b1,{0}.c1 d[0] {1}.c2 as b2,{0}.c1 d[0] {1}.c3 as b3,{0}.c1 d[0] {1}.c4 as b4,{0}.c1 d[0] {1}.c5 as b5,{0}.c1 d[0] {1}.c6 as b6,{0}.c1 d[0] {1}.c7 as b7,{0}.c1 d[0] {1}.c8 as b8,{0}.c1 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool", "b7 bool", "b8 bool", "b9 bool"] + expectProvider: + 0: + rows: + - [false,false,false,false,false,false,false,false,false] + 1: + rows: + - [false,true,false,false,false,false,false,false,false] + 2: + rows: + - [true,false,true,true,true,true,true,true,true] + 3: + rows: + - [true,true,true,true,true,true,true,true,true] + 4: + rows: + - [true,false,true,true,true,true,true,true,true] + 5: + rows: + - [true,false,true,true,true,true,true,true,true] + 6: + rows: + - [false,true,false,false,false,false,false,false,false] + 7: + rows: + - [false,true,false,false,false,false,false,false,false] + - id: 1 + desc: "整型_比较运算_各种类型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - 
[1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - [">",">=","<","<=","<>","!=","=","=="] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"] + sql: select d[1]d[0]{1}.c1 as b1,d[1]d[0]{1}.c2 as b2,d[1]d[0]{1}.c3 as b3,d[1]d[0]{1}.c4 as b4,d[1]d[0]{1}.c5 as b5,d[1]d[0]{1}.c6 as b6,d[1]d[0]{1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool","b9 bool"] + expectProvider: + 0: + rows: + - [false,false,false,false,false,false,true] + 1: + rows: + - [false,true,false,false,false,false,true] + 2: + rows: + - [true,false,true,true,true,true,false] + 3: + rows: + - [true,true,true,true,true,true,false] + 4: + rows: + - [true,false,true,true,true,true,true] + 5: + rows: + - [true,false,true,true,true,true,true] + 6: + rows: + - [false,true,false,false,false,false,false] + 7: + rows: + - [false,true,false,false,false,false,false] + - id: 2 + desc: "整型_比较运算_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - [">",">=","<","<=","<>","!=","=","=="] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"] + - ["{1}.c7","{1}.c8"] + sql: select d[1]d[0]d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 3 + desc: 
"时间类型_比较运算_各种类型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"10",1,2,3,1.1,2.1,1590738989001,"2020-05-02",true] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"2020-05-29 15:56:29",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false] + - [2,"2020-05-02",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false] + dataProvider: + - [">",">=","<","<=","<>","!=","=","=="] + sql: select {0}.c8 d[0] {1}.c1 as b1,{0}.c8 d[0] {1}.c8 as b2,{0}.c7 d[0] {1}.c1 as b3,{0}.c7 d[0] {1}.c7 as b4 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b1 bool","b2 bool","b3 bool","b4 bool"] + expectProvider: + 0: + rows: + - [false,false,false,false] + - [false,false,true,false] + 1: + rows: + - [false,false,true,false] + - [true,true,true,true] + 2: + rows: + - [true,true,false,true] + - [false,false,false,false] + 3: + rows: + - [true,true,true,true] + - [true,true,false,true] + 4: + rows: + - [true,true,false,true] + - [false,false,true,false] + 5: + rows: + - [true,true,false,true] + - [false,false,true,false] + 6: + rows: + - [false,false,true,false] + - [true,true,false,true] + 7: + rows: + - [false,false,true,false] + - [true,true,false,true] + - id: 4 + desc: "timestamp_比较运算_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - 
[1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - [">",">=","<","<=","<>","!=","=","=="] + - ["{1}.c7"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9","{1}.c8"] + sql: select d[1]d[0]d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 5 + desc: "date_比较运算_各种类型_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - [">",">=","<","<=","<>","!=","=","=="] + - ["{1}.c8"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9","{1}.c7"] + sql: select d[1]d[0]d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + success: false + - id: 6 + desc: "bool_比较运算_各种类型" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool","c10 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"1",1,20,30,11.1,12.1,1590738989001,"2020-05-02",false,true] + dataProvider: + - [">",">=","<","<=","<>","!=","=","=="] + sql: select {0}.c9 d[0] {1}.c1 as b1,{0}.c9 d[0] {1}.c2 as b2,{0}.c9 d[0] {1}.c3 as b3,{0}.c9 d[0] {1}.c4 as b4,{0}.c9 d[0] {1}.c5 as b5,{0}.c9 d[0] {1}.c6 as b6,{0}.c9 d[0] {1}.c9 as b9,{0}.c9 d[0] {1}.c10 as b10 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b1 bool","b2 
bool","b3 bool","b4 bool","b5 bool","b6 bool", "b9 bool","b10 bool"] + expectProvider: + 0: + rows: + - [true,false,false,false,false,false,true,false] + 1: + rows: + - [true,true,false,false,false,false,true,true] + 2: + rows: + - [false,false,true,true,true,true,false,false] + 3: + rows: + - [false,true,true,true,true,true,false,true] + 4: + rows: + - [true,false,true,true,true,true,true,false] + 5: + rows: + - [true,false,true,true,true,true,true,false] + 6: + rows: + - [false,true,false,false,false,false,false,true] + 7: + rows: + - [false,true,false,false,false,false,false,true] + - id: 7 + desc: "IS_NULL_各种类型" + tags: ["TODO","目前不支持"] + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["c1","c2","c3","c4","c5","c6","c7","c8","c9"] + sql: select * from {0} where d[0] is null; + expect: + columns: ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - id: 8 + desc: "ISNULL()" + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + sql: select isnull(c1) as b1,isnull(c2) as b2,isnull(c3) as b3,isnull(c4) as b4,isnull(c5) as b5,isnull(c6) as b6,isnull(c7) as b7,isnull(c8) as b8,isnull(c9) as b9 from {0}; + expect: + order: id + columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool", "b7 bool", "b8 bool", "b9 bool"] + rows: + - [false,false,false,false,false,false,false,false,false] + - 
[true,true,true,true,true,true,true,true,true] + - id: 9 + desc: 直接和NULL比较返回NULL + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", + "c4 float","c5 double", "c6 timestamp", "c7 string", + "nullcol int32", "nulltime timestamp", "nullstr string"] + indexs: ["index1:c3:c6"] + rows: + - [1, 911, 1024, 3.14, 0.99, 1590738989000, "str", NULL, NULL, NULL] + sql: select + c1 > nullcol as r1, c1 >= nullcol as r2, c1 < nullcol as r3, c1 <= nullcol as r4, c1 = nullcol as r5, c1 != nullcol as r6, + c2 > nullcol as r7, c2 >= nullcol as r8, c2 < nullcol as r9, c2 <= nullcol as r10, c2 = nullcol as r11, c2 != nullcol as r12, + c3 > nullcol as r13, c3 >= nullcol as r14, c3 < nullcol as r15, c3 <= nullcol as r16, c3 = nullcol as r17, c3 != nullcol as r18, + c4 > nullcol as r19, c4 >= nullcol as r20, c4 < nullcol as r21, c4 <= nullcol as r22, c4 = nullcol as r23, c4 != nullcol as r24, + c5 > nullcol as r25, c5 >= nullcol as r26, c5 < nullcol as r27, c5 <= nullcol as r28, c5 = nullcol as r29, c5 != nullcol as r30, + c6 > nulltime as r31, c6 >= nulltime as r32, c6 < nulltime as r33, c6 <= nulltime as r34, c6 = nulltime as r35, c6 != nulltime as r36, + c7 > nullstr as r37, c7 >= nullstr as r38, c7 < nullstr as r39, c7 <= nullstr as r40, c7 = nullstr as r41, c7 != nullstr as r42, + nullstr > nullstr as r43, nullstr >= nullstr as r44, nullstr < nullstr as r45, + nullstr <= nullstr as r46, nullstr = nullstr as r47, nullstr != nullstr as r48 + from {0}; + expect: + columns: ["r1 bool", "r2 bool", "r3 bool", "r4 bool", "r5 bool", "r6 bool", "r7 bool", "r8 bool", + "r9 bool", "r10 bool", "r11 bool", "r12 bool", "r13 bool", "r14 bool", "r15 bool", "r16 bool", + "r17 bool", "r18 bool", "r19 bool", "r20 bool", "r21 bool", "r22 bool", "r23 bool", "r24 bool", + "r25 bool", "r26 bool", "r27 bool", "r28 bool", "r29 bool", "r30 bool", "r31 bool", "r32 bool", + "r33 bool", "r34 bool", "r35 bool", "r36 bool", "r37 bool", "r38 bool", "r39 bool", "r40 bool", + "r41 bool", "r42 bool", "r43 
bool", "r44 bool", "r45 bool", "r46 bool", "r47 bool", "r48 bool"] + rows: + - [NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL] + - id: 10 + desc: 直接和Const NULL比较返回NULL + inputs: + - columns: ["c1 int16","c2 int32","c3 bigint", + "c4 float","c5 double", "c6 timestamp", "c7 string", + "nullcol int32", "nulltime timestamp", "nullstr string"] + indexs: ["index1:c3:c6"] + rows: + - [1, 911, 1024, 3.14, 0.99, 1590738989000, "str", NULL, NULL, NULL] + sql: select + c1 > NULL as r1, c1 >= NULL as r2, c1 < NULL as r3, c1 <= NULL as r4, c1 = NULL as r5, c1 != NULL as r6, + c2 > NULL as r7, c2 >= NULL as r8, c2 < NULL as r9, c2 <= NULL as r10, c2 = NULL as r11, c2 != NULL as r12, + c3 > NULL as r13, c3 >= NULL as r14, c3 < NULL as r15, c3 <= NULL as r16, c3 = NULL as r17, c3 != NULL as r18, + c4 > NULL as r19, c4 >= NULL as r20, c4 < NULL as r21, c4 <= NULL as r22, c4 = NULL as r23, c4 != NULL as r24, + c5 > NULL as r25, c5 >= NULL as r26, c5 < NULL as r27, c5 <= NULL as r28, c5 = NULL as r29, c5 != NULL as r30, + c6 > NULL as r31, c6 >= NULL as r32, c6 < NULL as r33, c6 <= NULL as r34, c6 = NULL as r35, c6 != NULL as r36, + c7 > NULL as r37, c7 >= NULL as r38, c7 < NULL as r39, c7 <= NULL as r40, c7 = NULL as r41, c7 != NULL as r42, + NULL > NULL as r43, NULL >= NULL as r44, NULL < NULL as r45, + NULL <= NULL as r46, NULL = NULL as r47, NULL != NULL as r48 + from {0}; + expect: + columns: ["r1 bool", "r2 bool", "r3 bool", "r4 bool", "r5 bool", "r6 bool", "r7 bool", "r8 bool", + "r9 bool", "r10 bool", "r11 bool", "r12 bool", "r13 bool", "r14 bool", "r15 bool", "r16 bool", + "r17 bool", "r18 bool", "r19 bool", "r20 bool", "r21 bool", "r22 bool", "r23 bool", "r24 bool", + "r25 bool", "r26 bool", "r27 bool", "r28 bool", "r29 
bool", "r30 bool", "r31 bool", "r32 bool", + "r33 bool", "r34 bool", "r35 bool", "r36 bool", "r37 bool", "r38 bool", "r39 bool", "r40 bool", + "r41 bool", "r42 bool", "r43 bool", "r44 bool", "r45 bool", "r46 bool", "r47 bool", "r48 bool"] + rows: + - [NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL] + + - id: between_predicate_1 + desc: between predicate, numberic between + inputs: + - columns: [ "id bigint", "name string", "code string", "ctime bigint", "cdate date" ] + indexs: [ "index1:id:ctime" ] + rows: + - [1, Lucy, A, 1609459201000, 2021-1-1] + - [2, Zoey, B, 1609545841000, 2021-1-2] + - columns: [ "id bigint", "std_ts bigint", "score int" ] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1609459201000, 10] + - [2, 1609459202000, 100] + - [3, 1609459203000, 20] + - [4, 1609459204000, 30] + - [5, 1609459205000, 50] + sql: | + SELECT {0}.id, {0}.name, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts ON {0}.id = {1}.id AND {0}.id BETWEEN 1 AND 4; + expect: + columns: ["id bigint", "name string", "score int"] + rows: + - [1, Lucy, 10] + - [2, Zoey, 100] + - id: between_predicate_2 + desc: between predicate, string between + inputs: + - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ] + indexs: [ "index1:id:ctime" ] + rows: + - [1, Lucy, ABC, 1609459201000, 2021-1-1] + - [2, Zoey, BBC, 1609545841000, 2021-1-2] + - columns: [ "id bigint", "std_ts bigint", "score int" ] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1609459201000, 10] + - [2, 1609459202000, 100] + - [3, 1609459203000, 20] + - [4, 1609459204000, 30] + - [5, 1609459205000, 50] + sql: | + SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts ON {0}.id = {1}.id AND {0}.code NOT 
BETWEEN 'BBB' AND 'CCC'; + expect: + columns: ["id bigint", "name string", "code string", "score int"] + rows: + - [1, Lucy, ABC, 10] + - [2, Zoey, BBC, NULL] + - id: between_predicate_3 + desc: between predicate, timestamp between + inputs: + - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ] + indexs: [ "index1:id:ctime" ] + rows: + - [1, Lucy, A, 1609459201000, 2021-1-1] + - [2, Zoey, B, 1633265330000, 2021-10-3] + - columns: [ "id bigint", "std_ts bigint", "score int" ] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1609459201000, 10] + - [2, 1609459202000, 100] + - [3, 1609459203000, 20] + - [4, 1609459204000, 30] + - [5, 1609459205000, 50] + sql: | + SELECT {0}.id, {0}.name, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts + ON {0}.id = {1}.id AND {0}.ctime BETWEEN timestamp("2021-01-01") AND timestamp("2021-01-30"); + expect: + columns: ["id bigint", "name string", "score int"] + rows: + - [1, Lucy, 10] + - [2, Zoey, NULL] + - id: between_predicate_4 + desc: between predicate with aggregation function + sql: | + SELECT id, col1, std_ts, + sum(id) OVER w1 BETWEEN 2 AND 6 as w1_id + FROM {0} + WINDOW w1 AS (PARTITION BY col1 ORDER BY std_ts ROWS BETWEEN 1 PRECEDING AND CURRENT ROW); + inputs: + - columns: ["id bigint", "col1 int32", "std_ts timestamp"] + indexs: ["index1:id:std_ts", "index2:col1:std_ts"] + rows: + - [1, 1, 1590115420000] + - [3, 1, 1590115430000] + - [5, 1, 1590115440000] + - [7, 1, 1590115450000] + - [9, 1, 1590115460000] + expect: + columns: ["id bigint", "col1 int32", "std_ts timestamp", "w1_id bool"] + rows: + - [1, 1, 1590115420000, false] + - [3, 1, 1590115430000, true] + - [5, 1, 1590115440000, false] + - [7, 1, 1590115450000, false] + - [9, 1, 1590115460000, false] + - id: in_predicate_normal + desc: normal in predicates + mode: hybridse-only + sql: | + SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts + ON {0}.id = {1}.id AND {0}.code d[0] ('A', 'B'); + 
inputs: + - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ] + indexs: [ "index1:id:ctime" ] + rows: + - [1, Lucy, A, 1609459201000, 2021-1-1] + - [2, Zoey, B, 1633265330000, 2021-10-3] + - columns: [ "id bigint", "std_ts bigint", "score int" ] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1609459201000, 10] + - [2, 1609459202000, 100] + - [3, 1609459203000, 20] + - [4, 1609459204000, 30] + - [5, 1609459205000, 50] + dataProvider: + - ["in", "not in"] + expect: + columns: ["id:bigint", "name:string", "code:string", "score:int"] + expectProvider: + 0: + rows: + - [ 1, Lucy, A, 10 ] + - [ 2, Zoey, B, 100 ] + 1: + rows: + - [ 1, Lucy, A, NULL ] + - [ 2, Zoey, B, NULL ] + - id: in_predicate_type_conversion + desc: type conversion occurred between lhs and in_list + mode: hybridse-only + sql: | + SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts + ON {0}.id = {1}.id AND {0}.id d[0] ('1', 3.0); + inputs: + - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ] + indexs: [ "index1:id:ctime" ] + rows: + - [1, Lucy, A, 1609459201000, 2021-1-1] + - [2, Zoey, B, 1633265330000, 2021-10-3] + - columns: [ "id bigint", "std_ts bigint", "score int" ] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1609459201000, 10] + - [2, 1609459202000, 100] + - [3, 1609459203000, 20] + - [4, 1609459204000, 30] + - [5, 1609459205000, 50] + dataProvider: + - ["in", "not in"] + expect: + columns: ["id:bigint", "name:string", "code:string", "score:int"] + expectProvider: + 0: + rows: + - [1, Lucy, A, 10] + - [2, Zoey, B, NULL] + 1: + rows: + - [1, Lucy, A, NULL] + - [2, Zoey, B, 100] + - id: in_predicate_subexpr + desc: sub expr in in list + mode: hybridse-only + sql: | + SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts + ON {0}.id = {1}.id AND {0}.id d[0] ( {1}.score / 10, {1}.score ); + inputs: + - columns: [ "id bigint", "name string", "code 
string", "ctime timestamp", "cdate date" ] + indexs: [ "index1:id:ctime" ] + rows: + - [1, Lucy, A, 1609459201000, 2021-1-1] + - [2, Zoey, B, 1633265330000, 2021-10-3] + - columns: [ "id bigint", "std_ts bigint", "score int" ] + indexs: ["index1:id:std_ts"] + rows: + - [1, 1609459201000, 10] + - [2, 1609459202000, 100] + - [3, 1609459203000, 20] + - [4, 1609459204000, 30] + - [5, 1609459205000, 50] + dataProvider: + - ["in", "not in"] + expect: + columns: ["id:bigint", "name:string", "code:string", "score:int"] + expectProvider: + 0: + rows: + - [1, Lucy, A, 10] + - [2, Zoey, B, NULL] + 1: + rows: + - [1, Lucy, A, NULL] + - [2, Zoey, B, 100] + - id: in_predicate_with_window + desc: test_expresion refers window + mode: hybridse-only + sql: | + SELECT id, col1, std_ts, + sum(id) OVER w1 d[0] ( 4, 8, 12 ) as w1_id + FROM {0} + WINDOW w1 AS (PARTITION BY col1 ORDER BY std_ts ROWS BETWEEN 1 PRECEDING AND CURRENT ROW); + dataProvider: + - ["in", "not in"] + inputs: + - columns: ["id bigint", "col1 int32", "std_ts timestamp"] + indexs: ["index1:id:std_ts", "index2:col1:std_ts"] + rows: + - [1, 1, 1590115420000] + - [3, 1, 1590115430000] + - [5, 1, 1590115440000] + - [7, 1, 1590115450000] + - [9, 1, 1590115460000] + expect: + columns: ["id bigint", "col1 int32", "std_ts timestamp", "w1_id bool"] + expectProvider: + 0: + rows: + - [1, 1, 1590115420000, false] + - [3, 1, 1590115430000, true] + - [5, 1, 1590115440000, true] + - [7, 1, 1590115450000, true] + - [9, 1, 1590115460000, false] + 1: + rows: + - [1, 1, 1590115420000, true] + - [3, 1, 1590115430000, false] + - [5, 1, 1590115440000, false] + - [7, 1, 1590115450000, false] + - [9, 1, 1590115460000, true] +# - id: like_predicate_1 +# desc: like predicate without escape +# inputs: +# - columns: ["id int", "std_ts timestamp"] +# indexs: ["index1:id:std_ts"] +# rows: +# - [1, 1590115420000 ] +# - [2, 1590115430000 ] +# - [3, 1590115440000 ] +# - [4, 1590115450000 ] +# - [5, 1590115460000 ] +# - [6, 1590115470000 ] +# - 
columns: ["id int", "ts timestamp", "col2 string"] +# indexs: ["idx:id:ts"] +# rows: +# - [1, 1590115420000, John] +# - [2, 1590115430000, Mary] +# - [3, 1590115440000, mike] +# - [4, 1590115450000, Dan] +# - [5, 1590115460000, Evan_W] +# - [6, 1590115470000, M] +# dataProvider: +# - ["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE +# - ["m%", "M_ry" ] # match pattern +# sql: | +# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]'; +# expect: +# columns: ["id int", "col2 string"] +# order: id +# expectProvider: +# 0: +# 0: +# rows: +# - [1, null] +# - [2, null] +# - [3, mike] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# 0: +# rows: +# - [1, John] +# - [2, Mary] +# - [3, null] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# 1: +# rows: +# - [1, John] +# - [2, null] +# - [3, mike] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# 2: +# 0: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, mike] +# - [4, null] +# - [5, null] +# - [6, M] +# 1: +# rows: +# - [1, null] +# - [2, Mary] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 3: +# 0: +# rows: +# - [1, John] +# - [2, null] +# - [3, null] +# - [4, Dan] +# - [5, Evan_W] +# - [6, null] +# 1: +# rows: +# - [1, John] +# - [2, null] +# - [3, mike] +# - [4, Dan] +# - [5, Evan_W] +# - [6, M] +# - id: like_predicate_2 +# desc: like predicate with escape +# inputs: +# - columns: ["id int", "std_ts timestamp"] +# indexs: ["index1:id:std_ts"] +# rows: +# - [1, 1590115420000 ] +# - [2, 1590115430000 ] +# - [3, 1590115440000 ] +# - [4, 1590115450000 ] +# - [5, 1590115460000 ] +# - [6, 1590115470000 ] +# - columns: ["id int", "ts timestamp", "col2 string"] +# indexs: ["idx:id:ts"] +# rows: +# - [1, 1590115420000, a*_b] +# - [2, 1590115430000, a*mb] +# - [3, 1590115440000, "%a_%b"] +# - [4, 1590115450000, "Ta_sub"] +# - [5, 1590115460000, "lamrb"] +# - [6, 1590115470000, 
"%a*_%b"] +# dataProvider: +# - ["LIKE", "NOT ILIKE"] +# - ["%", "*", ""] # escape with % or disable +# sql: | +# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]'; +# expect: +# columns: ["id int", "col2 string"] +# order: id +# expectProvider: +# 0: +# 0: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, null] +# 1: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, Ta_sub] +# - [5, null] +# - [6, null] +# 2: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, null] +# - [6, "%a*_%b"] +# 1: +# 0: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, "Ta_sub"] +# - [5, "lamrb"] +# - [6, "%a*_%b"] +# 1: +# rows: +# - [1, a*_b] +# - [2, a*mb] +# - [3, null] +# - [4, null] +# - [5, "lamrb"] +# - [6, "%a*_%b"] +# 2: +# rows: +# - [1, null] +# - [2, null] +# - [3, "%a_%b"] +# - [4, "Ta_sub"] +# - [5, "lamrb"] +# - [6, null] diff --git a/cases/integration_test/expression/test_type.yaml b/cases/integration_test/expression/test_type.yaml new file mode 100644 index 00000000000..45aac74cf8b --- /dev/null +++ b/cases/integration_test/expression/test_type.yaml @@ -0,0 +1,674 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +sqlDialect: ["HybridSQL"] +cases: + - id: 0 + mode: "offline-unsupport" + desc: "cast_各种类型_正确" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"] + sql: select cast(d[0] as bool) as b1,cast(d[0] as smallint) as b2,cast(d[0] as int) as b3,cast(d[0] as bigint) as b4,cast(d[0] as float) as b5,cast(d[0] as double) as b6,cast(d[0] as timestamp) as b7,cast(d[0] as string) as b9 from {0}; + expect: + columns: ["b1 bool","b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 string"] + expectProvider: + 0: + rows: + - [true,30,30,30,30.0,30.0,30,"30"] + 1: + rows: + - [true,30,30,30,30.0,30.0,30,"30"] + 2: + rows: + - [true,30,30,30,30.0,30.0,30,"30"] + 3: + rows: + - [true,30,30,30,30.0,30.0,30,"30"] + 4: + rows: + - [true,30,30,30,30.0,30.0,30,"30"] + 5: + rows: + - [false,0,0,0,0.0,0.0,0,"false"] + - id: 1 + desc: "cast_timestamp/string_正确" + tags: ["TODO","本地成功,CICD失败,原因待定位"] + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["{0}.c1","{0}.c7"] + sql: select cast(d[0] as bool) as b1,cast(d[0] as smallint) as b2,cast(d[0] as int) as b3,cast(d[0] as bigint) as b4,cast(d[0] as float) as b5,cast(d[0] as double) as b6,cast(d[0] as timestamp) as b7,cast(d[0] as date) as b8,cast(d[0] as string) as b9 from {0}; + expect: + columns: ["b1 bool","b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b8 date","b9 string"] + expectProvider: + 0: + rows: + - [true,null,null,null,null,null,null,null,aa] + 1: + rows: + - 
[true,-20536,1601089480,1590738989000,1590738989000,1590738989000,1590738989000,"2020-05-29","2020-05-29 15:56:29"] + - id: 2 + desc: "cast_string_正确" + tags: ["TODO","本地成功,CICD失败,原因待定位"] + inputs: + - + columns : ["id bigint","c1 string","c2 string","c3 string","c4 string","c5 string","c6 string","c7 string","c8 string","c9 string","ts1 timestamp"] + indexs: ["index1:id:ts1"] + rows: + - [1,"aa","30","30","30","30.0","30.0","1590738989000","2020-05-01","false",1590738989000] + sql: select cast(c9 as bool) as b1,cast(c2 as smallint) as b2,cast(c3 as int) as b3,cast(c4 as bigint) as b4,cast(c5 as float) as b5,cast(c6 as double) as b6,cast(c7 as timestamp) as b7,cast(c8 as date) as b8,cast(c1 as string) as b9 from {0}; + expect: + columns: ["b1 bool","b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b8 date","b9 string"] + expectProvider: + 0: + rows: + - [false,30,30,30,30.0,30.0,1590738989000,"2020-05-01",aa] + - id: 3 + desc: "cast_date_正确" + tags: ["TODO","本地成功,CICD失败,原因待定位"] + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["{0}.c8"] + sql: select cast(d[0] as bool) as b1,cast(d[0] as timestamp) as b7,cast(d[0] as date) as b8,cast(d[0] as string) as b9 from {0}; + expect: + columns: ["b1 bool","b7 timestamp","b8 date","b9 string"] + expectProvider: + 0: + rows: + - [true,1588262400000,"2020-05-01","2020-05-01"] + - id: 4 + desc: "cast_其他类型_date_错误" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"] + sql: select cast(d[0] as date) as b1 from {0}; + expect: + 
success: false + - id: 5 + desc: "cast_date_其他类型_错误" + level: 5 + tags: ["TODO", "bug"] + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["smallint","nit","bigint","float","double"] + sql: select cast(c8 as d[0]) as b1 from {0}; + expect: + success: false + - id: 6 + desc: SQL标准Cast语法-Cast(常量 as type) + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + select cast(1 as int) as f1, cast(2 as bigint) as f2, cast(1 as float) as f3, + cast(1 as double) as f4, cast(1 as bool) as f5, cast(1590115420000 as timestamp) as f6, + cast(1 as string) as f7 , cast("2020-05-20" as date) as f8 from {0}; + expect: + columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp", + "f7 string", "f8 date"] + rows: + - [1, 2, 1.0, 1.0, true, 1590115420000, "1", "2020-05-20"] + - id: 7 + desc: SQL标准Cast语法-Cast(表达式 as type) + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + select cast(c2 as int) as f1, cast(c1+c2 as bigint) as f2, cast(c1 as float) as f3, + cast(c1 as double) as f4, cast(c1 as bool) as f5, cast(c5 as timestamp) as f6, + cast(c1 as string) as f7 from {0}; + expect: + columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp", + "f7 string"] + rows: + - [1, 2, 1.0, 1.0, true, 1590115420000, "1"] + - id: 8 + desc: cast函数 + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + select int(1) as f1, bigint(2) as f2, float(1) as f3, + double(1) as f4, bool(1) as f5, timestamp(1590115420000) as f6, + string(1) as f7 from {0}; + expect: + columns: ["f1 int", "f2 bigint", "f3 float", "f4 
double", "f5 bool", "f6 timestamp", + "f7 string"] + rows: + - [1, 2, 1.0, 1.0, true, 1590115420000, "1"] + - id: 9 + desc: SQL标准Cast语法-Cast(表达式 as type) + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + select int(c1) as f1, bigint(c1+c2) as f2, float(c1) as f3, + double(c1) as f4, bool(c1) as f5, timestamp(c5) as f6, + string(c1) as f7 from {0}; + expect: + columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp", + "f7 string"] + rows: + - [1, 2, 1.0, 1.0, true, 1590115420000, "1"] + - id: 10 + desc: SQL标准Cast语法-Cast(表达式 as type) + inputs: + - columns: ["c0 string", "std_ts bigint", "c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c0:std_ts"] + rows: + - ["pk", 1, NULL, NULL, NULL] + sql: | + select cast(c2 as int) as f1, cast(c1+c2 as bigint) as f2, cast(c1 as float) as f3, + cast(c1 as double) as f4, cast(c1 as bool) as f5, cast(c5 as timestamp) as f6, + cast(c1 as string) as f7 from {0}; + expect: + columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp", + "f7 string"] + rows: + - [NULL, NULL, NULL, NULL, NULL, NULL, NULL] + - id: 11 + desc: SQL标准Cast语法-Cast(NULL as type) + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + select cast(NULL as int) as f1, cast(NULL as bigint) as f2, cast(NULL as float) as f3, + cast(NULL as double) as f4, cast(NULL as bool) as f5, cast(NULL as timestamp) as f6, + cast(NULL as date) as f7 from {0}; + expect: + columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp", + "f7 date"] + rows: + - [NULL, NULL, NULL, NULL, NULL, NULL, NULL] + - id: 12 + tags: ["TODO", "@chenjing, @baoxinqi: encode处理常量NULL string"] + desc: SQL标准Cast语法-Cast(NULL as string) + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + 
select cast(NULL as string) as f1 from {0}; + expect: + columns: ["f1 string"] + rows: + - [NULL] + - id: 13 + desc: cast函数多层子查询 + mode: "offline-unsupport" + tags: ["离线有时差问题"] + inputs: + - columns: ["c1 int", "c2 string", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, "2020-05-22 10:43:40", 1] + sql: | + select c1, bigint(c2) DIV 1000 as c2_sec from (select c1, timestamp(c2) as c2 from {0}); + expect: + columns: ["c1 int", "c2_sec bigint"] + rows: + - [1, 1590115420] + - id: 14 + desc: cast as int + mode: offline-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + int(c1) as int_c1, int(c2) as int_c2, int(c3) as int_c3, int(c4) as int_c4, + int(c5) as int_c5, int(c6) as int_c6, int(c8) as int_c8, int(c9) as int_c9, int(c10) as int_c10 + from {0}; + expect: + order: id + columns: ["id int32", "int_c1 int", "int_c2 int", "int_c3 int", "int_c4 int", "int_c5 int", "int_c6 int", + "int_c8 int", "int_c9 int", "int_c10 int"] + rows: + - [1, 1, 1, 1, 1, 1, NULL, 977520480, 1, 1] + - [2, -1, -1, -1, -1, -1, NULL, 977520480, 0, -1] + - [3, -1, -1, -1, -1, -1, NULL, 977520480, 0, NULL] + - id: 15 + desc: cast as smallint + mode: offline-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, 
"2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + smallint(c1) as smallint_c1, smallint(c2) as smallint_c2, smallint(c3) as smallint_c3, smallint(c4) as smallint_c4, + smallint(c5) as smallint_c5, smallint(c6) as smallint_c6, smallint(c8) as smallint_c8, smallint(c9) as smallint_c9, + smallint(c10) as smallint_c10 + from {0}; + expect: + order: id + columns: ["id int32", "smallint_c1 smallint", "smallint_c2 smallint", "smallint_c3 smallint", "smallint_c4 smallint", "smallint_c5 smallint", + "smallint_c6 smallint", "smallint_c8 smallint", "smallint_c9 smallint", "smallint_c10 smallint"] + rows: + - [1, 1, 1, 1, 1, 1, NULL, -14496, 1, 1] + - [2, -1, -1, -1, -1, -1, NULL, -14496, 0, -1] + - [3, -1, -1, -1, -1, -1, NULL, -14496, 0, NULL] + - id: 16 + desc: cast as bigint + mode: offline-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + bigint(c1) as bigint_c1, bigint(c2) as bigint_c2, bigint(c3) as bigint_c3, bigint(c4) as bigint_c4, + bigint(c5) as bigint_c5, bigint(c6) as bigint_c6, bigint(c8) as bigint_c8, bigint(c9) as bigint_c9, + bigint(c10) as bigint_c10 + from {0}; + expect: + order: id + columns: ["id int32", "bigint_c1 bigint", "bigint_c2 bigint", "bigint_c3 bigint", "bigint_c4 bigint", "bigint_c5 bigint", + "bigint_c6 bigint", 
"bigint_c8 bigint", "bigint_c9 bigint", "bigint_c10 bigint"] + rows: + - [1, 1, 1, 1, 1, 1, NULL, 1590115420000, 1, 1] + - [2, -1, -1, -1, -1, -1, NULL, 1590115420000, 0, -1] + - [3, -1, -1, -1, -1, -1, NULL, 1590115420000, 0, NULL] + - id: 17 + desc: cast as float + mode: offline-unsupport, python-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + float(c1) as float_c1, float(c2) as float_c2, float(c3) as float_c3, float(c4) as float_c4, float(c5) as float_c5, + float(c6) as float_c6, float(c8) as float_c8, float(c9) as float_c9, float(c10) as float_c10 + from {0}; + expect: + order: id + columns: ["id int32", "float_c1 float", "float_c2 float", "float_c3 float", "float_c4 float", "float_c5 float", + "float_c6 float", "float_c8 float", "float_c9 float", "float_c10 float"] + rows: + - [1, 1.0, 1.0, 1.0, 1.0, 1.0, NULL, 1590115420000.0, 1.0, 1.0] + - [2, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, -1.0] + - [3, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, NULL] + - id: 18 + desc: cast as double + mode: offline-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, 
"2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + double(c1) as double_c1, double(c2) as double_c2, double(c3) as double_c3, double(c4) as double_c4, double(c5) as double_c5, + double(c6) as double_c6, double(c8) as double_c8, double(c9) as double_c9, double(c10) as double_c10 + from {0}; + expect: + order: id + columns: ["id int32", "double_c1 double", "double_c2 double", "double_c3 double", "double_c4 double", "double_c5 double", + "double_c6 double", "double_c8 double", "double_c9 double", "double_c10 double"] + rows: + - [1, 1.0, 1.0, 1.0, 1.0, 1.0, NULL, 1590115420000.0, 1.0, 1.0] + - [2, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, -1.0] + - [3, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, NULL] + - id: 19 + desc: cast as string + mode: offline-unsupport,cli-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + string(c1) as string_c1, string(c2) as string_c2, string(c3) as string_c3, string(c4) as string_c4, + string(c5) as string_c5, string(c6) as string_c6, string(c7) as string_c7, string(c8) as string_c8, string(c9) as string_c9, + string(c10) as string_c10 + from {0}; + expect: + order: id + columns: ["id int32", "string_c1 string", "string_c2 string", "string_c3 string", "string_c4 string", "string_c5 string", + "string_c6 string", "string_c7 string", "string_c8 string", "string_c9 string", 
"string_c10 string"] + rows: + - [1, "1", "1", "1", "1", "1", "2020-05-22 10:43:40", "2020-05-22", "2020-05-22 10:43:40", "true", "1"] + - [2, "-1", "-1", "-1", "-1", "-1", "2020-05-22 10:43:40", "2020-05-22", "2020-05-22 10:43:40", "false", "-1"] + - [3, "-1", "-1", "-1", "-1", "-1", NULL, "2020-05-22", "2020-05-22 10:43:40", "false", ""] + - id: 20 + desc: cast as date + mode: offline-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + date(c6) as date_c6, date(c7) as date_c7, date(c8) as date_c8, date(c10) as date_c10 + from {0}; + expect: + order: id + columns: ["id int32", "date_c6 date", "date_c7 date", "date_c8 date", "date_c10 date"] + rows: + - [1, "2020-05-22", "2020-05-22", "2020-05-22", NULL] + - [2, "2020-05-22", "2020-05-22", "2020-05-22", NULL] + - [3, NULL, "2020-05-22", "2020-05-22", NULL] + - id: 21 + desc: cast as timestamp + mode: offline-unsupport + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + timestamp(c1) as 
timestamp_c1, timestamp(c2) as timestamp_c2, timestamp(c3) as timestamp_c3, timestamp(c4) as timestamp_c4, timestamp(c5) as timestamp_c5, + timestamp(c6) as timestamp_c6, timestamp(c7) as timestamp_c7, timestamp(c8) as timestamp_c8, timestamp(c9) as timestamp_c9, timestamp(c10) as timestamp_c10 + from {0}; + expect: + order: id + columns: ["id int32", "timestamp_c1 timestamp", "timestamp_c2 timestamp", "timestamp_c3 timestamp", "timestamp_c4 timestamp", "timestamp_c5 timestamp", + "timestamp_c6 timestamp", "timestamp_c7 timestamp", "timestamp_c8 timestamp", "timestamp_c9 timestamp", "timestamp_c10 timestamp"] + rows: + - [1, 1, 1, 1, 1, 1, 1590115420000, 1590076800000, 1590115420000, 1, NULL] + - [2, NULL, NULL, NULL, NULL, NULL, 1590115420000, 1590076800000, 1590115420000, 0, NULL] + - [3, NULL, NULL, NULL, NULL, NULL, NULL, 1590076800000, 1590115420000, 0, NULL] + - id: 22 + desc: cast as bool + mode: offline-unsupport + tags: ["TODO", "@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double", + "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"] + indexs: ["index1:c6:c8"] + rows: + - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"] + - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"] + - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""] + sql: | + select id, + bool(c1) as bool_c1, bool(c2) as bool_c2, bool(c3) as bool_c3, bool(c4) as bool_c4, bool(c5) as bool_c5, + bool(c6) as bool_c6, bool(c8) as bool_c8, bool(c9) as bool_c9, bool(c10) as bool_c10 + from {0}; + expect: + order: id + columns: ["id int32", "bool_c1 bool", "bool_c2 bool", "bool_c3 bool", "bool_c4 bool", "bool_c5 bool", + "bool_c6 bool", "bool_c8 bool", "bool_c9 bool", "bool_c10 bool"] + rows: + - [1, true, true, true, true, true, true, true, true, true] + - [2, true, true, 
true, true, true, true, true, false, true] + - [3, true, true, true, true, true, NULL, true, false, false] + - id: 23 + desc: cast string as bool + inputs: + - columns: ["id int64", "c2 int32", "c6 string"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, "t"] + - [2, 1, "true"] + - [3, 1, "f"] + - [4, 1, "false"] + - [5, 1, "1"] + - [6, 1, "0"] + - [7, 1, "y"] + - [8, 1, "n"] + - [9, 1, "yes"] + - [10, 1, "no"] + - [11, 1, ""] + - [12, 1, "abc"] + sql: | + select id, bool(c6) as bool_c6 from {0}; + expect: + order: id + columns: ["id int64", "bool_c6 bool"] + rows: + - [1, true] + - [2, true] + - [3, false] + - [4, false] + - [5, true] + - [6, false] + - [7, true] + - [8, false] + - [9, true] + - [10, false] + - [11, NULL] + - [12, NULL] + - id: 24 + desc: cast float as string + inputs: + - columns: ["id int64", "c2 int32", "c6 float"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, 1.1] + sql: | + select id, string(c6) as string_c6 from {0}; + expect: + order: id + columns: ["id int64", "string_c6 string"] + rows: + - [1, "1.1"] + - id: 25 + mode: "offline-unsupport" + tags: ["离线有时差问题"] + desc: column name prefix with _ + inputs: + - columns: ["_c1 int", "_c2 string", "_c5 bigint"] + indexs: ["index1:_c1:_c5"] + rows: + - [1, "2020-05-22 10:43:40", 1] + sql: | + select _c1, bigint(_c2) DIV 1000 as _c2_sec from (select _c1, timestamp(_c2) as _c2 from {0}); + expect: + columns: ["_c1 int", "_c2_sec bigint"] + rows: + - [1, 1590115420] + - id: 26 + desc: cast int to date raise error + inputs: + - columns: ["id int64", "c2 int32", "c6 int32"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, 1] + sql: | + select id, date(c6) as date_c6 from {0}; + expect: + success: false + - id: 27 + desc: cast bigint to date raise error + inputs: + - columns: ["id int64", "c2 int32", "c6 int64"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, 1] + sql: | + select id, date(c6) as date_c6 from {0}; + expect: + success: false + - id: 28 + desc: cast smallint to date raise error + inputs: + - 
columns: ["id int64", "c2 int32", "c6 int16"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, 1] + sql: | + select id, date(c6) as date_c6 from {0}; + expect: + success: false + - id: 29 + desc: cast float to date raise error + inputs: + - columns: ["id int64", "c2 int32", "c6 float"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, 1.0] + sql: | + select id, date(c6) as date_c6 from {0}; + expect: + success: false + - id: 30 + desc: cast double to date raise error + inputs: + - columns: ["id int64", "c2 int32", "c6 double"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, 1.0] + sql: | + select id, date(c6) as date_c6 from {0}; + expect: + success: false + - id: 31 + desc: cast double to date raise error + inputs: + - columns: ["id int64", "c2 int32", "c6 bool"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, true] + sql: | + select id, date(c6) as date_c6 from {0}; + expect: + success: false + - id: 32 + desc: cast date numbers + inputs: + - columns: ["id int64", "c2 int32", "c6 date"] + indexs: ["index1:c2:id"] + rows: + - [1, 1, "2020-10-12"] + sql: | + select id, int16(c6) as int16_c6, int32(c6) as int32_c6, int64(c6) as int64_c6, + float(c6) as float_c6, double(c6) as double_c6, bool(c6) as bool_c6 from {0}; + expect: + columns: [ "id int64", "int16_c6 int16", "int32_c6 int32", "int64_c6 int64", + "float_c6 float", "double_c6 double", "bool_c6 bool" ] + rows: + - [ 1, NULL, NULL, NULL, NULL, NULL, NULL] + - id: 33 + desc: SQL标准Cast语法-VARCHAR(expr) + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + select int(c1) as f1, timestamp(c5) as f2, + VARCHAR(c1) as f3 from {0}; + expect: + columns: ["f1 int", "f2 timestamp", "f3 string"] + rows: + - [1, 1590115420000, "1"] + - id: 34 + desc: SQL标准Cast语法-Cast(expr as VARCHAR) + inputs: + - columns: ["c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c1:c5"] + rows: + - [1, 1.0, 1590115420000] + sql: | + select CAST(c1 as int) as f1, CAST(c5 as 
timestamp) as f2, + CAST(c1 as VARCHAR) as f3, CAST(c1 as VARCHAR(60)) as f4 from {0}; + expect: + columns: ["f1 int", "f2 timestamp", "f3 string", "f4 string"] + rows: + - [1, 1590115420000, "1", "1"] + - id: 35 + desc: SQL标准Cast语法-Cast(NULL表达式 as VARCHAR) + inputs: + - columns: ["c0 string", "std_ts bigint", "c1 int", "c2 float", "c5 bigint"] + indexs: ["index1:c0:std_ts"] + rows: + - ["pk", 1, NULL, NULL, NULL] + sql: | + select cast(c2 as int) as f1, cast(c1 as VARCHAR) as f2, cast(c1 as VARCHAR(60)) as f3 from {0}; + expect: + columns: ["f1 int", "f2 string", "f3 string"] + rows: + - [NULL, NULL, NULL] \ No newline at end of file diff --git a/cases/integration_test/function/test_calculate.yaml b/cases/integration_test/function/test_calculate.yaml new file mode 100644 index 00000000000..7e4b5f5a3c9 --- /dev/null +++ b/cases/integration_test/function/test_calculate.yaml @@ -0,0 +1,254 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +sqlDialect: ["HybridSQL"] +version: 0.5.0 +cases: + - id: 0 + desc: abs-normal + inputs: + - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"] + indexs: ["index1:c0:c2"] + rows: + - [1, -1, 2, 2, -0.2, -0.5,true] + - [2, NULL, NULL, 2, NULL, NULL,false] + sql: select id as id, + abs(c0) as r0, + abs(c1) as r1, + abs(c2) as r2, + abs(c3) as r3, + abs(c4) as r4, + abs(c5) as r5 from {0}; + expect: + order: id + columns: ["id int", "r0 int", "r1 int", "r2 bigint", "r3 double", "r4 double","r5 double"] + rows: + - [1, 1, 2, 2, 0.20000000298023224, 0.5,1.0] + - [2, NULL, NULL, 2, NULL , NULL,0.0] + - id: 1 + desc: 三角函数 + tags: ["暂时不支持bool类型列"] + inputs: + - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"] + indexs: ["index1:c0:c2"] + rows: + - [1, 0, 1, 1, 1.0, 0.0,true] + - [2, NULL, NULL, 1, NULL, NULL,false] + sql: select id as id, + cos(c0) as r0, + cot(c1) as r1, + sin(c2) as r2, + tan(c3) as r3, + tan(c4) as r4 from {0}; + expect: + order: id + columns: ["id int", "r0 double", "r1 double", "r2 double", "r3 float","r4 double"] + rows: + - [1, 1, 0.6420926159343306, 0.8414709848078965, 1.5574077,0.0] + - [2, NULL, NULL, 0.8414709848078965, NULL,NULL] + - id: 2 + desc: 反三角函数 + inputs: + - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"] + indexs: ["index1:c0:c2"] + rows: + - [1, 1, 2, 2, 0.2, 0.5,true] + - [2, NULL, NULL, 2, NULL, NULL,false] + sql: select id as id, + acos(c4) as r0, + asin(c3) as r1, + atan(c1) as r2, + atan2(c1, c2) as r3, + asin(c4) as r4 + from {0}; + expect: + order: id + columns: ["id int", "r0 double", "r1 float", "r2 double", "r3 double","r4 double"] + rows: + - [1, 1.0471975511965979, 0.2013579207903308, 1.1071487177940904, 0.78539816339744828,0.5235987755982989] + - [2, NULL, NULL, NULL, NULL,NULL] + - id: 3 + desc: 反三角函数-三角函数-常量 + inputs: + - columns: ["id int", "c0 smallint", "c1 
int", "c2 bigint", "c3 float", "c4 double","c5 bool"] + indexs: ["index1:c0:c2"] + rows: + - [1, 1, 2, 2, 0.2, 0.5,true] + sql: select id as id, + cos(1) as r0, + cot(2) as r1, + sin(1.1) as r2, + tan(1) as r3, + acos(0) as r4, + asin(2.0) as r5, + atan(2.2) as r6, + atan2(1, 2) as r7, + asin(2) as r8 + from {0}; + expect: + order: id + columns: ["id int", "r0 double", "r1 double", "r2 double", "r3 double","r4 double", "r5 double", "r6 double", "r7 double", "r8 double"] + rows: + - [1,0.5403023058681398,-0.45765755436028577,0.8912073600614354,1.5574077246549023,1.5707963267948966,NaN,1.1441688336680205,0.4636476090008061,NaN] + - id: 4 + desc: 对数函数 + inputs: + - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"] + indexs: ["index1:c0:c2"] + rows: + - [1, 1, 1, 3, 1, 1,true] + - [2, NULL, NULL, 3, NULL, NULL,false] + sql: select id as id, + log(c0) as r0, + log(c2, c1) as r1, + log2(c3) as r2, + log10(c4) as r3, + ln(c1) as r4, + log(c5) as r5 from {0}; + expect: + order: id + columns: ["id int", "r0 double", "r1 double", "r2 float", "r3 double", "r4 double","r5 double"] + rows: + - [1, 0, 0, 0, 0, 0,0.0] + - [2, NULL, NULL, NULL, NULL, NULL,-Infinity] + + - id: 5 + desc: 数值位数函数 + inputs: + - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"] + indexs: ["index1:c0:c2"] + rows: + - [1, 1, 2, 2, 0.5, 0.7,true] + - [2, NULL, NULL, 2, NULL, NULL,false] + sql: select id as id, + ceil(c0) as r0, + ceiling(c1) as r1, + floor(c2) as r2, + round(c3) as r3, + truncate(c4) as r4, + floor(c5) as r5 from {0}; + expect: + order: id + columns: ["id int", "r0 bigint", "r1 bigint", "r2 bigint", "r3 double", "r4 double","r5 double"] + rows: + - [1, 1, 2, 2, 1.000000, 0.000000,1.0] + - [2, NULL, NULL, 2, NULL, NULL,0.0] + + - id: 6 + desc: 数值幂函数 + inputs: + - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"] + indexs: ["index1:c0:c2"] + rows: + - [1, 0, 2, 10, 
1, 100,true] + - [2, NULL, NULL, 10, NULL, NULL,false] + sql: select id as id, + exp(c0) as r0, + pow(c1, c2) as r1, + power(c2, c3) as r2, + sqrt(c4) as r3, + pow(c5,c1) as r4 + from {0}; + expect: + order: id + columns: ["id int", "r0 double", "r1 double", "r2 double", "r3 double","r4 double"] + rows: + - [1, 1, 1024.000000, 10.000000, 10.000000,1.0] + - [2, NULL, NULL, NULL, NULL,NULL] + - id: 7 + desc: "计算函数-单参数-fail" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true] + sql: select d[0](d[1]) from {0}; + dataProvider: + - ["abs","cos","cot","sin","tan","acos","asin","atan","log","log2","log10","ln","ceil","ceiling","floor","round","truncate","exp","sqrt"] + - ["{0}.c1","{0}.c7","{0}.c8"] + expect: + success: false + - id: 8 + desc: "计算函数-单参数-bool-fail" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true] + sql: select d[0](d[1]) from {0}; + dataProvider: + - ["cos","cot","sin","tan","acos","asin","atan","sqrt"] + - ["{0}.c9"] + expect: + success: false + - id: 9 + desc: "计算函数-两参数-fail" + level: 5 + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true] + sql: select d[0](d[1],d[1]) from {0}; + dataProvider: + - ["log","pow","power","atan2"] + - ["{0}.c1","{0}.c7","{0}.c8"] + expect: + success: false + - id: 10 + desc: "mod()_整型_正确" + tags: ["TODO","暂时不支持mod()"] + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 
timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + dataProvider: + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"] + sql: select mod(d[0],{1}.c2) as b2,mod(d[0],{1}.c3) as b3,mod(d[0],{1}.c4) as b4,mod(d[0],{1}.c5) as b5,mod(d[0],{1}.c6) as b6,mod(d[0],{1}.c9) as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id; + expect: + columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b9 smallint"] + expectProvider: + 0: + rows: + - [0,10,0,7.8,5.8,0] + 1: + rows: + - [0,10,0,7.8,5.8,0] + 2: + rows: + - [0,600,900,333,363,30] + 3: + rows: + - [30,50,60,41.1,42.1,31] + 4: + rows: + - [30,10,0,18.9,17.9,29] + 5: + columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"] + rows: + - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0] diff --git a/cases/integration_test/function/test_date.yaml b/cases/integration_test/function/test_date.yaml new file mode 100644 index 00000000000..66e1ce9cbbd --- /dev/null +++ b/cases/integration_test/function/test_date.yaml @@ -0,0 +1,144 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +sqlDialect: ["HybridSQL"] +version: 0.5.0 +cases: + - id: 0 + desc: date_format-normal + mode: cli-unsupport + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true] + - [2,2,"aa",30,-30,30,30.0,30.0,NULL,NULL,true] + sql: select id as id,date_format(c7,"%Y-%m-%d %H:%M:%S") as e1,date_format(c8,"%Y-%m-%d %H:%M:%S") as e2 from {0}; + expect: + order: id + columns: ["id bigint", "e1 string","e2 string"] + rows: + - [1, "2020-05-29 15:56:29","2020-05-01 00:00:00"] + - [2, NULL,NULL] + - id: 1 + desc: date_format-fail + level: 5 + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true] + - [2,2,"aa",30,-30,30,30.0,30.0,NULL,NULL,true] + dataProvider: + - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"] + sql: select id as id,date_format(d[0],"%Y-%m-%d %H:%M:%S") as e1 from {0}; + expect: + success: false + - id: 2 + desc: 日期函数-normal + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true] + - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true] + dataProvider: + - ["{0}.c4","{0}.c7","{0}.c8"] + sql: | + select id as id, + day(d[0]) as e1, + dayofmonth(d[0]) as e2, + dayofweek(d[0]) as e3, + month(d[0]) as e4, + week(d[0]) as e5, + weekofyear(d[0]) as e6, + year(d[0]) as e7 + from {0}; + expect: + order: id + columns: ["id bigint", "e1 int","e2 int","e3 int","e4 int","e5 int","e6 int","e7 int"] + expectProvider: + 0: + rows: + - 
[1,1,1,5,1,1,1,1970] + - [2,null,null,null,null,null,null,null] + 1: + rows: + - [1,29,29,6,5,22,22,2020] + - [2,null,null,null,null,null,null,null] + 2: + rows: + - [1,2,2,7,5,18,18,2020] + - [2,null,null,null,null,null,null,null] + - id: 3 + desc: 一些时间函数-fail + level: 5 + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true] + - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true] + dataProvider: + - ["day","dayofmonth","dayofweek","week","weekofyear","year","month"] + - ["{0}.c1","{0}.c2","{0}.c3","{0}.c5","{0}.c6","{0}.c9"] + sql: select id as id,d[0](d[1]) as e1 from {0}; + expect: + success: false + - id: 4 + desc: hour-minute-normal + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true] + - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true] + dataProvider: + - ["{0}.c4","{0}.c7"] + sql: select id as id,hour(d[0]) as e1,minute(d[0]) as e2 from {0}; + expect: + order: id + columns: ["id bigint", "e1 int","e2 int"] + expectProvider: + 0: + rows: + - [1,8,0] + - [2,null,null] + 1: + rows: + - [1,15,56] + - [2,null,null] + - id: 5 + desc: hour-minute-fail + level: 5 + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true] + - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true] + dataProvider: + - ["hour","minute"] + - ["{0}.c1","{0}.c2","{0}.c3","{0}.c5","{0}.c6","{0}.c8","{0}.c9"] + sql: select id as id,d[0](d[1]) as e1 from {0}; + expect: + success: false \ No newline 
at end of file diff --git a/cases/integration_test/function/test_like_match.yaml b/cases/integration_test/function/test_like_match.yaml new file mode 100644 index 00000000000..5300a4f85e5 --- /dev/null +++ b/cases/integration_test/function/test_like_match.yaml @@ -0,0 +1,840 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +sqlDialect: ["HybridSQL"] +version: 0.5.0 +cases: + - id: 0 + desc: "使用_" + inputs: + - + columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:c7"] + rows: + - [1,"a_b",30,30,30,30.0,30.0,1590738990000,"2020-05-01",false] + - [2,"aab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false] + - [3,"a%b",30,30,30,30.0,30.0,1590738992000,"2020-05-01",false] + - [4,"b_c",30,30,30,30.0,30.0,1590738993000,"2020-05-01",false] + - [5,"abc",30,30,30,30.0,30.0,1590738994000,"2020-05-01",false] + - [6,"A0b",30,30,30,30.0,30.0,1590738995000,"2020-05-01",false] + - [7,"a#B",30,30,30,30.0,30.0,1590738996000,"2020-05-01",false] + - [8,"aaab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'a_b') as v1 from {0}; + expect: + order: id + columns: ["id bigint","v1 bool"] + expectProvider: + 0: + rows: + - [1,true] + - [2,true] + - [3,true] + - [4,false] + - [5,false] + - [6,false] + - [7,false] + - [8,false] + 1: + rows: + - [1,true] + - 
[2,true] + - [3,true] + - [4,false] + - [5,false] + - [6,true] + - [7,true] + - [8,false] + - id: 1 + desc: "使用%" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"a%_b",1590738992000] + - [4,"b_c",1590738993000] + - [5,"abc",1590738994000] + - [6,"A0b",1590738995000] + - [7,"a#B",1590738996000] + - [8,"aaab",1590738997000] + - [9,"ab",1590738998000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'a%b') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b",true] + - [4,"b_c",false] + - [5,"abc",false] + - [6,"A0b",false] + - [7,"a#B",false] + - [8,"aaab",true] + - [9,"ab",true] + 1: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b",true] + - [4,"b_c",false] + - [5,"abc",false] + - [6,"A0b",true] + - [7,"a#B",true] + - [8,"aaab",true] + - [9,"ab",true] + - id: 2 + desc: "同时使用%和_" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'_a%b') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"aa#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",true] + - [7,"aa#0B",true] + - id: 3 + desc: "使用默认的escape" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + 
rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,"\\_a%b","\\") as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",true] + - id: 4 + desc: "指定escape为#" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'#_a%b','#') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",true] + - id: 5 + desc: "指定escape为_" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'__a%b','_') 
as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",true] + - id: 6 + desc: "指定escape为%" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA%b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'_a%%b','%') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",true] + - [7,"_a#0B",false] + - id: 7 + desc: "escape不指定" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,"\\_a%b") as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",false] + - 
[2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"_a#0B",true] + - id: 8 + desc: "escape为空串,使用\\" + mode: cluster-unsupport + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,'\\\%a_b',1590738990000] + - [2,'\\\aabb',1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,'\\\bA0b',1590738995000] + - [7,'\\\_a#0B',1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,"\\_a%b","") as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,'\%a_b',true] + - [2,'\aabb',true] + - [3,'_a%_b',false] + - [4,'ba_c',false] + - [5,"abb",false] + - [6,'\bA0b',false] + - [7,'\_a#0B',false] + 1: + rows: + - [1,'\%a_b',true] + - [2,'\aabb',true] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,'\bA0b',true] + - [7,'\_a#0B',true] + - id: 9 + desc: "使用两个%" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"a%_b%0",1590738992000] + - [4,"b_c",1590738993000] + - [5,"abc",1590738994000] + - [6,"A0b",1590738995000] + - [7,"a#Bb",1590738996000] + - [8,"aaabbcc",1590738991000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'a%b%') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b%0",true] + - [4,"b_c",false] + - [5,"abc",true] + - [6,"A0b",false] + - [7,"a#Bb",true] + - [8,"aaabbcc",true] + 1: + rows: + - [1,"a_b",true] + - [2,"aabb",true] + - [3,"a%_b%0",true] + - [4,"b_c",false] + - [5,"abc",true] + - [6,"A0b",true] + - [7,"a#Bb",true] + - [8,"aaabbcc",true] + - id: 10 + desc: "使用两个_" + inputs: + - + columns : ["id bigint","c1 string","c7 
timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA0b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'_a_b') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",false] + - [7,"aa#0B",false] + 1: + rows: + - [1,"%a_b",true] + - [2,"aabb",true] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA0b",true] + - [7,"aa#0B",false] + - id: 11 + desc: "使用两个%,其中一个被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"a_b",1590738990000] + - [2,"aab%",1590738991000] + - [3,"a%_b%0",1590738992000] + - [4,"b_c",1590738993000] + - [5,"ab%",1590738994000] + - [6,"A0b",1590738995000] + - [7,"a#B%",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'a%b#%','#') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"a_b",false] + - [2,"aab%",true] + - [3,"a%_b%0",false] + - [4,"b_c",false] + - [5,"ab%",true] + - [6,"A0b",false] + - [7,"a#B%",false] + 1: + rows: + - [1,"a_b",false] + - [2,"aab%",true] + - [3,"a%_b%0",false] + - [4,"b_c",false] + - [5,"ab%",true] + - [6,"A0b",false] + - [7,"a#B%",true] + - id: 12 + desc: "使用两个_,其中一个被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"_A0b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: 
select id,c1,d[0](c1,'#_a_b','#') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",false] + - [7,"aa#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",true] + - [7,"aa#0B",false] + - id: 13 + desc: "同时使用%和_,其中_被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a_b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"_A0b",1590738995000] + - [7,"_a#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'#_a%b','#') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",false] + - [7,"_a#0B",false] + 1: + rows: + - [1,"%a_b",false] + - [2,"aabb",false] + - [3,"_a%_b",true] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"_A0b",true] + - [7,"_a#0B",true] + - id: 14 + desc: "同时使用%和_,其中%被转义" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"%a%b",1590738990000] + - [2,"aabb",1590738991000] + - [3,"_a%_b",1590738992000] + - [4,"ba_c",1590738993000] + - [5,"abb",1590738994000] + - [6,"bA%b",1590738995000] + - [7,"aa#0B",1590738996000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,c1,d[0](c1,'_a#%b','#') as v1 from {0}; + expect: + order: id + columns: ["id bigint","c1 string","v1 bool"] + expectProvider: + 0: + rows: + - [1,"%a%b",true] + - [2,"aabb",false] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",false] + - 
[7,"aa#0B",false] + 1: + rows: + - [1,"%a%b",true] + - [2,"aabb",false] + - [3,"_a%_b",false] + - [4,"ba_c",false] + - [5,"abb",false] + - [6,"bA%b",true] + - [7,"aa#0B",false] + - id: 15 + desc: "列中有null和空串" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,null,1590738991000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'a%b') as v1 from {0}; + expect: + order: id + columns: ["id bigint","v1 bool"] + rows: + - [1,false] + - [2,null] + - id: 16 + desc: "使用空串" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,"aa",1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'') as v1 from {0}; + expect: + order: id + columns: ["id bigint","v1 bool"] + rows: + - [1,true] + - [2,false] + - id: 17 + desc: "使用null" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,"aa",1590738991000] + - [3,null,1590738992000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,null) as v1 from {0}; + expect: + success: false + - id: 18 + desc: "escape使用null" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,"aa",1590738991000] + - [3,null,1590738992000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'a%',null) as v1 from {0}; + expect: + success: false + - id: 19 + desc: "int类型" + inputs: + - + columns : ["id bigint","c1 int","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12,1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 20 + desc: "bigint类型" + inputs: + - + columns : ["id bigint","c1 bigint","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: 
+ - [1,12,1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 21 + desc: "smallint类型" + inputs: + - + columns : ["id bigint","c1 smallint","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12,1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 22 + desc: "float类型" + inputs: + - + columns : ["id bigint","c1 float","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12.0,1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 23 + desc: "double类型" + inputs: + - + columns : ["id bigint","c1 double","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12.0,1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 24 + desc: "timestamp类型" + inputs: + - + columns : ["id bigint","c1 timestamp","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,12,1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 25 + desc: "date类型" + inputs: + - + columns : ["id bigint","c1 date","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"2012-05-01",1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 26 + desc: "bool类型" + inputs: + - + columns : ["id bigint","c1 bool","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,true,1590738990000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'1%') as v1 from {0}; + expect: + success: false + - id: 27 + desc: "列不存在" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",1590738990000] 
+ dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c2,'1%') as v1 from {0}; + expect: + success: false + - id: 28 + desc: "escape为多个字符" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"",1590738990000] + - [2,"aa",1590738991000] + - [3,null,1590738992000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'a%','<>') as v1 from {0}; + expect: + success: true + order: id + columns : ["id bigint","v1 bool"] + rows: + - [1,false] + - [2,false] + - [3,null] + - id: 29 + desc: "pattern以escape character结尾" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"ab#",1590738990000] + - [2,"aa",1590738991000] + dataProvider: + - ["like_match","ilike_match"] + sql: select id,d[0](c1,'a%#','#') as v1 from {0}; + expect: + success: true + columns : ["id bigint","v1 bool"] + rows: + - [1,false] + - [2,false] + diff --git a/cases/integration_test/function/test_string.yaml b/cases/integration_test/function/test_string.yaml new file mode 100644 index 00000000000..4b9220122f0 --- /dev/null +++ b/cases/integration_test/function/test_string.yaml @@ -0,0 +1,290 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: "concat_各种类型组合" + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - [2,2,null,null,null,null,null,null,null,null,null] + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + - [2,2,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"] + sql: | + select + concat(d[0],{1}.c1) as b1, + concat(d[0],{1}.c2) as b2, + concat(d[0],{1}.c3) as b3, + concat(d[0],{1}.c4) as b4, + concat(d[0],{1}.c5) as b5, + concat(d[0],{1}.c6) as b6, + concat(d[0],{1}.c7) as b7, + concat(d[0],{1}.c8) as b8, + concat(d[0],{1}.c9) as b9 + from {0} last join {1} ORDER BY {1}.ts1 on {0}.id={1}.id; + expect: + columns: ["b1 string","b2 string","b3 string","b4 string","b5 string","b6 string","b7 string","b8 string","b9 string"] + expectProvider: + 0: + rows: + - ["aa","aa0","aa20","aa30","aa11.1","aa12.1","aa2020-05-29 15:56:29","aa2020-05-02","aatrue"] + - [null,null,null,null,null,null,null,null,null] + 1: + rows: + - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"] + - [null,null,null,null,null,null,null,null,null] + 2: + rows: + - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"] + - [null,null,null,null,null,null,null,null,null] + 3: + rows: + - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"] + - [null,null,null,null,null,null,null,null,null] 
+ 4: + rows: + - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"] + - [null,null,null,null,null,null,null,null,null] + 5: + rows: + - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"] + - [null,null,null,null,null,null,null,null,null] + 6: + rows: + - ["2020-05-29 15:56:29","2020-05-29 15:56:290","2020-05-29 15:56:2920","2020-05-29 15:56:2930","2020-05-29 15:56:2911.1","2020-05-29 15:56:2912.1","2020-05-29 15:56:292020-05-29 15:56:29","2020-05-29 15:56:292020-05-02","2020-05-29 15:56:29true"] + - [null,null,null,null,null,null,null,null,null] + 7: + rows: + - ["2020-05-01","2020-05-010","2020-05-0120","2020-05-0130","2020-05-0111.1","2020-05-0112.1","2020-05-012020-05-29 15:56:29","2020-05-012020-05-02","2020-05-01true"] + - [null,null,null,null,null,null,null,null,null] + 8: + rows: + - ["false","false0","false20","false30","false11.1","false12.1","false2020-05-29 15:56:29","false2020-05-02","falsetrue"] + - [null,null,null,null,null,null,null,null,null] + - id: 1 + desc: concat三个字符串 + sqlDialect: ["HybridSQL","MYSQL"] + inputs: + - columns: ["id int", "c1 string","c2 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa","bbb",1590738989000] + sql: select id, c1, c2, concat(c1, c2,"cc") as c12 from {0}; + expect: + columns: ["id int", "c1 string","c2 string", "c12 string"] + rows: + - [1, "aa", "bbb", "aabbbcc"] + + - id: 2 + desc: concat_ws一个字符串和三个字符串 + sqlDialect: ["HybridSQL","MYSQL"] + inputs: + - columns: ["id int", "c1 string","c2 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa","bbb",1590738989000] + sql: select id, c1, concat_ws("-",c2) as c2, concat_ws("-", c1, c2,"cc") as c1_2 from {0}; + expect: + columns: ["id int", "c1 string","c2 string","c1_2 string"] + rows: + - [1, "aa", "bbb", "aa-bbb-cc"] + - id: 3 + mode: cli-unsupport + desc: "concat_ws-所有类型" + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id bigint","ts1 
bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - [2,2,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true] + - [2,2,null,null,null,null,null,null,null,null,null] + dataProvider: + - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"] + sql: | + select + concat_ws(d[0],{0}.c1,{1}.c1) as b1, + concat_ws(d[0],{0}.c1,{1}.c2) as b2, + concat_ws(d[0],{0}.c1,{1}.c3) as b3, + concat_ws(d[0],{0}.c1,{1}.c4) as b4, + concat_ws(d[0],{0}.c1,{1}.c5) as b5, + concat_ws(d[0],{0}.c1,{1}.c6) as b6, + concat_ws(d[0],{0}.c1,{1}.c7) as b7, + concat_ws(d[0],{0}.c1,{1}.c8) as b8, + concat_ws(d[0],{0}.c1,{1}.c9) as b9 + from {0} last join {1} ORDER BY {1}.ts1 on {0}.id={1}.id; + expect: + columns: ["b1 string","b2 string","b3 string","b4 string","b5 string","b6 string","b7 string","b8 string","b9 string"] + expectProvider: + 0: + rows: + - ["aaaa","aaaa0","aaaa20","aaaa30","aaaa11.1","aaaa12.1","aaaa2020-05-29 15:56:29","aaaa2020-05-02","aaaatrue"] + - [null,null,null,null,null,null,null,null,null] + 1: + rows: + - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"] + - [null,null,null,null,null,null,null,null,null] + 2: + rows: + - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"] + - [null,null,null,null,null,null,null,null,null] + 3: + rows: + - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"] + - [null,null,null,null,null,null,null,null,null] + 4: + rows: + - 
["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"] + - [null,null,null,null,null,null,null,null,null] + 5: + rows: + - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"] + - [null,null,null,null,null,null,null,null,null] + 6: + rows: + - ["aa2020-05-29 15:56:29","aa2020-05-29 15:56:290","aa2020-05-29 15:56:2920","aa2020-05-29 15:56:2930","aa2020-05-29 15:56:2911.1","aa2020-05-29 15:56:2912.1","aa2020-05-29 15:56:292020-05-29 15:56:29","aa2020-05-29 15:56:292020-05-02","aa2020-05-29 15:56:29true"] + - [null,null,null,null,null,null,null,null,null] + 7: + rows: + - ["aa2020-05-01","aa2020-05-010","aa2020-05-0120","aa2020-05-0130","aa2020-05-0111.1","aa2020-05-0112.1","aa2020-05-012020-05-29 15:56:29","aa2020-05-012020-05-02","aa2020-05-01true"] + - [null,null,null,null,null,null,null,null,null] + 8: + rows: + - ["aafalse","aafalse0","aafalse20","aafalse30","aafalse11.1","aafalse12.1","aafalse2020-05-29 15:56:29","aafalse2020-05-02","aafalsetrue"] + - [null,null,null,null,null,null,null,null,null] + - id: 4 + desc: strcmp 两个字符串 + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["id int", "c1 string","c2 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1, "text","text2",1590738989000] + - [2, "text","text",1590738989000] + - [3, "text2","text",1590738989000] + - [4, null,"text",1590738989000] + - [5, "text",null,1590738989000] + - [6, null,null,1590738989000] + sql: select id, c1, c2, strcmp(c1, c2) as cmp_c1c2 from {0}; + expect: + columns: ["id int", "c1 string","c2 string","cmp_c1c2 int"] + order: id + rows: + - [1, "text", "text2", -1] + - [2, "text", "text", 0] + - [3, "text2", "text", 1] + - [4, null,"text",null] + - [5, "text",null,null] + - [6, null,null,null] + - id: 5 + desc: "strcmp-fail" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 
bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"] + - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"] + sql: select strcmp(d[0],d[1]) from {0}; + expect: + success: false + - id: 6 + desc: "strcmp-string-fail" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + dataProvider: + - ["{0}.c1"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"] + sql: select strcmp(d[0],d[1]) from {0}; + expect: + success: false + - id: 7 + desc: "substr-normal" + mode: cli-unsupport + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - [2,2,null,null,null,null,null,null,null,null,null] + dataProvider: + - ["substr","substring"] + sql: | + select + d[0](c1,3) as b1, + d[0](c1,3,2) as b2, + d[0](c1,3,20) as b3, + d[0](c1,30,2) as b4, + d[0](c1,30) as b5 + from {0}; + expect: + columns: ["b1 string","b2 string","b3 string","b4 string","b5 string"] + expectProvider: + 0: + rows: + - ["3456789","34","3456789","",""] + - [null,null,null,null,null] + 1: + rows: + - ["3456789","34","3456789","",""] + - [null,null,null,null,null] + + - id: 8 + desc: "substr-fail" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 
bool"] + indexs: ["index1:id:ts1"] + rows: + - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false] + - [2,2,null,null,null,null,null,null,null,null,null] + dataProvider: + - ["substr","substring"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"] + sql: select d[0](d[1],1) from {0}; + expect: + success: false diff --git a/cases/integration_test/function/test_udaf_function.yaml b/cases/integration_test/function/test_udaf_function.yaml new file mode 100644 index 00000000000..0642ed737fa --- /dev/null +++ b/cases/integration_test/function/test_udaf_function.yaml @@ -0,0 +1,2563 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: max + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, max(c2) OVER w1 as m2,max(c3) OVER w1 as m3,max(c4) OVER w1 as m4,max(c5) OVER w1 as m5,max(c6) OVER w1 as m6,max(c7) OVER w1 as m7,max(c8) OVER w1 as m8,max(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c"] + - [3,"aa",4,4,33,1.4,2.4,1590738992000,"2020-05-03","c"] + - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-03","c"] + - + id: 1 + desc: min + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, min(c2) OVER w1 as m2,min(c3) OVER w1 as m3,min(c4) OVER w1 as m4,min(c5) OVER w1 as m5,min(c6) OVER w1 as m6,min(c7) OVER w1 as m7,min(c8) OVER w1 as m8,min(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS 
(PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [2,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [3,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [4,"aa",3,3,32,1.3,2.3,1590738991000,"2020-05-02","b"] + - + id: 2 + desc: count + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, count(c2) OVER w1 as m2,count(c3) OVER w1 as m3,count(c4) OVER w1 as m4,count(c5) OVER w1 as m5,count(c6) OVER w1 as m6,count(c7) OVER w1 as m7,count(c8) OVER w1 as m8,count(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint"] + rows: + - [1,"aa",1,1,1,1,1,1,1,1] + - [2,"aa",2,2,2,2,2,2,2,2] + - [3,"aa",3,3,3,3,3,3,3,3] + - [4,"aa",2,2,2,2,2,3,2,2] + - + id: 3 + desc: sum + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - 
[4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, sum(c2) OVER w1 as m2,sum(c3) OVER w1 as m3,sum(c4) OVER w1 as m4,sum(c5) OVER w1 as m5,sum(c6) OVER w1 as m6 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double"] + rows: + - [1,"aa",1,1,30,1.1,2.1] + - [2,"aa",5,5,63,2.5,4.5] + - [3,"aa",8,8,95,3.7999997,6.799999999999999] + - [4,"aa",7,7,65,2.7,4.7] + - + id: 4 + desc: avg + version: 0.6.0 + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1, NULL,30, 1.1, 2.1, 1590738990000,"2020-05-01","a",true] + - [2,"aa",4, 4, 33, 1.4, 2.4, 1590738991000,"2020-05-03","c",false] + - [3,"aa",1, 1, 33, 1.1, 2.1, 1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, + avg(c2) OVER w1 as m2, + avg(c3) OVER w1 as m3, + avg(c4) OVER w1 as m4, + avg(c5) OVER w1 as m5, + avg(c6) OVER w1 as m6, + avg(c3 + 1) over w1 as m7 + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double", "m7 double"] + rows: + - [1, aa, 1, NULL, 30, 1.100000023841858,2.1, NULL] + - [2, aa, 2.5, 4.0, 31.5, 1.25, 2.25, 5.0] + - [3, aa, 2, 2.5, 32, 1.200000007947286,2.1999999999999997,3.5] + - [4, aa, 2.5, 2.5, 33, 1.25, 2.25, 3.5] + - + id: 5 + desc: distinct_count + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool","ts timestamp"] + indexs: ["index1:c1:ts"] + rows: 
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true,1590738990000] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",false,1590738991000] + - [3,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",true,1590738992000] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL,1590738993000] + sql: | + SELECT {0}.id, c1, distinct_count(c2) OVER w1 as m2,distinct_count(c3) OVER w1 as m3,distinct_count(c4) OVER w1 as m4,distinct_count(c5) OVER w1 as m5,distinct_count(c6) OVER w1 as m6,distinct_count(c7) OVER w1 as m7,distinct_count(c8) OVER w1 as m8,distinct_count(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.ts ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint"] + rows: + - [1,"aa",1,1,1,1,1,1,1,1] + - [2,"aa",2,2,2,2,2,2,2,2] + - [3,"aa",2,2,2,2,2,2,2,2] + - [4,"aa",2,2,2,2,2,2,2,2] + - + id: 6 + desc: count/distinct_count-bool + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",1,1,33,1.1,2.1,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, count(c10) OVER w1 as count_bool, distinct_count(c10) OVER w1 as distinct_count_bool + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int", "count_bool bigint", "distinct_count_bool bigint"] + rows: + - [1,1,1] + - [2,2,2] + - [3,3,2] + - [4,2,2] + - + id: 7 + desc: sum-timestamp + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id bigint","c1 
string", "c2 timestamp", "c3 timestamp"] + indexs: ["index1:c1:c2"] + rows: + - [1,"aa",1590738990000,1590738990000] + - [2,"aa",1590738991000,1590738991000] + - [3,"aa",1590738992000,1590738992000] + - [4,"aa",1590738993000,NULL] + sql: | + SELECT {0}.id, sum(c3) OVER w1 as m2 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c2 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint", "m2 timestamp"] + rows: + - [1, 1590738990000] + - [2, 3181477981000] + - [3, 4772216973000] + - [4, 3181477983000] + - + id: 8 + desc: avg-timestamp + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"] + - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"] + sql: | + SELECT {0}.id, c1,avg(c7) OVER w1 as m7 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 9 + desc: sum-date + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"] + - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"] + sql: | + SELECT {0}.id, c1,sum(c8) OVER w1 as m8 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 10 + desc: sum-string + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"] + - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"] + sql: | + SELECT {0}.id, c1,sum(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 11 + desc: avg-date + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"] + - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"] + sql: | + SELECT {0}.id, c1,avg(c8) OVER w1 as m8 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 12 + desc: avg-string + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"] + - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"] + sql: | + SELECT {0}.id, c1,avg(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 13 + desc: MAX_WHERE-normal + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [0, "00", 5, 3, 10, 1.0, 4.4, 1590738990000, "2020-05-01", "a", false] + - [1, "aa", 1, 1, 30, 1.1, 2.1, 1590738990000, "2020-05-01", "a", true] + - [2, "aa", 4, 4, 33, 1.4, 2.4, 1590738991000, "2020-05-03", "c", 
false] + - [3, "aa", 3, 3, 32, 1.3, 2.3, 1590738992000, "2020-05-02", "b", true] + - [4, "aa", NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, max_where(c2,c2<4) OVER w1 as m2,max_where(c3,c3<4) OVER w1 as m3,max_where(c4,c10) OVER w1 as m4,max_where(c5,c5<=1.3) OVER w1 as m5,max_where(c6,c6<=2.3) OVER w1 as m6 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double"] + rows: + - [0,"00",NULL,3,NULL,1.0,NULL] + - [1,"aa",1,1,30,1.1,2.1] + - [2,"aa",1,1,30,1.1,2.1] + - [3,"aa",3,3,32,1.3,2.3] + - [4,"aa",3,3,32,1.3,2.3] + - + id: 14 + desc: MIN_WHERE-normal + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, min_where(c2,c2>2) OVER w1 as m2,min_where(c3,c3>=3) OVER w1 as m3,min_where(c4,c4<33) OVER w1 as m4,min_where(c5,c5<=2) OVER w1 as m5,min_where(c6,c10) OVER w1 as m6 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double"] + rows: + - [1,"aa",NULL,NULL,30,1.1,2.1] + - [2,"aa",4,4,30,1.1,2.1] + - [3,"aa",3,3,30,1.1,2.1] + - [4,"aa",3,3,32,1.3,2.3] + - + id: 15 + desc: SUM_WHERE-normal + sqlDialect: ["HybridSQL"] + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 
string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1, 1, 30, NULL,2.1, 1590738990000, "2020-05-01","a",true] + - [2,"aa",4, 4, NULL,1.4, 2.4, 1590738991000, "2020-05-03","c",false] + - [3,"aa",3, NULL,33, 1.3, 2.3, 1590738992000, "2020-05-02","b",true] + - [4,"aa",NULL,3, 32, 1.1, NULL,1590738993000, NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, + sum_where(c2,c2<4) OVER w1 as m2, + sum_where(c3,c3<4) OVER w1 as m3, + sum_where(c4,c4<33) OVER w1 as m4, + sum_where(c5,c5<=1.3) OVER w1 as m5, + sum_where(c6,c10) OVER w1 as m6, + sum_where(c2, c2 = null) over w1 as m7 + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double", "m7 smallint"] + rows: + - [1, "aa", 1, 1, 30, NULL, 2.1, NULL] + - [2, "aa", 1, 1, 30, NULL, 2.1, NULL] + - [3, "aa", 4, 1, 30, 1.3, 4.4, NULL] + - [4, "aa", 3, 3, 32, 2.4, 2.3, NULL] + - + id: 16 + desc: AVG_WHERE-normal + sqlDialect: ["HybridSQL"] + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1, "aa", 1, 1, 30, NULL,2.1, 1590738990000, "2020-05-01", "a", true] + - [2, "aa", 4, 4, NULL,1.4, 2.4, 1590738991000, "2020-05-03", "c", false] + - [3, "aa", 3, NULL,32, 1.3, 2.3, 1590738992000, "2020-05-02", "b", true] + - [4, "aa", NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL] + sql: | + SELECT {0}.id, c1, + avg_where(c2, c2<4) OVER w1 as m2, + avg_where(c3, c3<4) OVER w1 as m3, + avg_where(c4, c4<33) OVER w1 as m4, + avg_where(c5, c5<=1.3) OVER w1 as m5, + avg_where(c6, c10) OVER w1 as m6, + avg_where(c3, c3 = null) over w1 as m7 + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 double","m3 
double","m4 double","m5 double","m6 double", "m7 double"] + rows: + - [1, aa, 1, 1, 30, NULL, 2.1, NULL] + - [2, aa, 1, 1, 30, NULL, 2.1, NULL] + - [3, aa, 2, 1, 31, 1.2999999523162842, 2.2, NULL] + - [4, aa, 3, 3, 32, 1.199999988079071, 2.3, NULL] + - + id: 17 + desc: COUNT_WHERE-normal + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, count_where(c2,c2<4) OVER w1 as m2,count_where(c3,c3<4) OVER w1 as m3,count_where(c4,c4<33) OVER w1 as m4,count_where(c5,c5<=1.3) OVER w1 as m5,count_where(c6,c10) OVER w1 as m6, + count_where(c7,c10) OVER w1 as m7,count_where(c8,c10) OVER w1 as m8,count_where(c9,c10) OVER w1 as m9, count_where(*,c3<4) over w1 as m10 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint","m10 bigint"] + rows: + - [1,"aa",1,1,1,1,1,1,1,1,1] + - [2,"aa",1,1,1,1,1,1,1,1,1] + - [3,"aa",2,2,2,2,2,2,2,2,2] + - [4,"aa",1,1,1,1,1,1,1,1,1] + - + id: 18 + desc: AVG_WHERE/MAX_WHERE/MIN_WHERE/SUM_WHERE-fail + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - 
[3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + dataProvider: + - ["avg_where","sum_where","max_where","min_where"] + - ["c7","c8","c9","c10"] + sql: | + SELECT {0}.id, c1, d[0](d[1],c10) OVER w1 as m2 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 19 + desc: COUNT_WHERE-fail + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + dataProvider: + - ["count_where"] + - ["c10"] + sql: | + SELECT {0}.id, c1, d[0](d[1],c10) OVER w1 as m2 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 20 + desc: max_cate-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - 
[2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + max_cate({0}.c2,d[0]) OVER w1 as m2, + max_cate({0}.c3,d[0]) OVER w1 as m3, + max_cate({0}.c4,d[0]) OVER w1 as m4, + max_cate({0}.c5,d[0]) OVER w1 as m5, + max_cate({0}.c6,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:4","1:4","1:33","1:1.400000","1:2.400000"] + - [3,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:4","1:4","1:33","1:1.400000","1:2.400000"] + - [3,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 1: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:4","1:4","1:33","1:1.400000","1:2.400000"] + - [3,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:4","30:4","30:33","30:1.400000","30:2.400000"] + - [3,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"] + - 
[4,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:4","2020-05-29 15:56:30:4","2020-05-29 15:56:30:33","2020-05-29 15:56:30:1.400000","2020-05-29 15:56:30:2.400000"] + - [3,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:4","2020-05-01:4","2020-05-01:33","2020-05-01:1.400000","2020-05-01:2.400000"] + - [3,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"] + - [4,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:4","a:4","a:33","a:1.400000","a:2.400000"] + - [3,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"] + - [4,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"] + - + id: 21 + desc: min_cate-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 
smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + min_cate({0}.c2,d[0]) OVER w1 as m2, + min_cate({0}.c3,d[0]) OVER w1 as m3, + min_cate({0}.c4,d[0]) OVER w1 as m4, + min_cate({0}.c5,d[0]) OVER w1 as m5, + min_cate({0}.c6,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 1: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - 
[4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"] + - [4,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"] + - [4,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - 
[2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"] + - [4,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"] + - + id: 22 + desc: count_cate-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + count_cate({0}.c2,d[0]) OVER w1 as m2, + count_cate({0}.c3,d[0]) OVER w1 as m3, + count_cate({0}.c4,d[0]) OVER w1 as m4, + count_cate({0}.c5,d[0]) OVER w1 as m5, + count_cate({0}.c6,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:1","1:1","1:1"] + - [2,"aa","1:2","1:2","1:2","1:2","1:2"] + - [3,"aa","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1"] + - 
[4,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"] + 1: + rows: + - [1,"aa","1:1","1:1","1:1","1:1","1:1"] + - [2,"aa","1:2","1:2","1:2","1:2","1:2"] + - [3,"aa","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1"] + - [4,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"] + 2: + rows: + - [1,"aa","30:1","30:1","30:1","30:1","30:1"] + - [2,"aa","30:2","30:2","30:2","30:2","30:2"] + - [3,"aa","30:2,32:1","30:2,32:1","30:2,32:1","30:2,32:1","30:2,32:1"] + - [4,"aa","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"] + - [2,"aa","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2"] + - [3,"aa","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1"] + - [4,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"] + - [2,"aa","2020-05-01:2","2020-05-01:2","2020-05-01:2","2020-05-01:2","2020-05-01:2"] + - [3,"aa","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1"] + - [4,"aa","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1"] + 5: + rows: + - [1,"aa","a:1","a:1","a:1","a:1","a:1"] + - [2,"aa","a:2","a:2","a:2","a:2","a:2"] + - [3,"aa","a:2,b:1","a:2,b:1","a:2,b:1","a:2,b:1","a:2,b:1"] + - 
[4,"aa","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1"] + - + id: 23 + desc: sum_cate-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + sum_cate({0}.c2,d[0]) OVER w1 as m2, + sum_cate({0}.c3,d[0]) OVER w1 as m3, + sum_cate({0}.c4,d[0]) OVER w1 as m4, + sum_cate({0}.c5,d[0]) OVER w1 as m5, + sum_cate({0}.c6,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:5","1:5","1:63","1:2.500000","1:4.500000"] + - [3,"aa","1:5,2:3","1:5,2:3","1:63,2:32","1:2.500000,2:1.300000","1:4.500000,2:2.300000"] + - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 1: + rows: + - 
[1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:5","1:5","1:63","1:2.500000","1:4.500000"] + - [3,"aa","1:5,2:3","1:5,2:3","1:63,2:32","1:2.500000,2:1.300000","1:4.500000,2:2.300000"] + - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:5","30:5","30:63","30:2.500000","30:4.500000"] + - [3,"aa","30:5,32:3","30:5,32:3","30:63,32:32","30:2.500000,32:1.300000","30:4.500000,32:2.300000"] + - [4,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:5","2020-05-29 15:56:30:5","2020-05-29 15:56:30:63","2020-05-29 15:56:30:2.500000","2020-05-29 15:56:30:4.500000"] + - [3,"aa","2020-05-29 15:56:30:5,2020-05-29 15:56:32:3","2020-05-29 15:56:30:5,2020-05-29 15:56:32:3","2020-05-29 15:56:30:63,2020-05-29 15:56:32:32","2020-05-29 15:56:30:2.500000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:4.500000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:5","2020-05-01:5","2020-05-01:63","2020-05-01:2.500000","2020-05-01:4.500000"] + - [3,"aa","2020-05-01:5,2020-05-02:3","2020-05-01:5,2020-05-02:3","2020-05-01:63,2020-05-02:32","2020-05-01:2.500000,2020-05-02:1.300000","2020-05-01:4.500000,2020-05-02:2.300000"] + - 
[4,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:5","a:5","a:63","a:2.500000","a:4.500000"] + - [3,"aa","a:5,b:3","a:5,b:3","a:63,b:32","a:2.500000,b:1.300000","a:4.500000,b:2.300000"] + - [4,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"] + - + id: 24 + desc: avg_cate-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + avg_cate({0}.c2,d[0]) OVER w1 as m2, + avg_cate({0}.c3,d[0]) OVER w1 as m3, + avg_cate({0}.c4,d[0]) OVER w1 as m4, + avg_cate({0}.c5,d[0]) OVER w1 as m5, + avg_cate({0}.c6,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 
string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [2,"aa","1:2.500000","1:2.500000","1:31.500000","1:1.250000","1:2.250000"] + - [3,"aa","1:2.500000,2:3.000000","1:2.500000,2:3.000000","1:31.500000,2:32.000000","1:1.250000,2:1.300000","1:2.250000,2:2.300000"] + - [4,"aa","1:4.000000,2:3.000000","1:4.000000,2:3.000000","1:33.000000,2:32.000000","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 1: + rows: + - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [2,"aa","1:2.500000","1:2.500000","1:31.500000","1:1.250000","1:2.250000"] + - [3,"aa","1:2.500000,2:3.000000","1:2.500000,2:3.000000","1:31.500000,2:32.000000","1:1.250000,2:1.300000","1:2.250000,2:2.300000"] + - [4,"aa","1:4.000000,2:3.000000","1:4.000000,2:3.000000","1:33.000000,2:32.000000","1:1.400000,2:1.300000","1:2.400000,2:2.300000"] + 2: + rows: + - [1,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"] + - [2,"aa","30:2.500000","30:2.500000","30:31.500000","30:1.250000","30:2.250000"] + - [3,"aa","30:2.500000,32:3.000000","30:2.500000,32:3.000000","30:31.500000,32:32.000000","30:1.250000,32:1.300000","30:2.250000,32:2.300000"] + - [4,"aa","30:4.000000,32:3.000000","30:4.000000,32:3.000000","30:33.000000,32:32.000000","30:1.400000,32:1.300000","30:2.400000,32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:2.500000","2020-05-29 15:56:30:2.500000","2020-05-29 15:56:30:31.500000","2020-05-29 15:56:30:1.250000","2020-05-29 15:56:30:2.250000"] + - [3,"aa","2020-05-29 15:56:30:2.500000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:2.500000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:31.500000,2020-05-29 15:56:32:32.000000","2020-05-29 
15:56:30:1.250000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.250000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:30:4.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:4.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:33.000000,2020-05-29 15:56:32:32.000000","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:2.500000","2020-05-01:2.500000","2020-05-01:31.500000","2020-05-01:1.250000","2020-05-01:2.250000"] + - [3,"aa","2020-05-01:2.500000,2020-05-02:3.000000","2020-05-01:2.500000,2020-05-02:3.000000","2020-05-01:31.500000,2020-05-02:32.000000","2020-05-01:1.250000,2020-05-02:1.300000","2020-05-01:2.250000,2020-05-02:2.300000"] + - [4,"aa","2020-05-01:4.000000,2020-05-02:3.000000","2020-05-01:4.000000,2020-05-02:3.000000","2020-05-01:33.000000,2020-05-02:32.000000","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"] + - [2,"aa","a:2.500000","a:2.500000","a:31.500000","a:1.250000","a:2.250000"] + - [3,"aa","a:2.500000,b:3.000000","a:2.500000,b:3.000000","a:31.500000,b:32.000000","a:1.250000,b:1.300000","a:2.250000,b:2.300000"] + - [4,"aa","a:4.000000,b:3.000000","a:4.000000,b:3.000000","a:33.000000,b:32.000000","a:1.400000,b:1.300000","a:2.400000,b:2.300000"] + - + id: 25 + desc: "*_cate-fail1" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - 
[3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["avg_cate","sum_cate","max_cate","min_cate","count_cate"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"] + - ["{1}.c5","{1}.c6","{1}.c10"] + sql: | + SELECT {0}.id, {0}.c1, + d[0](d[1],d[2]) OVER w1 as m2 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 26 + desc: "*_cate-fail2" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - 
["avg_cate","sum_cate","max_cate","min_cate","count_cate"] + - ["{0}.c7","{0}.c8","{0}.c9","{0}.c10"] + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9","{1}.c10"] + sql: | + SELECT {0}.id, {0}.c1, + d[0](d[1],d[2]) OVER w1 as m2 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 27 + desc: max_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + max_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2, + max_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3, + max_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4, + max_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5, + max_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id 
int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 1: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"] + - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - 
[3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"] + - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"] + - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - + id: 28 + desc: min_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + min_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2, + min_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3, + min_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4, + min_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5, + min_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 
AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 1: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"] + - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - 
[2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"] + - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"] + - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - + id: 29 + desc: count_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + count_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2, + count_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3, + count_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4, + count_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5, + 
count_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:1","1:1","1:1"] + - [2,"aa","1:1","1:1","1:1","1:1","1:1"] + - [3,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"] + - [4,"aa","2:1","2:1","2:1","2:1","2:1"] + 1: + rows: + - [1,"aa","1:1","1:1","1:1","1:1","1:1"] + - [2,"aa","1:1","1:1","1:1","1:1","1:1"] + - [3,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"] + - [4,"aa","2:1","2:1","2:1","2:1","2:1"] + 2: + rows: + - [1,"aa","30:1","30:1","30:1","30:1","30:1"] + - [2,"aa","30:1","30:1","30:1","30:1","30:1"] + - [3,"aa","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1"] + - [4,"aa","32:1","32:1","32:1","32:1","32:1"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"] + - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1"] + - [4,"aa","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"] + - [3,"aa","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1"] + - 
[4,"aa","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1"] + 5: + rows: + - [1,"aa","a:1","a:1","a:1","a:1","a:1"] + - [2,"aa","a:1","a:1","a:1","a:1","a:1"] + - [3,"aa","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1"] + - [4,"aa","b:1","b:1","b:1","b:1","b:1"] + - + id: 30 + desc: sum_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + sum_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2, + sum_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3, + sum_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4, + sum_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5, + sum_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - 
[1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 1: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"] + - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"] + - 
[4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"] + - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - + id: 31 + desc: avg_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + avg_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2, + avg_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3, + avg_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4, + avg_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5, + avg_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 
string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [3,"aa","1:1.000000,2:3.000000","1:1.000000,2:3.000000","1:30.000000,2:32.000000","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"] + 1: + rows: + - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [3,"aa","1:1.000000,2:3.000000","1:1.000000,2:3.000000","1:30.000000,2:32.000000","1:1.100000,2:1.300000","1:2.100000,2:2.300000"] + - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"] + 2: + rows: + - [1,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"] + - [2,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"] + - [3,"aa","30:1.000000,32:3.000000","30:1.000000,32:3.000000","30:30.000000,32:32.000000","30:1.100000,32:1.300000","30:2.100000,32:2.300000"] + - [4,"aa","32:3.000000","32:3.000000","32:32.000000","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:30:1.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:1.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:30.000000,2020-05-29 15:56:32:32.000000","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:3.000000","2020-05-29 
15:56:32:32.000000","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-01:1.000000,2020-05-02:3.000000","2020-05-01:1.000000,2020-05-02:3.000000","2020-05-01:30.000000,2020-05-02:32.000000","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"] + - [4,"aa","2020-05-02:3.000000","2020-05-02:3.000000","2020-05-02:32.000000","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"] + - [2,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"] + - [3,"aa","a:1.000000,b:3.000000","a:1.000000,b:3.000000","a:30.000000,b:32.000000","a:1.100000,b:1.300000","a:2.100000,b:2.300000"] + - [4,"aa","b:3.000000","b:3.000000","b:32.000000","b:1.300000","b:2.300000"] + - + id: 32 + desc: "*_cate_where-fail1" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + 
- [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["avg_cate_where","sum_cate_where","max_cate_where","min_cate_where","count_cate_where"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"] + - ["{1}.c5","{1}.c6","{1}.c10"] + sql: | + SELECT {0}.id, {0}.c1, + d[0](d[1],{0}.c10,d[2]) OVER w1 as m2 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 33 + desc: "*_cate_where-fail2" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["avg_cate_where","sum_cate_where","max_cate_where","min_cate_where","count_cate_where"] + - ["{0}.c7","{0}.c8","{0}.c9","{0}.c10"] + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9","{1}.c10"] + sql: | + SELECT {0}.id, {0}.c1, + d[0](d[1],{0}.c10,d[2]) OVER w1 as m2 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 34 + 
desc: top_n_key_max_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + top_n_key_max_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2, + top_n_key_max_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3, + top_n_key_max_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4, + top_n_key_max_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5, + top_n_key_max_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 1: + rows: + - 
[1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [3,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [3,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - + id: 35 + desc: top_n_key_min_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 
date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + top_n_key_min_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2, + top_n_key_min_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3, + top_n_key_min_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4, + top_n_key_min_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5, + top_n_key_min_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 1: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 2: + rows: + 
- [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [3,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [3,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - + id: 36 + desc: top_n_key_sum_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - 
[3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + top_n_key_sum_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2, + top_n_key_sum_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3, + top_n_key_sum_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4, + top_n_key_sum_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5, + top_n_key_sum_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 1: + rows: + - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"] + - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"] + 2: + rows: + - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"] + - [3,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + - 
[4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"] + - [3,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"] + - + id: 37 + desc: top_n_key_avg_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 
double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + top_n_key_avg_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2, + top_n_key_avg_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3, + top_n_key_avg_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4, + top_n_key_avg_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5, + top_n_key_avg_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [3,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"] + - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"] + 1: + rows: + - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"] + - [3,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"] + - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"] + 2: + rows: + - [1,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"] + - [2,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"] + - [3,"aa","32:3.000000","32:3.000000","32:32.000000","32:1.300000","32:2.300000"] + - 
[4,"aa","32:3.000000","32:3.000000","32:32.000000","32:1.300000","32:2.300000"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [2,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"] + - [3,"aa","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:32.000000","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + - [4,"aa","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:32.000000","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"] + 4: + rows: + - [1,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"] + - [2,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"] + - [3,"aa","2020-05-02:3.000000","2020-05-02:3.000000","2020-05-02:32.000000","2020-05-02:1.300000","2020-05-02:2.300000"] + - [4,"aa","2020-05-02:3.000000","2020-05-02:3.000000","2020-05-02:32.000000","2020-05-02:1.300000","2020-05-02:2.300000"] + 5: + rows: + - [1,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"] + - [2,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"] + - [3,"aa","b:3.000000","b:3.000000","b:32.000000","b:1.300000","b:2.300000"] + - [4,"aa","b:3.000000","b:3.000000","b:32.000000","b:1.300000","b:2.300000"] + - + id: 38 + desc: top_n_key_count_cate_where-normal + mode: cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - 
[2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"] + sql: | + SELECT {0}.id, {0}.c1, + top_n_key_count_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2, + top_n_key_count_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3, + top_n_key_count_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4, + top_n_key_count_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5, + top_n_key_count_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"] + expectProvider: + 0: + rows: + - [1,"aa","1:1","1:1","1:1","1:1","1:1"] + - [2,"aa","1:1","1:1","1:1","1:1","1:1"] + - [3,"aa","2:1","2:1","2:1","2:1","2:1"] + - [4,"aa","2:1","2:1","2:1","2:1","2:1"] + 1: + rows: + - [1,"aa","1:1","1:1","1:1","1:1","1:1"] + - [2,"aa","1:1","1:1","1:1","1:1","1:1"] + - [3,"aa","2:1","2:1","2:1","2:1","2:1"] + - [4,"aa","2:1","2:1","2:1","2:1","2:1"] + 2: + rows: + - [1,"aa","30:1","30:1","30:1","30:1","30:1"] + - [2,"aa","30:1","30:1","30:1","30:1","30:1"] + - [3,"aa","32:1","32:1","32:1","32:1","32:1"] + - [4,"aa","32:1","32:1","32:1","32:1","32:1"] + 3: + rows: + - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 
15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"] + - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"] + - [3,"aa","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1"] + - [4,"aa","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1"] + 4: + rows: + - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"] + - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"] + - [3,"aa","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1"] + - [4,"aa","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1"] + 5: + rows: + - [1,"aa","a:1","a:1","a:1","a:1","a:1"] + - [2,"aa","a:1","a:1","a:1","a:1","a:1"] + - [3,"aa","b:1","b:1","b:1","b:1","b:1"] + - [4,"aa","b:1","b:1","b:1","b:1","b:1"] + - + id: 39 + desc: "top_n_key_*_cate_where-fail1" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - 
[4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["top_n_key_count_cate_where","top_n_key_sum_cate_where","top_n_key_avg_cate_where","top_n_key_max_cate_where","top_n_key_min_cate_where"] + - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"] + - ["{1}.c5","{1}.c6","{1}.c10"] + sql: | + SELECT {0}.id, {0}.c1, + d[0](d[1],{0}.c10,d[2],1) OVER w1 as m2 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 40 + desc: "top_n_key_*_cate_where-fail2" + sqlDialect: ["HybridSQL"] + level: 5 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:c7","index2:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + - + columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:id:timecol"] + rows: + - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false] + - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + dataProvider: + - ["top_n_key_count_cate_where","top_n_key_sum_cate_where","top_n_key_avg_cate_where","top_n_key_max_cate_where","top_n_key_min_cate_where"] + - ["{0}.c7","{0}.c8","{0}.c9","{0}.c10"] + - ["{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9","{1}.c10"] + sql: | + SELECT {0}.id, {0}.c1, + d[0](d[1],{0}.c10,d[2],1) OVER w1 as m2 + FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id + WINDOW w1 AS (PARTITION BY 
{0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 41 + desc: arithmetic_and_udf_before_udaf + inputs: + - columns: ["id bigint", "c1 double", "c2 float", "c3 int"] + indexs: ["index1:c3:id"] + rows: + - [1, 10.0, 1.0, 5] + - [2, 9.0, 2.0, 5] + - [3, 8.0, 3.0, 5] + - [4, 7.0, 4.0, 2] + - [5, 6.0, 5.0, 2] + sql: | + SELECT {0}.id, + sum((c1 - c2) / c3) OVER w1 AS r1, + sum(log(c1 + c2) + c3) OVER w1 as r2 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint","r1 double", "r2 double"] + rows: + - [1, 1.8, 7.3978952727983707] + - [2, 3.2, 14.795790545596741] + - [3, 4.2, 22.19368581839511] + - [4, 1.5, 4.3978952727983707] + - [5, 2.0, 8.7957905455967413] + + - id: 42 + desc: arithmetic_and_udf_after_udaf + sqlDialect: ["HybridSQL"] + tags: ["目前只能f(udaf()) over w,否则无法进入window agg节点"] + inputs: + - columns: ["id bigint", "c1 double", "c2 float", "c3 int"] + indexs: ["index1:c3:id"] + rows: + - [1, 10.0, 1.0, 5] + - [2, 9.0, 2.0, 5] + - [3, 8.0, 3.0, 5] + - [4, 7.0, 4.0, 2] + - [5, 6.0, 5.0, 2] + sql: | + SELECT {0}.id, + abs(sum(c3)) OVER w1 as r1, + log((sum(c1) + sum(c2)) / c3) OVER w1 AS r2 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint","r1 int", "r2 double"] + rows: + - [1, 5, 0.78845736036427028] + - [2, 10, 1.4816045409242156] + - [3, 15, 1.8870696490323797] + - [4, 2, 1.7047480922384253] + - [5, 4, 2.3978952727983707] + + - id: 43 + desc: nested udaf + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["id bigint", "c1 double", "c2 float", "c3 int"] + indexs: ["index1:c3:id"] + rows: + - [1, 10.0, 1.0, 5] + - [2, 9.0, 2.0, 5] + - [3, 8.0, 3.0, 5] + - [4, 7.0, 4.0, 2] + - [5, 6.0, 5.0, 2] + sql: | + SELECT {0}.id, + sum(c1 - count(c1)) OVER w1 AS r1, + abs(sum(log(c1) - log(count(c1)))) OVER w1 AS r2, + sum(c1 + 
sum(c2 * count(c3))) OVER w1 AS r3 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint","r1 double", "r2 double", "r3 double"] + rows: + - [1, 9.0, 2.3025850929940459, 11.0] + - [2, 15.0, 3.1135153092103747, 31.0] + - [3, 18.0, 3.2834143460057721, 81.0] + - [4, 6.0, 1.9459101490553132, 11.0] + - [5, 9.0, 2.3513752571634776, 49.0] + + - id: 44 + desc: cast after udaf + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["id bigint", "c1 double", "c2 float", "c3 int"] + indexs: ["index1:c3:id"] + rows: + - [1, 10.0, 1.0, 5] + - [2, 9.0, 2.0, 5] + - [3, 8.0, 3.0, 5] + - [4, 7.0, 4.0, 2] + - [5, 6.0, 5.0, 2] + sql: | + SELECT {0}.id, c3, + CAST(sum(c1) OVER w1 AS string) AS r1, + string(sum(c1) OVER w1) AS r2, + `string`(sum(c1) OVER w1) AS r3 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint", "c3 int", "r1 string", "r2 string", "r3 string"] + rows: + - [1, 5, "10", "10", "10"] + - [2, 5, "19", "19", "19"] + - [3, 5, "27", "27", "27"] + - [4, 2, "7", "7", "7"] + - [5, 2, "13", "13", "13"] + + - id: 45 + desc: aggregate where + sqlDialect: ["HybridSQL"] + mode: request-unsupport + inputs: + - columns: ["id bigint", "c1 double", "c2 float", "c3 bigint"] + indexs: ["index1:c3:id"] + rows: + - [1, 1.0, 1.1, 0] + - [2, 2.0, 7.7, 0] + - [3, NULL, 0.1, 0] + - [4, 3.0, NULL, 0] + - [5, 4.0, 5.5, 0] + - [6, 5.0, 3.3, 1] + - [7, NULL, 2.2, 1] + - [8, 7.0, NULL, 1] + - [9, 8.0, 4.4, 1] + sql: | + SELECT {0}.id, + count_where(c1, c1 < c2) OVER w1 AS count_where_1, + avg_where(c1, c1 < c2) OVER w1 AS avg_where_1, + count_where(c2, c2 > 4) OVER w1 AS count_where_2, + avg_where(c2, c2 > 4) OVER w1 AS avg_where_2 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint", "count_where_1 bigint", 
"avg_where_1 double", "count_where_2 bigint", "avg_where_2 double"] + rows: + - [1, 1, 1.0, 0, NULL] + - [2, 2, 1.5, 1, 7.6999998092651367] + - [3, 2, 1.5, 1, 7.6999998092651367] + - [4, 2, 1.5, 1, 7.6999998092651367] + - [5, 3, 2.3333333333333335, 2, 6.5999999046325684] + - [6, 0, NULL, 0, NULL] + - [7, 0, NULL, 0, NULL] + - [8, 0, NULL, 0, NULL] + - [9, 0, NULL, 1, 4.4000000953674316] + + - id: 46 + desc: window lag functions + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","pk bigint","c1 string","c2 int","c3 bigint","c4 float", + "c5 double","c6 timestamp","c7 date","c8 bool"] + indexs: ["index1:pk:c6"] + rows: + - [1, 1, "a", 1, 30, 1.1, 2.1, 1590738990000, "2020-05-01", true] + - [2, 1, "c", 4, 33, 1.4, 2.4, 1590738991000, "2020-05-03", false] + - [3, 1, "b", 3, 32, 1.3, 2.3, 1590738992000, "2020-05-02", true,] + - [4, 1, NULL, NULL, NULL, NULL, NULL, 1590738993000, NULL, NULL] + - [5, 1, "d", 5, 35, 1.5, 2.5, 1590738994000, "2020-05-04", false] + sql: | + SELECT {0}.id, + lag(c1, 0) OVER w1 as m1, + lag(c1, 2) OVER w1 as m2, + lag(c2, 0) OVER w1 as m3, + lag(c2, 2) OVER w1 as m4, + lag(c3, 0) OVER w1 as m5, + lag(c3, 2) OVER w1 as m6, + lag(c4, 0) OVER w1 as m7, + lag(c4, 2) OVER w1 as m8, + lag(c5, 0) OVER w1 as m9, + lag(c5, 2) OVER w1 as m10, + lag(c6, 0) OVER w1 as m11, + lag(c6, 2) OVER w1 as m12, + lag(c7, 0) OVER w1 as m13, + lag(c7, 2) OVER w1 as m14, + lag(c8, 0) OVER w1 as m15, + lag(c8, 2) OVER w1 as m16 + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.pk ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","m1 string", "m2 string", "m3 int", "m4 int", "m5 bigint", "m6 bigint", + "m7 float", "m8 float", "m9 double", "m10 double", + "m11 timestamp", "m12 timestamp", "m13 date", "m14 date", "m15 bool", "m16 bool"] + rows: + - [1, "a", NULL, 1, NULL, 30, NULL, 1.1, NULL, 2.1, NULL, + 1590738990000, NULL, "2020-05-01", NULL, true, NULL] + - [2, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4, 
NULL, + 1590738991000, NULL, "2020-05-03", NULL, false, NULL] + - [3, "b", "a", 3, 1, 32, 30, 1.3, 1.1, 2.3, 2.1, + 1590738992000, 1590738990000, "2020-05-02", "2020-05-01", true, true] + - [4, NULL, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4, + 1590738993000, 1590738991000, NULL, "2020-05-03", NULL, false] + - [5, "d", "b", 5, 3, 35, 32, 1.5, 1.3, 2.5, 2.3, + 1590738994000, 1590738992000, "2020-05-04", "2020-05-02", false, true] + + - id: 47 + desc: count where value equals first value + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["id bigint", "pk bigint", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "a"] + - [2, 0, "b"] + - [3, 0, "c"] + - [4, 0, NULL] + - [5, 0, "b"] + - [6, 0, NULL] + - [7, 0, "c"] + - [8, 0, "a"] + - [9, 0, NULL] + - [10, 0, "c"] + - [11, 0, "a"] + - [12, 0, "b"] + sql: | + SELECT {0}.id, + count_where(id, ifnull(c1, "a") = ifnull(first_value(c1), "a")) OVER w1 AS count_where + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint", "count_where bigint"] + rows: + - [1, 1] + - [2, 1] + - [3, 1] + - [4, 2] + - [5, 2] + - [6, 3] + - [7, 2] + - [8, 4] + - [9, 5] + - [10, 3] + - [11, 6] + - [12, 3] + - id: 48 + desc: count where value equals lag + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["id bigint", "pk bigint", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "a"] + - [2, 0, "b"] + - [3, 0, "c"] + - [4, 0, NULL] + - [5, 0, "b"] + - [6, 0, NULL] + - [7, 0, "c"] + - [8, 0, "a"] + - [9, 0, NULL] + - [10, 0, "c"] + - [11, 0, "a"] + - [12, 0, "b"] + - [13, 0, "a"] + - [14, 0, "a"] + sql: | + SELECT {0}.id, + count_where(id, ifnull(c1, "a") = ifnull(lag(c1, 0), "a")) OVER w1 AS count_where_w1, + count_where(id, ifnull(c1, "a") = ifnull(lag(c1, 0), "a")) OVER w2 AS count_where_w2 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.pk 
ORDER BY {0}.id ROWS_RANGE BETWEEN 100s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint", "count_where_w1 bigint", "count_where_w2 bigint"] + rows: + - [1, 1, 1] + - [2, 1, 1] + - [3, 1, 1] + - [4, 2, 2] + - [5, 2, 2] + - [6, 3, 3] + - [7, 2, 2] + - [8, 4, 4] + - [9, 5, 5] + - [10, 3, 3] + - [11, 6, 6] + - [12, 3, 3] + - [13, 6, 7] + - [14, 7, 8] + - id: 49 + desc: count where value equals case when lag + sqlDialect: ["HybridSQL"] + inputs: + - columns: ["id bigint", "pk bigint", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "a"] + - [2, 0, "b"] + - [3, 0, "c"] + - [4, 0, NULL] + - [5, 0, "b"] + - [6, 0, NULL] + - [7, 0, "c"] + - [8, 0, "a"] + - [9, 0, NULL] + - [10, 0, "c"] + - [11, 0, "a"] + - [12, 0, "b"] + - [13, 0, "a"] + - [14, 0, "a"] + sql: | + SELECT {0}.id, + case when !isnull(lag(c1,0)) OVER w1 then count_where(id, c1 = lag(c1, 0)) OVER w1 else null end AS count_where_w1, + case when !isnull(lag(c1,0)) OVER w2 then count_where(id, c1 = lag(c1, 0)) OVER w2 else null end AS count_where_w2 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS_RANGE BETWEEN 100s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint", "count_where_w1 bigint", "count_where_w2 bigint"] + rows: + - [1, 1, 1] + - [2, 1, 1] + - [3, 1, 1] + - [4, NULL, NULL] + - [5, 2, 2] + - [6, NULL, NULL] + - [7, 2, 2] + - [8, 2, 2] + - [9, NULL, NULL] + - [10, 3, 3] + - [11, 3, 3] + - [12, 3, 3] + - [13, 3, 4] + - [14, 4, 5] + - + id: 50 + desc: 重复的聚合表达式 + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - 
["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w1 as w1_c4_sum2 + FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint", "w1_c4_sum2 bigint"] + rows: + - ["aa",20,30, 30] + - ["aa",21,61, 61] + - ["aa",22,93, 93] + - ["aa",23,96, 96] + - ["bb",24,34, 34] + + - + id: 51 + desc: 重复的聚合表达式 + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w1 as w1_c4_sum2 + FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint", "w1_c4_sum2 bigint"] + rows: + - ["aa",20,30, 30] + - ["aa",21,61, 61] + - ["aa",22,93, 93] + - ["aa",23,96, 96] + - ["bb",24,34, 34] + + - id: 52 + desc: 多个可合并窗口上的多个聚合函数计算 + sqlDialect: ["HybridSQL"] + version: 0.6.0 + sql: | + SELECT {0}.id, pk, col1, std_ts, + distinct_count(col1) OVER w1 as a1, + distinct_count(col1) OVER w2 as a2, + distinct_count(col1) OVER w3 as a3, + sum(col1 * 1.0) OVER w1 as b1, + sum(col1 * 1.0) OVER w2 as b2, + sum(col1 * 1.0) OVER w3 as b3 + FROM {0} WINDOW + w1 AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 4 PRECEDING AND 3 PRECEDING), + w3 AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 6 PRECEDING AND 5 PRECEDING); + inputs: + - + columns: ["id int", "pk string", "col1 
int32", "std_ts timestamp"] + indexs: ["index1:pk:std_ts"] + rows: + - [1, A, 1, 1590115420000] + - [2, A, 1, 1590115430000] + - [3, A, 2, 1590115440000] + - [4, A, 2, 1590115450000] + - [5, A, 2, 1590115460000] + - [6, A, 3, 1590115470000] + - [7, A, 3, 1590115480000] + - [8, A, 3, 1590115490000] + - [9, A, 3, 1590115500000] + - [10, B, 1, 1590115420000] + - [11, B, 2, 1590115430000] + - [12, B, 3, 1590115440000] + - [13, B, 4, 1590115450000] + - [14, B, 5, 1590115460000] + expect: + columns: ["id int32", "pk string", "col1 int32", "std_ts timestamp", + "a1 bigint", "a2 bigint", "a3 bigint", + "b1 double" ,"b2 double", "b3 double"] + order: id + rows: + - [1, A, 1, 1590115420000, 1, 0, 0, 1.0, NULL, NULL] + - [2, A, 1, 1590115430000, 1, 0, 0, 2.0, NULL, NULL] + - [3, A, 2, 1590115440000, 2, 0, 0, 4.0, NULL, NULL] + - [4, A, 2, 1590115450000, 2, 1, 0, 5.0, 1.0, NULL] + - [5, A, 2, 1590115460000, 1, 1, 0, 6.0, 2.0, NULL] + - [6, A, 3, 1590115470000, 2, 2, 1, 7.0, 3.0, 1.0] + - [7, A, 3, 1590115480000, 2, 1, 1, 8.0, 4.0, 2.0] + - [8, A, 3, 1590115490000, 1, 1, 2, 9.0, 4.0, 3.0] + - [9, A, 3, 1590115500000, 1, 2, 1, 9.0, 5.0, 4.0] + - [10, B, 1, 1590115420000, 1, 0, 0, 1.0, NULL, NULL] + - [11, B, 2, 1590115430000, 2, 0, 0, 3.0, NULL, NULL] + - [12, B, 3, 1590115440000, 3, 0, 0, 6.0, NULL, NULL] + - [13, B, 4, 1590115450000, 3, 1, 0, 9.0, 1.0, NULL] + - [14, B, 5, 1590115460000, 3, 2, 0, 12.0, 3.0, NULL] + + - id: 53 + desc: 同窗口下多类聚合函数 + sqlDialect: ["HybridSQL"] + version: 0.6.0 + sql: | + SELECT {0}.id, pk, col1, std_ts, + sum(col1 + count(col1)) OVER w as a1, + distinct_count(col1) OVER w as a2, + sum_where(col1, std_ts > timestamp(1590115440000)) OVER w as a3, + count_where(col1, std_ts > timestamp(1590115440000)) OVER w as a4, + avg_where(col1, std_ts > timestamp(1590115440000)) OVER w as a5, + sum(col1) OVER w as a6, + count(col1) OVER w as a7, + fz_topn_frequency(id, 3) OVER w as a8 + FROM {0} WINDOW + w AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 2 
PRECEDING AND CURRENT ROW); + inputs: + - + columns: ["id int", "pk string", "col1 int32", "std_ts timestamp"] + indexs: ["index1:pk:std_ts"] + rows: + - [1, A, 1, 1590115420000] + - [2, A, 2, 1590115430000] + - [3, A, 3, 1590115440000] + - [4, A, 4, 1590115450000] + - [5, A, 5, 1590115460000] + expect: + columns: ["id int32", "pk string", "col1 int32", "std_ts timestamp", + "a1 bigint", "a2 bigint", "a3 int32", "a4 bigint", + "a5 double" ,"a6 int32", "a7 bigint", "a8 string"] + order: id + rows: + - [1, A, 1, 1590115420000, 2, 1, null, 0, null, 1, 1, "1,NULL,NULL"] + - [2, A, 2, 1590115430000, 7, 2, null, 0, null, 3, 2, "1,2,NULL"] + - [3, A, 3, 1590115440000, 15, 3, null, 0, null, 6, 3, "1,2,3"] + - [4, A, 4, 1590115450000, 18, 3, 4, 1, 4.0, 9, 3, "2,3,4"] + - [5, A, 5, 1590115460000, 21, 3, 9, 2, 4.5, 12, 3, "3,4,5"] + + - id: 54 + desc: max空窗口 + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float", + "c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, max(c2) OVER w1 as m2, max(c3) OVER w1 as m3, max(c4) OVER w1 as m4, + max(c5) OVER w1 as m5,max(c6) OVER w1 as m6,max(c7) OVER w1 as m7, + max(c8) OVER w1 as m8,max(c9) OVER w1 as m9 + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND 2 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float", + "m6 double","m7 timestamp","m8 date","m9 string"] + rows: + - [1,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + - [2,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + - [3,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - 
[4,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c"] + + - id: 55 + desc: min空窗口 + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, min(c2) OVER w1 as m2,min(c3) OVER w1 as m3,min(c4) OVER w1 as m4,min(c5) OVER w1 as m5,min(c6) OVER w1 as m6,min(c7) OVER w1 as m7,min(c8) OVER w1 as m8,min(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND 2 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"] + rows: + - [1,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + - [2,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] + - [3,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - [4,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"] + - id: 56 + desc: window at functions, at is synonym to lag + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int","pk bigint","c1 string","c2 int","c3 bigint","c4 float", + "c5 double","c6 timestamp","c7 date","c8 bool"] + indexs: ["index1:pk:c6"] + rows: + - [1, 1, "a", 1, 30, 1.1, 2.1, 1590738990000, "2020-05-01", true] + - [2, 1, "c", 4, 33, 1.4, 2.4, 1590738991000, "2020-05-03", false] + - [3, 1, "b", 3, 32, 1.3, 2.3, 1590738992000, "2020-05-02", true,] + - [4, 1, NULL, NULL, NULL, NULL, NULL, 1590738993000, NULL, NULL] + - [5, 1, "d", 5, 35, 1.5, 2.5, 1590738994000, "2020-05-04", false] + sql: | + SELECT {0}.id, + at(c1, 0) OVER w1 as m1, + at(c1, 2) OVER w1 as m2, + at(c2, 0) OVER w1 as m3, + at(c2, 2) OVER w1 as m4, + 
at(c3, 0) OVER w1 as m5, + at(c3, 2) OVER w1 as m6, + at(c4, 0) OVER w1 as m7, + at(c4, 2) OVER w1 as m8, + at(c5, 0) OVER w1 as m9, + at(c5, 2) OVER w1 as m10, + at(c6, 0) OVER w1 as m11, + at(c6, 2) OVER w1 as m12, + at(c7, 0) OVER w1 as m13, + at(c7, 2) OVER w1 as m14, + at(c8, 0) OVER w1 as m15, + at(c8, 2) OVER w1 as m16 + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.pk ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","m1 string", "m2 string", "m3 int", "m4 int", "m5 bigint", "m6 bigint", + "m7 float", "m8 float", "m9 double", "m10 double", + "m11 timestamp", "m12 timestamp", "m13 date", "m14 date", "m15 bool", "m16 bool"] + rows: + - [1, "a", NULL, 1, NULL, 30, NULL, 1.1, NULL, 2.1, NULL, + 1590738990000, NULL, "2020-05-01", NULL, true, NULL] + - [2, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4, NULL, + 1590738991000, NULL, "2020-05-03", NULL, false, NULL] + - [3, "b", "a", 3, 1, 32, 30, 1.3, 1.1, 2.3, 2.1, + 1590738992000, 1590738990000, "2020-05-02", "2020-05-01", true, true] + - [4, NULL, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4, + 1590738993000, 1590738991000, NULL, "2020-05-03", NULL, false] + - [5, "d", "b", 5, 3, 35, 32, 1.5, 1.3, 2.5, 2.3, + 1590738994000, 1590738992000, "2020-05-04", "2020-05-02", false, true] + + - id: 57 + desc: | + correctness for at/lag when offset out-of-range rows_range window frame bound. + keynote, lag returns value evaluated at the row that is offset rows before the current row within the partition. 
+ refer https://github.com/4paradigm/OpenMLDB/issues/1554 + inputs: + - columns: [ "id int","ts timestamp","group1 string","val1 int" ] + indexs: [ "index1:group1:ts" ] + name: t1 + data: | + 1, 1612130400000, g1, 1 + 2, 1612130401000, g1, 2 + 3, 1612130402000, g1, 3 + 4, 1612130403000, g1, 4 + 5, 1612130404000, g1, 5 + 6, 1612130404000, g2, 4 + 7, 1612130405000, g2, 3 + 8, 1612130406000, g2, 2 + sql: | + select + `id`, + `val1`, + lag(val1, 0) over w1 as agg1, + lag(val1, 1) over w1 as agg2, + lag(val1, 3) over w1 as agg3 + from `t1` WINDOW + w1 as (partition by `group1` order by `ts` rows_range between 2s preceding and 1s preceding MAXSIZE 10); + expect: + columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] + order: id + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, 1, NULL] + - [3, 3, 3, 2, NULL] + - [4, 4, 4, 3, 1] + - [5, 5, 5, 4, 2] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, 4, NULL] + - [8, 2, 2, 3, NULL] + + - id: 58 + desc: | + correctness for at/lag when offset out-of-range rows_range window frame bound, together with other window function. 
+ refer https://github.com/4paradigm/OpenMLDB/issues/1554 + inputs: + - columns: [ "id int","ts timestamp","group1 string","val1 int" ] + indexs: [ "index1:group1:ts" ] + name: t1 + data: | + 1, 1612130400000, g1, 1 + 2, 1612130401000, g1, 2 + 3, 1612130402000, g1, 3 + 4, 1612130403000, g1, 4 + 5, 1612130404000, g1, 5 + 6, 1612130405000, g2, 4 + 7, 1612130406000, g2, 3 + 8, 1612130407000, g2, 2 + sql: | + select + `id`, + `val1`, + lag(val1, 0) over w1 as agg1, + lag(val1, 3) over w1 as agg2, + first_value(val1) over w1 as agg3 + from `t1` WINDOW + w1 as (partition by `group1` order by `ts` rows_range between 2s preceding and 1s preceding MAXSIZE 10); + expect: + columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] + order: id + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, NULL, 1] + - [3, 3, 3, NULL, 2] + - [4, 4, 4, 1, 3] + - [5, 5, 5, 2, 4] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, NULL, 4] + - [8, 2, 2, NULL, 3] + + - id: 59 + desc: | + correctness for at/lag when offset out-of-range window frame bound. + keynote, lag returns value evaluated at the row that is offset rows before the current row within the partition. 
+ refer https://github.com/4paradigm/OpenMLDB/issues/1554 + inputs: + - columns: [ "id int","ts timestamp","group1 string","val1 int" ] + indexs: [ "index1:group1:ts" ] + name: t1 + data: | + 1, 1612130400000, g1, 1 + 2, 1612130401000, g1, 2 + 3, 1612130402000, g1, 3 + 4, 1612130403000, g1, 4 + 5, 1612130404000, g1, 5 + 6, 1612130405000, g2, 4 + 7, 1612130406000, g2, 3 + 8, 1612130407000, g2, 2 + sql: | + select + `id`, + `val1`, + lag(val1, 0) over w1 as agg1, + lag(val1, 1) over w1 as agg2, + lag(val1, 3) over w1 as agg3 + from `t1` WINDOW + w1 as (partition by `group1` order by `ts` rows between 2 preceding and 1 preceding); + expect: + columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] + order: id + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, 1, NULL] + - [3, 3, 3, 2, NULL] + - [4, 4, 4, 3, 1] + - [5, 5, 5, 4, 2] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, 4, NULL] + - [8, 2, 2, 3, NULL] + + - id: 60 + desc: | + correctness for at/lag when offset out-of-range rows window frame bound + refer https://github.com/4paradigm/OpenMLDB/issues/1554 + inputs: + - columns: [ "id int","ts timestamp","group1 string","val1 int" ] + indexs: [ "index1:group1:ts" ] + name: t1 + data: | + 1, 1612130400000, g1, 1 + 2, 1612130401000, g1, 2 + 3, 1612130402000, g1, 3 + 4, 1612130403000, g1, 4 + 5, 1612130404000, g1, 5 + 6, 1612130405000, g2, 4 + 7, 1612130406000, g2, 3 + 8, 1612130407000, g2, 2 + sql: | + select + `id`, + `val1`, + lag(val1, 0) over w1 as agg1, + lag(val1, 3) over w1 as agg2, + first_value(val1) over w1 as agg3 + from `t1` WINDOW + w1 as (partition by `group1` order by `ts` rows between 2 preceding and 1 preceding); + expect: + columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"] + order: id + rows: + - [1, 1, 1, NULL, NULL] + - [2, 2, 2, NULL, 1] + - [3, 3, 3, NULL, 2] + - [4, 4, 4, 1, 3] + - [5, 5, 5, 2, 4] + - [6, 4, 4, NULL, NULL] + - [7, 3, 3, NULL, 4] + - [8, 2, 2, NULL, 3] + + - id: 61 + desc: median + sqlDialect: 
["HybridSQL"] + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true] + - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false] + - [3,"aa",1,1,33,1.1,2.1,1590738992000,"2020-05-02","b",true] + - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL] + sql: | + SELECT {0}.id, c1, median(c2) OVER w1 as m2,median(c3) OVER w1 as m3,median(c4) OVER w1 as m4,median(c5) OVER w1 as m5,median(c6) OVER w1 as m6 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double"] + rows: + - [1,"aa",1,1,30,1.1000000238418579,2.1] + - [2,"aa",2.5,2.5,31.5,1.25,2.25] + - [3,"aa",1,1,33,1.1000000238418579,2.1] + - [4,"aa",2.5,2.5,33,1.25,2.25] diff --git a/cases/integration_test/function/test_udf_function.yaml b/cases/integration_test/function/test_udf_function.yaml new file mode 100644 index 00000000000..7165f09182a --- /dev/null +++ b/cases/integration_test/function/test_udf_function.yaml @@ -0,0 +1,89 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +sqlDialect: ["HybridSQL"] +version: 0.5.0 +cases: + - id: 0 + desc: 默认udf null处理逻辑:返回null + inputs: + - columns: ["id int64", "c1 string", "c2 int", "c3 double", + "c4 date", "c5 timestamp", "std_ts timestamp"] + indexs: ["index1:id:std_ts"] + rows: + - [1, NULL, 42, 3.14, "2020-05-20", 1590738989000, 1590738989000] + - [2, "hello world", NULL, NULL, NULL, NULL, 1590738989000] + sql: select id, + substring(c1, 1, 5) as r1, + substring(c1, 1, c2) as r2, + pow(c2, 2) as r3, + floor(c3) as r4, + dayofweek(c4) as r5, + dayofweek(c5) as r6 + from {0}; + expect: + order: id + columns: ["id int64", "r1 string", "r2 string", "r3 double", "r4 double", + "r5 int", "r6 int"] + rows: + - [1, NULL, NULL, 1764, 3.00, 4, 6] + - [2, "hello", NULL, NULL, NULL, NULL, NULL] + + - id: 1 + desc: udf使用中间结果null值 + inputs: + - columns: ["id int64", "c1 string", "c2 int", "c3 double", + "c4 date", "c5 timestamp", "std_ts timestamp"] + indexs: ["index1:id:std_ts"] + rows: + - [1, NULL, 42, 3.14, "2020-05-20", 1590738989000, 1590738989000] + - [2, "hello world", NULL, NULL, NULL, NULL, 1590738989000] + sql: select id, + substring(substring(c1, 1, 5), 1, 1) as r1, + substring(substring(c1, 1, c2), c2, 1) as r2, + abs(pow(c2, 2)) as r3, + abs(floor(c3)) as r4, + abs(dayofweek(c4)) as r5, + abs(dayofweek(c5)) as r6 + from {0}; + expect: + order: id + columns: ["id int64", "r1 string", "r2 string", "r3 double", "r4 double", + "r5 int", "r6 int"] + rows: + - [1, NULL, NULL, 1764, 3.00, 4, 6] + - [2, "h", NULL, NULL, NULL, NULL, NULL] + + - id: 2 + desc: 函数名大小写不敏感 + inputs: + - columns: ["id int64", "c1 double", "c2 timestamp"] + indexs: ["index1:id:c2"] + rows: + - [1, 1.0, 1590738989000] + sql: select id, + SUM(c1) over w as r1, sUm(c1) over w as r2, sum(c1) over w as r3, log(c1) as r4 + from {0} window w as (PARTITION BY id ORDER BY c2 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int64", "r1 double", "r2 double", "r3 double", "r4 
double"] + rows: + - [1, 1, 1, 1, 0] + + + + + diff --git a/cases/integration_test/fz_ddl/test_bank.yaml b/cases/integration_test/fz_ddl/test_bank.yaml new file mode 100644 index 00000000000..4b725afd22c --- /dev/null +++ b/cases/integration_test/fz_ddl/test_bank.yaml @@ -0,0 +1,151 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: bank +version: 0.5.0 +cases: + - desc: bank test + id: 0 + inputs: + - columns: [ reqId string, eventTime timestamp, main_id string, new_user_id string, + loan_ts bigint, split_id int, time1 string ] + indexs: [ "index1:new_user_id:eventTime" ] + name: flattenRequest + - columns: [reqId string, eventTime timestamp, ingestionTime timestamp, actionValue + int] + indexs: ["index1:reqId:eventTime"] + name: action + - columns: [ingestionTime timestamp, new_user_id string, trx_ts bigint, trx_typ + string, trx_amt double, is_slry string] + indexs: ["index1:new_user_id:ingestionTime"] + name: bo_detail + - columns: [ingestionTime timestamp, new_user_id string, bill_ts bigint, bank_id string, + lst_bill_amt double, lst_repay_amt double, card_limit double, cur_blc double, cur_bill_min_repay double, + buy_cnt double, cur_bill_amt double, adj_amt double, rev_credit double, avl_amt double, advc_limit double, repay_status string] + indexs: ["index1:new_user_id:ingestionTime"] + name: bo_bill_detail + - columns: [ingestionTime timestamp, new_user_id string, sex string, prof string, + edu string, marriage string, 
hukou_typ string] + indexs: ["index1:new_user_id:ingestionTime"] + name: bo_user + - columns: [ingestionTime timestamp, new_user_id string, bws_ts bigint, action string, + subaction string] + indexs: ["index1:new_user_id:ingestionTime"] + name: bo_browse_history + batch_request: + columns: [reqId string, eventTime timestamp, main_id string, new_user_id string, + loan_ts bigint, split_id int, time1 string] + indexs: ["index1:new_user_id:eventTime"] + common_column_indices: [1, 2, 3, 4, 5] + rows: + - [reqId1, 1609894067190, "main_id1", "new_user_id1", 1609894067190, 1, "time1_1"] + expect: + success: true + sql: "select * from \n(\nselect\n reqId as reqId_1,\n `reqId` as flattenRequest_reqId_original_0,\n\ + \ `eventTime` as flattenRequest_eventTime_original_1,\n `main_id` as flattenRequest_main_id_original_2,\n\ + \ `new_user_id` as flattenRequest_new_user_id_original_3\nfrom\n `flattenRequest`\n\ + \ )\nas out0\nlast join\n(\nselect\n flattenRequest.reqId as reqId_5,\n\ + \ `action_reqId`.`actionValue` as action_actionValue_multi_direct_4,\n `bo_user_new_user_id`.`edu`\ + \ as bo_user_edu_multi_direct_5,\n `bo_user_new_user_id`.`hukou_typ` as bo_user_hukou_typ_multi_direct_6,\n\ + \ `bo_user_new_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_7,\n\ + \ `bo_user_new_user_id`.`marriage` as bo_user_marriage_multi_direct_8,\n \ + \ `bo_user_new_user_id`.`prof` as bo_user_prof_multi_direct_9,\n `bo_user_new_user_id`.`sex`\ + \ as bo_user_sex_multi_direct_10\nfrom\n `flattenRequest`\n last join `action`\ + \ as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`\n \ + \ last join `bo_user` as `bo_user_new_user_id` on `flattenRequest`.`new_user_id`\ + \ = `bo_user_new_user_id`.`new_user_id`)\nas out1\non out0.reqId_1 = out1.reqId_5\n\ + last join\n(\nselect\n reqId as reqId_12,\n max(`adj_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_adj_amt_multi_max_11,\n min(`adj_amt`) over 
bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_adj_amt_multi_min_12,\n max(`advc_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_advc_limit_multi_max_13,\n avg(`advc_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_advc_limit_multi_avg_14,\n min(`avl_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_avl_amt_multi_min_15,\n avg(`avl_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_avl_amt_multi_avg_16,\n min(`buy_cnt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_buy_cnt_multi_min_17,\n min(`buy_cnt`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_bill_detail_buy_cnt_multi_min_18,\n max(`card_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_card_limit_multi_max_19,\n min(`card_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_card_limit_multi_min_20,\n max(`cur_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_cur_bill_amt_multi_max_21,\n max(`cur_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_bill_detail_cur_bill_amt_multi_max_22,\n min(`cur_bill_min_repay`)\ + \ over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_cur_bill_min_repay_multi_min_23,\n\ + \ max(`cur_bill_min_repay`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_cur_bill_min_repay_multi_max_24,\n max(`cur_blc`) over\ + \ bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_cur_blc_multi_max_25,\n\ + \ max(`cur_blc`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_bill_detail_cur_blc_multi_max_26,\n max(`lst_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as 
bo_bill_detail_lst_bill_amt_multi_max_27,\n avg(`lst_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_bill_detail_lst_bill_amt_multi_avg_28,\n avg(`lst_repay_amt`) over\ + \ bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_lst_repay_amt_multi_avg_29,\n\ + \ max(`lst_repay_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_lst_repay_amt_multi_max_30,\n min(`rev_credit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_rev_credit_multi_min_31,\n avg(`rev_credit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_rev_credit_multi_avg_32,\n fz_topn_frequency(`bank_id`,\ + \ 3) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_bank_id_multi_top3frequency_33,\n\ + \ distinct_count(`bank_id`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_bank_id_multi_unique_count_34,\n fz_topn_frequency(`repay_status`,\ + \ 3) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_repay_status_multi_top3frequency_35,\n\ + \ distinct_count(`repay_status`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as bo_bill_detail_repay_status_multi_unique_count_36\nfrom\n (select `eventTime`\ + \ as `ingestionTime`, `new_user_id` as `new_user_id`, bigint(0) as `bill_ts`,\ + \ '' as `bank_id`, double(0) as `lst_bill_amt`, double(0) as `lst_repay_amt`,\ + \ double(0) as `card_limit`, double(0) as `cur_blc`, double(0) as `cur_bill_min_repay`,\ + \ double(0) as `buy_cnt`, double(0) as `cur_bill_amt`, double(0) as `adj_amt`,\ + \ double(0) as `rev_credit`, double(0) as `avl_amt`, double(0) as `advc_limit`,\ + \ '' as `repay_status`, reqId from `flattenRequest`)\n window bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\ + \ as (\nUNION (select `ingestionTime`, `new_user_id`, `bill_ts`, `bank_id`, `lst_bill_amt`,\ + \ `lst_repay_amt`, `card_limit`, 
`cur_blc`, `cur_bill_min_repay`, `buy_cnt`, `cur_bill_amt`,\ + \ `adj_amt`, `rev_credit`, `avl_amt`, `advc_limit`, `repay_status`, '' as reqId\ + \ from `bo_bill_detail`) partition by `new_user_id` order by `ingestionTime` rows_range\ + \ between 2764801s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),\n bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as (\nUNION (select `ingestionTime`, `new_user_id`, `bill_ts`, `bank_id`, `lst_bill_amt`,\ + \ `lst_repay_amt`, `card_limit`, `cur_blc`, `cur_bill_min_repay`, `buy_cnt`, `cur_bill_amt`,\ + \ `adj_amt`, `rev_credit`, `avl_amt`, `advc_limit`, `repay_status`, '' as reqId\ + \ from `bo_bill_detail`) partition by `new_user_id` order by `ingestionTime` rows_range\ + \ between 5529601s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW))\nas out2\n\ + on out0.reqId_1 = out2.reqId_12\nlast join\n(\nselect\n reqId as reqId_38,\n\ + \ distinct_count(`action`) over bo_browse_history_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_browse_history_action_multi_unique_count_37,\n distinct_count(`action`)\ + \ over bo_browse_history_new_user_id_ingestionTime_0_10 as bo_browse_history_action_multi_unique_count_38,\n\ + \ distinct_count(`subaction`) over bo_browse_history_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_browse_history_subaction_multi_unique_count_39,\n distinct_count(`subaction`)\ + \ over bo_browse_history_new_user_id_ingestionTime_0_10 as bo_browse_history_subaction_multi_unique_count_40\n\ + from\n (select `eventTime` as `ingestionTime`, `new_user_id` as `new_user_id`,\ + \ bigint(0) as `bws_ts`, '' as `action`, '' as `subaction`, reqId from `flattenRequest`)\n\ + \ window bo_browse_history_new_user_id_ingestionTime_0s_5529601s as (\nUNION\ + \ (select `ingestionTime`, `new_user_id`, `bws_ts`, `action`, `subaction`, ''\ + \ as reqId from `bo_browse_history`) partition by `new_user_id` order by `ingestionTime`\ + \ rows_range between 5529601s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),\n\ + \ 
bo_browse_history_new_user_id_ingestionTime_0_10 as (\nUNION (select `ingestionTime`,\ + \ `new_user_id`, `bws_ts`, `action`, `subaction`, '' as reqId from `bo_browse_history`)\ + \ partition by `new_user_id` order by `ingestionTime` rows_range between 10 preceding\ + \ and 0 preceding INSTANCE_NOT_IN_WINDOW))\nas out3\non out0.reqId_1 = out3.reqId_38\n\ + last join\n(\nselect\n reqId as reqId_42,\n max(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_detail_trx_amt_multi_max_41,\n avg(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as bo_detail_trx_amt_multi_avg_42,\n distinct_count(`is_slry`) over bo_detail_new_user_id_ingestionTime_0_10\ + \ as bo_detail_is_slry_multi_unique_count_43,\n distinct_count(`is_slry`) over\ + \ bo_detail_new_user_id_ingestionTime_0s_5529601s as bo_detail_is_slry_multi_unique_count_44,\n\ + \ distinct_count(`trx_typ`) over bo_detail_new_user_id_ingestionTime_0_10 as\ + \ bo_detail_trx_typ_multi_unique_count_45,\n distinct_count(`trx_typ`) over\ + \ bo_detail_new_user_id_ingestionTime_0s_5529601s as bo_detail_trx_typ_multi_unique_count_46\n\ + from\n (select `eventTime` as `ingestionTime`, `new_user_id` as `new_user_id`,\ + \ bigint(0) as `trx_ts`, '' as `trx_typ`, double(0) as `trx_amt`, '' as `is_slry`,\ + \ reqId from `flattenRequest`)\n window bo_detail_new_user_id_ingestionTime_0s_5529601s\ + \ as (\nUNION (select `ingestionTime`, `new_user_id`, `trx_ts`, `trx_typ`, `trx_amt`,\ + \ `is_slry`, '' as reqId from `bo_detail`) partition by `new_user_id` order by\ + \ `ingestionTime` rows_range between 5529601s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),\n\ + \ bo_detail_new_user_id_ingestionTime_0_10 as (\nUNION (select `ingestionTime`,\ + \ `new_user_id`, `trx_ts`, `trx_typ`, `trx_amt`, `is_slry`, '' as reqId from `bo_detail`)\ + \ partition by `new_user_id` order by `ingestionTime` rows_range between 10 preceding\ + \ and 0 preceding INSTANCE_NOT_IN_WINDOW))\nas out4\non 
out0.reqId_1 = out4.reqId_42\n\ + ;" diff --git a/cases/integration_test/fz_ddl/test_luoji.yaml b/cases/integration_test/fz_ddl/test_luoji.yaml new file mode 100644 index 00000000000..65b8056909f --- /dev/null +++ b/cases/integration_test/fz_ddl/test_luoji.yaml @@ -0,0 +1,293 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: luoji +version: 0.5.0 +cases: +- id: 0 + desc: luoji test + mode: rtidb-batch-unsupport + inputs: + - columns: [ + reqId string, + eventTime timestamp, + f_requestId string, + f_cId string, + f_uId string, + f_cSrc string, + f_cLength double] + indexs: [ + index1:f_requestId:eventTime, + index2:f_uId:eventTime] + repeat: 100 + name: flattenRequest + rows: + - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0] + - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0] + - [reqId1, 1609894067190, f_requestId1, f_cId1-2, f_uId1, f_cSrc1-2, 1.0] + - [reqId2, 1609894067190, f_requestId2, f_cId2-1, f_uId2, f_cSrc2-1, 2.0] + - [reqId2, 1609894067190, f_requestId2, f_cId2-2, f_uId2, f_cSrc2-2, 2.0] + - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0] + - columns: [ + reqId string, + eventTime timestamp, + ingestionTime timestamp, + actionValue int, + ] + indexs: [index1:reqId:null:1:latest] + name: action + rows: + - [reqId1, 1609894067191, 1609894067191, 1] + - [NULL, 1609894067191, 1609894067191, 3] + sql: | + select * from + ( + select + reqId as 
reqId_1, + `reqId` as flattenRequest_reqId_original_0, + `eventTime` as flattenRequest_eventTime_original_1, + `f_requestId` as flattenRequest_f_requestId_original_2, + `f_cId` as flattenRequest_f_cId_original_3, + `f_cSrc` as flattenRequest_f_cSrc_original_8, + `f_uId` as flattenRequest_f_uId_original_17, + `f_cLength` as flattenRequest_f_cLength_original_10, + sum(`f_cLength`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cLength_window_sum_32, + distinct_count(`f_cId`) over flattenRequest_f_uId_eventTime_0_10 as flattenRequest_f_cId_window_unique_count_38, + fz_top1_ratio(`f_cId`) over flattenRequest_f_requestId_eventTime_0_10 as flattenRequest_f_cId_window_top1_ratio_39, + fz_top1_ratio(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cId_window_top1_ratio_40, + sum(`f_cLength`) over flattenRequest_f_requestId_eventTime_0s_432001s as flattenRequest_f_cLength_window_sum_41, + case when !isnull(lag(`f_cSrc`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cSrc`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cSrc_window_count_42, + case when !isnull(lag(`f_cSrc`, 0)) over flattenRequest_f_uId_eventTime_0s_604801s then count(`f_cSrc`) over flattenRequest_f_uId_eventTime_0s_604801s else null end as flattenRequest_f_cSrc_window_count_43, + fz_top1_ratio(`f_cId`) over flattenRequest_f_uId_eventTime_0_10 as flattenRequest_f_cId_window_top1_ratio_44, + fz_top1_ratio(`f_cId`) over flattenRequest_f_uId_eventTime_0s_604801s as flattenRequest_f_cId_window_top1_ratio_45, + fz_top1_ratio(`f_cId`) over flattenRequest_f_uId_eventTime_0s_432001s as flattenRequest_f_cId_window_top1_ratio_46, + case when !isnull(lag(`f_cId`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cId_window_count_47, + case when !isnull(lag(`f_cId`, 0)) over 
flattenRequest_f_uId_eventTime_0s_432001s then count(`f_cId`) over flattenRequest_f_uId_eventTime_0s_432001s else null end as flattenRequest_f_cId_window_count_48, + case when !isnull(lag(`f_cId`, 0)) over flattenRequest_f_uId_eventTime_0s_604801s then count(`f_cId`) over flattenRequest_f_uId_eventTime_0s_604801s else null end as flattenRequest_f_cId_window_count_49 + from + `flattenRequest` + window flattenRequest_f_requestId_eventTime_0s_604801s as (partition by `f_requestId` order by `eventTime` rows_range between 604801s preceding and 0s preceding), + flattenRequest_f_uId_eventTime_0_10 as (partition by `f_uId` order by `eventTime` rows_range between 10 preceding and 0 preceding), + flattenRequest_f_requestId_eventTime_0_10 as (partition by `f_requestId` order by `eventTime` rows_range between 10 preceding and 0 preceding), + flattenRequest_f_requestId_eventTime_0s_432001s as (partition by `f_requestId` order by `eventTime` rows_range between 432001s preceding and 0s preceding), + flattenRequest_f_uId_eventTime_0s_604801s as (partition by `f_uId` order by `eventTime` rows_range between 604801s preceding and 0s preceding), + flattenRequest_f_uId_eventTime_0s_432001s as (partition by `f_uId` order by `eventTime` rows_range between 432001s preceding and 0s preceding)) + as out0 + last join + ( + select + flattenRequest.reqId as reqId_32, + `action_reqId`.`actionValue` as action_actionValue_multi_direct_31 + from + `flattenRequest` + last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`) + as out1 + on out0.reqId_1 = out1.reqId_32; + batch_request: + columns: [ + reqId string, + eventTime timestamp, + f_requestId string, + f_cId string, + f_uId string, + f_cSrc string, + f_cLength double ] + rows: + - [reqId1, 1609894067190, f_requestId1, f_cId1, f_uId1, f_cSrc1, 1.0] + - [reqId1, 1609894067190, f_requestId1, f_cId1, f_uId1, NULL, 1.0] + - [reqId1, 1609894067190, f_requestId1, NULL, f_uId1, f_cSrc1, 1.0] + - [reqId2, 
1609894067190, f_requestId2, f_cId2, f_uId2, f_cSrc2, 2.0] + - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0] + expect: + success: true + schema: reqId_1:string, flattenRequest_reqId_original_0:string, flattenRequest_eventTime_original_1:timestamp, flattenRequest_f_requestId_original_2:string, flattenRequest_f_cId_original_3:string, flattenRequest_f_cSrc_original_8:string, flattenRequest_f_uId_original_17:string, flattenRequest_f_cLength_original_10:double, flattenRequest_f_cLength_window_sum_32:double, flattenRequest_f_cId_window_unique_count_38:bigint, flattenRequest_f_cId_window_top1_ratio_39:double, flattenRequest_f_cId_window_top1_ratio_40:double, flattenRequest_f_cLength_window_sum_41:double, flattenRequest_f_cSrc_window_count_42:bigint, flattenRequest_f_cSrc_window_count_43:bigint, flattenRequest_f_cId_window_top1_ratio_44:double, flattenRequest_f_cId_window_top1_ratio_45:double, flattenRequest_f_cId_window_top1_ratio_46:double, flattenRequest_f_cId_window_count_47:bigint, flattenRequest_f_cId_window_count_48:bigint, flattenRequest_f_cId_window_count_49:bigint, reqId_32:string, action_actionValue_multi_direct_31:int + rows: + - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, f_cSrc1, f_uId1, 1.000000, + 301.000000, # flattenRequest_f_cLength_window_sum_32 + 3, # distinct_count f_cId1, f_cId1-1 f_cId1-2 + 0.66445182724252494, # fz_top1_ratio f_cId1-1:200, f_cId1-2:100 f_cId1:1 -> 200/301 + 0.66445182724252494, + 301.000000, + 301, + 301, + 0.66445182724252494, + 0.66445182724252494, + 0.66445182724252494, + 301, 301, 301, reqId1, 1 ] + - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, NULL, f_uId1, 1.000000, + 301.000000, + 3, + 0.66445182724252494, + 0.66445182724252494, + 301.000000, + NULL, # case when !isnull(lag(`f_cSrc`, 0)) ... else NULL end + NULL, # case when !isnull(lag(`f_cSrc`, 0)) ... 
else NULL end + 0.66445182724252494, + 0.66445182724252494, + 0.66445182724252494, + 301, 301, 301, reqId1, 1 ] + - [ reqId1, reqId1, 1609894067190, f_requestId1, NULL, f_cSrc1, f_uId1, 1.000000, + 301.000000, # flattenRequest_f_cLength_window_sum_32 + 3, # distinct_count f_cId1, f_cId1-1 f_cId1-2 + 0.66666666666666663, + 0.66666666666666663, + 301.000000, + 301, + 301, + 0.66666666666666663, + 0.66666666666666663, + 0.66666666666666663, + NULL, NULL, NULL, # case when !isnull(lag(`f_cId`, 0)) then ... else NULL + reqId1, 1 ] + - [reqId2, reqId2, 1609894067190, f_requestId2, f_cId2, f_cSrc2, f_uId2, 2.000000, + 402.000000, + 3, + 0.49751243781094528, 0.49751243781094528, + 402.000000, + 201, 201, + 0.49751243781094528, 0.49751243781094528, 0.49751243781094528, + 201, 201, 201, + reqId2, NULL] + - [ NULL, NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_cSrcNul, f_uIdNull, 3.000000, + 303.000000, + 1, + 1.000000, 1.000000, + 303.000000, + 101, 101, + 1.000000, 1.000000, 1.000000, + 101, 101, 101, + NULL, 3 ] + +- id: 1 + desc: luoji test window flattenRequest_f_requestId_eventTime_0s_604801s without ttl + mode: rtidb-batch-unsupport + inputs: + - columns: [ + reqId string, + eventTime timestamp, + f_requestId string, + f_cId string, + f_uId string, + f_cSrc string, + f_cLength double] + indexs: [ + index1:f_requestId:eventTime, + index2:f_uId:eventTime] + repeat: 100 + name: flattenRequest + rows: + - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0] + - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0] + - [reqId1, 1609894067190, f_requestId1, f_cId1-2, f_uId1, f_cSrc1-2, 1.0] + - [reqId2, 1609894067190, f_requestId2, f_cId2-1, f_uId2, f_cSrc2-1, 2.0] + - [reqId2, 1609894067190, f_requestId2, f_cId2-2, f_uId2, f_cSrc2-2, 2.0] + - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0] + - columns: [ + reqId string, + eventTime timestamp, + ingestionTime timestamp, + actionValue int, + ] + indexs: 
[index1:reqId:null:1:latest] + name: action + rows: + - [reqId1, 1609894067191, 1609894067191, 1] + - [NULL, 1609894067191, 1609894067191, 3] + sql: | + select * from + ( + select + reqId as reqId_1, + `reqId` as flattenRequest_reqId_original_0, + `eventTime` as flattenRequest_eventTime_original_1, + `f_requestId` as flattenRequest_f_requestId_original_2, + `f_cId` as flattenRequest_f_cId_original_3, + `f_cSrc` as flattenRequest_f_cSrc_original_8, + `f_uId` as flattenRequest_f_uId_original_17, + `f_cLength` as flattenRequest_f_cLength_original_10, + sum(`f_cLength`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cLength_window_sum_32, + fz_top1_ratio(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cId_window_top1_ratio_40, + case when !isnull(lag(`f_cSrc`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cSrc`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cSrc_window_count_42, + case when !isnull(lag(`f_cId`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cId_window_count_47 + from + `flattenRequest` + window flattenRequest_f_requestId_eventTime_0s_604801s as (partition by `f_requestId` order by `eventTime` rows_range between 604801s preceding and 0s preceding)) + as out0 + last join + ( + select + flattenRequest.reqId as reqId_32, + `action_reqId`.`actionValue` as action_actionValue_multi_direct_31 + from + `flattenRequest` + last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`) + as out1 + on out0.reqId_1 = out1.reqId_32; + batch_request: + columns: [ + reqId string, + eventTime timestamp, + f_requestId string, + f_cId string, + f_uId string, + f_cSrc string, + f_cLength double ] + rows: + - [reqId1, 1609894067190, f_requestId1, f_cId1, f_uId1, f_cSrc1, 1.0] + - [reqId1, 1609894067190, 
f_requestId1, f_cId1, f_uId1, NULL, 1.0] + - [reqId1, 1609894067190, f_requestId1, NULL, f_uId1, f_cSrc1, 1.0] + - [reqId2, 1609894067190, f_requestId2, f_cId2, f_uId2, f_cSrc2, 2.0] + - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0] + expect: + success: true + schema: reqId_1:string, flattenRequest_reqId_original_0:string, flattenRequest_eventTime_original_1:timestamp, flattenRequest_f_requestId_original_2:string, flattenRequest_f_cId_original_3:string, flattenRequest_f_cSrc_original_8:string, flattenRequest_f_uId_original_17:string, flattenRequest_f_cLength_original_10:double, flattenRequest_f_cLength_window_sum_32:double, flattenRequest_f_cId_window_top1_ratio_40:double, flattenRequest_f_cSrc_window_count_42:bigint, flattenRequest_f_cId_window_count_47:bigint, reqId_32:string, action_actionValue_multi_direct_31:int + rows: + - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, f_cSrc1, f_uId1, 1.000000, 301.000000, 0.66445182724252494, 301, 301, reqId1, 1 ] + - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, NULL, f_uId1, 1.000000, 301.000000, 0.66445182724252494, NULL, 301, reqId1, 1 ] + - [ reqId1, reqId1, 1609894067190, f_requestId1, NULL, f_cSrc1, f_uId1, 1.000000, 301.000000, 0.66666666666666663, 301, NULL, reqId1, 1 ] + - [ reqId2, reqId2, 1609894067190, f_requestId2, f_cId2, f_cSrc2, f_uId2, 2.000000, 402.000000, 0.49751243781094528, 201, 201, reqId2, NULL ] + - [ NULL, NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_cSrcNul, f_uIdNull, 3.000000, 303.000000, 1.000000, 101, 101, NULL, 3 ] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cases/integration_test/fz_ddl/test_myhug.yaml b/cases/integration_test/fz_ddl/test_myhug.yaml new file mode 100644 index 00000000000..02d0f971040 --- /dev/null +++ b/cases/integration_test/fz_ddl/test_myhug.yaml @@ -0,0 +1,314 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: mybug +version: 0.5.0 +cases: +- id: 0 + desc: mybug test + mode: rtidb-batch-unsupport + inputs: + - + columns: ["reqId string","eventTime timestamp","uUserId string","zUserId string", + "uSex string","zSex string","zChannel string","uPlayGame string", + "uHasJoinedGroup string","uWatchMorning double","uWatchEvening double", + "uWatchAvgLength double","zSWihsperNum double" ] + indexs: [ + "index1:uUserId:eventTime", + "index2:zChannel:eventTime", + "index3:uSex:eventTime", + "index4:zUserId:eventTime", + "index5:uPlayGame:eventTime", + "index6:uHasJoinedGroup:eventTime", + "index7:zUserId|uUserId:eventTime" ] + repeat: 100 + name: flattenRequest + rows: + - [reqId1, 1609894067190, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0] + - [reqId2, 1609894067190, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0] + - [NULL, 1609894067190, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull, 1.0, 2.0, 3.0, 4.0] + - columns: [ + "reqId string", + "eventTime timestamp", + "ingestionTime timestamp", + "actionValue double"] + indexs: ["index1:reqId:eventTime"] + name: action + rows: + - [reqId1, 1609894067191, 1609894067191, 1.1] + - [NULL, 1609894067191, 1609894067191, 3.3] + - columns: [ "ingestionTime timestamp", + "zUserId string", + "uUserId string", + "nRequestTime timestamp", + "fWatchedTimeLen double" ] + indexs: [ "index1:zUserId|uUserId:ingestionTime" ] + name: bo_hislabel + 
rows: + - [ 1609894067191, zUserId1, uUserId1, 1609894067191, 1.0 ] + - [ 1609894067191, zUserId2, uUserId2, 1609894067191, 1.0 ] + - [ 1609894067191, zUserIdNull, uUserIdNull, 1609894067191, 1.0 ] + sql: | + select * from + ( + select + `reqId` as reqId_1, + `reqId` as flattenRequest_reqId_original_0, + `eventTime` as flattenRequest_eventTime_original_1, + `uUserId` as flattenRequest_uUserId_original_2, + `zUserId` as flattenRequest_zUserId_original_3, + `uSex` as flattenRequest_uSex_combine_77, + `zSex` as flattenRequest_zSex_original_8, + `zChannel` as flattenRequest_zChannel_original_14, + `uPlayGame` as flattenRequest_uPlayGame_original_67, + `uHasJoinedGroup` as flattenRequest_uHasJoinedGroup_original_46, + + `uWatchMorning` as flattenRequest_uWatchMorning_original_60, + `uWatchEvening` as flattenRequest_uWatchEvening_original_62, + `uWatchAvgLength` as flattenRequest_uWatchAvgLength_original_63, + `zSWihsperNum` as flattenRequest_zSWihsperNum_original_23, + + sum(`uWatchAvgLength`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchAvgLength_window_sum_76, + avg(`uWatchMorning`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchMorning_window_avg_78, + avg(`uWatchEvening`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchEvening_window_avg_79, + sum(`zSWihsperNum`) over flattenRequest_zChannel_eventTime_0s_172801s as flattenRequest_zSWihsperNum_window_sum_80, + avg(`uWatchAvgLength`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchAvgLength_window_avg_81, + + case when !isnull(lag(`zUserId`, 0)) over flattenRequest_uUserId_eventTime_0s_36001s then count(`zUserId`) over flattenRequest_uUserId_eventTime_0s_36001s else null end as flattenRequest_zUserId_window_count_82, + case when !isnull(lag(`zUserId`, 0)) over flattenRequest_uUserId_eventTime_0s_172801s then count(`zUserId`) over flattenRequest_uUserId_eventTime_0s_172801s else null end as flattenRequest_zUserId_window_count_83, + case 
when !isnull(lag(`zUserId`, 0)) over flattenRequest_uSex_eventTime_0_10 then count(`zUserId`) over flattenRequest_uSex_eventTime_0_10 else null end as flattenRequest_zUserId_window_count_84, + case when !isnull(lag(`zUserId`, 0)) over flattenRequest_uUserId_eventTime_0_10 then count(`zUserId`) over flattenRequest_uUserId_eventTime_0_10 else null end as flattenRequest_zUserId_window_count_85, + case when !isnull(lag(`uUserId`, 0)) over flattenRequest_zUserId_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_zUserId_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_86, + case when !isnull(lag(`uUserId`, 0)) over flattenRequest_zUserId_eventTime_0s_172801s then count(`uUserId`) over flattenRequest_zUserId_eventTime_0s_172801s else null end as flattenRequest_uUserId_window_count_87, + case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uPlayGame_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_uPlayGame_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_88, + case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uHasJoinedGroup_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_uHasJoinedGroup_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_89, + case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uHasJoinedGroup_eventTime_0s_172801s then count(`uUserId`) over flattenRequest_uHasJoinedGroup_eventTime_0s_172801s else null end as flattenRequest_uUserId_window_count_90, + case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uSex_eventTime_0s_172801s then count(`uUserId`) over flattenRequest_uSex_eventTime_0s_172801s else null end as flattenRequest_uUserId_window_count_91, + case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uSex_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_uSex_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_92 + from + `flattenRequest` + window flattenRequest_uUserId_eventTime_0_10 as 
(partition by `uUserId` order by `eventTime` rows between 10 preceding and 0 preceding), + flattenRequest_zChannel_eventTime_0s_172801s as (partition by `zChannel` order by `eventTime` rows_range between 172801s preceding and 0s preceding), + flattenRequest_uUserId_eventTime_0s_36001s as (partition by `uUserId` order by `eventTime` rows_range between 36001s preceding and 0s preceding), + flattenRequest_uUserId_eventTime_0s_172801s as (partition by `uUserId` order by `eventTime` rows_range between 172801s preceding and 0s preceding), + flattenRequest_uSex_eventTime_0_10 as (partition by `uSex` order by `eventTime` rows between 10 preceding and 0 preceding), + flattenRequest_zUserId_eventTime_0s_36001s as (partition by `zUserId` order by `eventTime` rows_range between 36001s preceding and 0s preceding), + flattenRequest_zUserId_eventTime_0s_172801s as (partition by `zUserId` order by `eventTime` rows_range between 172801s preceding and 0s preceding), + flattenRequest_uPlayGame_eventTime_0s_36001s as (partition by `uPlayGame` order by `eventTime` rows_range between 36001s preceding and 0s preceding), + flattenRequest_uHasJoinedGroup_eventTime_0s_36001s as (partition by `uHasJoinedGroup` order by `eventTime` rows_range between 36001s preceding and 0s preceding), + flattenRequest_uHasJoinedGroup_eventTime_0s_172801s as (partition by `uHasJoinedGroup` order by `eventTime` rows_range between 172801s preceding and 0s preceding), + flattenRequest_uSex_eventTime_0s_172801s as (partition by `uSex` order by `eventTime` rows_range between 172801s preceding and 0s preceding), + flattenRequest_uSex_eventTime_0s_36001s as (partition by `uSex` order by `eventTime` rows_range between 36001s preceding and 0s preceding)) + as out0 + last join + ( + select + flattenRequest.reqId as reqId_74, + `action_reqId`.`actionValue` as action_actionValue_multi_direct_73 + from + `flattenRequest` + last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`) + as out1 
+ on out0.reqId_1 = out1.reqId_74 + last join + ( + select + reqId as reqId_75, + max(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_max_74, + avg(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_avg_75 + from + (select `eventTime` as `ingestionTime`, `zUserId` as `zUserId`, `uUserId` as `uUserId`, timestamp('2019-07-18 09:20:20') as `nRequestTime`, double(0) as `fWatchedTimeLen`, reqId from `flattenRequest`) + window bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as ( + UNION (select `ingestionTime`, `zUserId`, `uUserId`, `nRequestTime`, `fWatchedTimeLen`, '' as reqId from `bo_hislabel`) partition by `zUserId`,`uUserId` order by `ingestionTime` rows_range between 172801s preceding and 1s preceding INSTANCE_NOT_IN_WINDOW)) + as out2 + on out0.reqId_1 = out2.reqId_75 + ; + batch_request: + columns: [ + "reqId string", + "eventTime timestamp", + "uUserId string", + "zUserId string", + "uSex string", + "zSex string", + "zChannel string", + "uPlayGame string", + "uHasJoinedGroup string", + "uWatchMorning double", + "uWatchEvening double", + "uWatchAvgLength double", + "zSWihsperNum double"] + rows: + - [reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0] + - [reqId2, 1609894068191, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0] + - [reqId2, 1609894068191, uUserId2, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0] + - [NULL, 1609894068191, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull, 1.0, 2.0, 3.0, 4.0] + expect: + schema: > + reqId_1:string, flattenRequest_reqId_original_0:string, flattenRequest_eventTime_original_1:timestamp, flattenRequest_uUserId_original_2:string, + flattenRequest_zUserId_original_3:string, 
flattenRequest_uSex_combine_77:string, flattenRequest_zSex_original_8:string, + flattenRequest_zChannel_original_14:string, flattenRequest_uPlayGame_original_67:string, flattenRequest_uHasJoinedGroup_original_46:string, + flattenRequest_uWatchMorning_original_60:double, flattenRequest_uWatchEvening_original_62:double, flattenRequest_uWatchAvgLength_original_63:double, + flattenRequest_zSWihsperNum_original_23:double, flattenRequest_uWatchAvgLength_window_sum_76:double, flattenRequest_uWatchMorning_window_avg_78:double, + flattenRequest_uWatchEvening_window_avg_79:double, flattenRequest_zSWihsperNum_window_sum_80:double, flattenRequest_uWatchAvgLength_window_avg_81:double, + flattenRequest_zUserId_window_count_82:bigint, flattenRequest_zUserId_window_count_83:bigint, flattenRequest_zUserId_window_count_84:bigint, + flattenRequest_zUserId_window_count_85:bigint, flattenRequest_uUserId_window_count_86:bigint, flattenRequest_uUserId_window_count_87:bigint, + flattenRequest_uUserId_window_count_88:bigint, flattenRequest_uUserId_window_count_89:bigint, flattenRequest_uUserId_window_count_90:bigint, + flattenRequest_uUserId_window_count_91:bigint, flattenRequest_uUserId_window_count_92:bigint, reqId_74:string, action_actionValue_multi_direct_73:double, + reqId_75:string, bo_hislabel_fWatchedTimeLen_multi_max_74:double, bo_hislabel_fWatchedTimeLen_multi_avg_75:double + rows: + - [ reqId1, reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, + 1.000000, 2.000000, 3.000000, 4.000000, + 33.000000, 1.000000, 2.000000, 404.000000, 3.000000, + 101, 101, 11, 11, 101, 101, 101, 101, 101, 101, 101, + reqId1, 1.1, reqId1, NULL, NULL ] + - [ reqId2, reqId2, 1609894068191, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, + 1.000000, 2.000000, 3.000000, 4.000000, + 33.000000, 1.000000, 2.000000, 404.000000, 3.000000, + 101, 101, 11, 11, 101, 101, 101, 101, 101, 101, 101, + reqId2, NULL, reqId2, 1.000000, 1.000000 ] 
+ - [ reqId2, reqId2, 1609894068191, uUserId2, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, + 1.000000, 2.000000, 3.000000, 4.000000, + 33.000000, 1.000000, 2.000000, 404.000000, 3.000000, + NULL, NULL, NULL, NULL, 1, 1, 101, 101, 101, 101, 101, + reqId2, NULL, reqId2, NULL, NULL ] + - [ NULL, NULL, 1609894068191, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull, + 1.000000, 2.000000, 3.000000, 4.000000, + 33.000000, 1.000000, 2.000000, 404.000000, 3.000000, + 101, 101, 11, 11, 101, 101, 101, 101, 101, 101, 101, + NULL, 3.3, NULL, 1.000000, 1.000000 ] +- id: 1 + desc: mybug bo_hislabel_fWatchedTimeLen_multi_max_74 + mode: rtidb-batch-unsupport + inputs: + - columns: [ "reqId string", + "eventTime timestamp", + "uUserId string", + "zUserId string", + "uSex string", + "zSex string", + "zChannel string", + "uPlayGame string", + "uHasJoinedGroup string", + "uWatchMorning double", + "uWatchEvening double", + "uWatchAvgLength double", + "zSWihsperNum double" ] + indexs: [ + "index1:uUserId:eventTime", + "index2:zChannel:eventTime", + "index3:uSex:eventTime", + "index4:zUserId:eventTime", + "index5:uPlayGame:eventTime", + "index6:uHasJoinedGroup:eventTime", + "index7:zUserId|uUserId:eventTime" ] + name: flattenRequest + rows: + - [reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0] + - columns: [ + "reqId string", + "eventTime timestamp", + "ingestionTime timestamp", + "actionValue double"] + indexs: ["index1:reqId:eventTime"] + name: action + rows: + - [reqId1, 1609894067191, 1609894067191, 1.1] + - [NULL, 1609894067191, 1609894067191, 3.3] + - columns: [ "ingestionTime timestamp", + "zUserId string", + "uUserId string", + "nRequestTime timestamp", + "fWatchedTimeLen double"] + indexs: ["index1:zUserId|uUserId:ingestionTime"] + name: bo_hislabel + repeat: 100 + rows: + - [ 1609894067191, zUserId1, uUserId1, 1609894067191, 1.0 ] + - [ 
1609894067191, zUserId2, uUserId2, 1609894067191, 1.0 ] + - [ 1609894067191, NULL, NULL, 1609894067191, 1.0 ] + sql: |- + select * from + ( + select + `reqId` as reqId_1 + from `flattenRequest`) as out0 + last join + ( + select + flattenRequest.reqId as reqId_74, + `action_reqId`.`actionValue` as action_actionValue_multi_direct_73 + from + `flattenRequest` + last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`) + as out1 + on out0.reqId_1 = out1.reqId_74 + last join + ( + select + reqId as reqId_75, + sum(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_sum_73, + max(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_max_74, + avg(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_avg_75 + from + (select `eventTime` as `ingestionTime`, `zUserId` as `zUserId`, `uUserId` as `uUserId`, timestamp('2019-07-18 09:20:20') as `nRequestTime`, double(0) as `fWatchedTimeLen`, reqId from `flattenRequest`) + window bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as ( + UNION (select `ingestionTime`, `zUserId`, `uUserId`, `nRequestTime`, `fWatchedTimeLen`, '' as reqId from `bo_hislabel`) partition by `zUserId`,`uUserId` order by `ingestionTime` rows_range between 172801s preceding and 1s preceding INSTANCE_NOT_IN_WINDOW)) + as out2 + on out0.reqId_1 = out2.reqId_75 + ; + tags: ["@baoxinqi, avg 空表处理需要对齐feql/mysql"] + batch_request: + columns: [ + "reqId string", + "eventTime timestamp", + "uUserId string", + "zUserId string", + "uSex string", + "zSex string", + "zChannel string", + "uPlayGame string", + "uHasJoinedGroup string", + "uWatchMorning double", + "uWatchEvening double", + "uWatchAvgLength double", + "zSWihsperNum double" ] + indexs: [ + "index1:uUserId:eventTime", + "index2:zChannel:eventTime", + "index3:uSex:eventTime", + 
"index4:zUserId:eventTime", + "index5:uPlayGame:eventTime", + "index6:uHasJoinedGroup:eventTime", + "index7:zUserId|uUserId:eventTime", + "index8:uUserId:eventTime", + "index9:uUserId:eventTime" ] + name: flattenRequest + rows: + # pure history window is empty: rows out of time range + - [reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0] + # pure history window isn't empty + - [reqId2, 1609894068191, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0] + # last join key is NULL + - [NULL, 1609894068191, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull, 1.0, 2.0, 3.0, 4.0] + - [reqId2, 1609894068191, uUserId2, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0] + - [reqId2, 1609894068191, NULL, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0] + expect: + schema: reqId_1:string, reqId_74:string, action_actionValue_multi_direct_73:double, reqId_75:string, bo_hislabel_fWatchedTimeLen_multi_sum_73:double, bo_hislabel_fWatchedTimeLen_multi_max_74:double, bo_hislabel_fWatchedTimeLen_multi_avg_75:double + rows: + - [ reqId1, reqId1, 1.1, reqId1, NULL, NULL, NULL ] + - [ reqId2, reqId2, NULL, reqId2, 100.0, 1.0, 1.0 ] + - [ NULL, NULL, 3.3, NULL, NULL, NULL, NULL] + - [ reqId2, reqId2, NULL, reqId2, NULL, NULL, NULL ] + - [ reqId2, reqId2, NULL, reqId2, 100.0, 1.0, 1.0 ] diff --git a/cases/integration_test/join/test_lastjoin_complex.yaml b/cases/integration_test/join/test_lastjoin_complex.yaml new file mode 100644 index 00000000000..07b65aec95c --- /dev/null +++ b/cases/integration_test/join/test_lastjoin_complex.yaml @@ -0,0 +1,1197 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: lastjoin+窗口 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c4, + sum({1}.c4) OVER w1 as w1_c4_sum + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,32,32] + - [2,"aa",21,32,64] + - [3,"aa",22,32,64] + - [4,"bb",23,34,34] + - [5,"bb",24,34,68] + - id: 1 + desc: lastjoin+窗口-没有匹配的列 + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 
date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"cc",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"cc",21,34,1.2,2.2,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c4, + sum({1}.c4) OVER w1 as w1_c4_sum, + count({1}.c4) OVER w1 as w1_c4_count + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint","w1_c4_count bigint"] + rows: + - [1,"aa",20,32,32,1] + - [2,"aa",21,32,64,2] + - [3,"aa",22,32,64,2] + - [4,"bb",23,NULL,NULL,0] + - [5,"bb",24,NULL,NULL,0] + - id: 2 + desc: lastjoin+窗口+union + tags: ["TODO","暂时不支持 lastjoin window + union共存"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["d1 string","d4 bigint","d7 timestamp"] + indexs: ["index1:d1:d7"] + rows: + - ["aa",30,1590738990000] + - ["aa",32,1590738990002] + - ["bb",34,1590738990004] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","d1 
string","d4 bigint","d7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02","aa",31,1590738990001] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04","bb",32,1590738990003] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select id,{0}.c1,{0}.c3,{1}.d4, + sum({1}.d4) OVER w1 as w1_c4_sum + from {0} + last join {1} ORDER BY {1}.d7 on {0}.c1={1}.d1 + WINDOW + w1 AS (UNION {2} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","d4 bigint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,32,32] + - [3,"aa",22,32,63] + - [5,"bb",24,34,67] + - id: 3 + desc: lastjoin+窗口+union子查询 + tags: ["TODO","暂时不支持 lastjoin window + union共存"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["d1 string","d4 bigint","d7 timestamp"] + indexs: ["index1:d1:d7"] + rows: + - ["aa",30,1590738990000] + - ["aa",32,1590738990002] + - ["bb",34,1590738990004] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","d4 bigint"] + indexs: ["index1:c1:c7"] + rows: + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02",31] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04",32] + - + columns: ["d1 string","d7 timestamp"] + indexs: ["index1:d1:d7"] + rows: + - ["aa",1590738990001] + - ["bb",1590738990003] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select id,{0}.c1,{0}.c3,{1}.d4, + sum({1}.d4) OVER w1 as w1_c4_sum + from {0} + last join {1} ORDER BY {1}.d7 on {0}.c1={1}.d1 + WINDOW + w1 AS (UNION + (select id,c1,c3,c4,c5,c6,c7,c8,d1,d4,d7 from {2} last join {3} ORDER BY {3}.d7 on {2}.c1={3}.d1) + PARTITION BY {0}.c1 ORDER BY 
{0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","d4 bigint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,32,32] + - [3,"aa",22,32,63] + - [5,"bb",24,34,67] + - id: 4 + desc: lastjoin-一个子查询 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select id,{0}.c1,{0}.c3,t1.c4, + sum(t1.c4) OVER w1 as w1_c4_sum + from {0} + last join (select c1,c4,c7 from {1}) as t1 ORDER BY t1.c7 on {0}.c1=t1.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,32,32] + - [2,"aa",21,32,64] + - [3,"aa",22,32,64] + - [4,"bb",23,34,34] + - [5,"bb",24,34,68] + - id: 5 + desc: 基于子查询作窗口-功能边界外 + tags: ["TODO","client core"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - 
[4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select t1.id,t2.c1,t2.c3,t1.c4, + sum(t1.c4) OVER w1 as w1_c4_sum + from (select id,c1,c3,c4,c7 from {0}) as t2 + last join (select c1,c4,c7 from {1}) as t1 ORDER BY t1.c7 on t2.c1=t1.c1 + WINDOW + w1 AS (PARTITION BY t2.c1 ORDER BY t2.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + success: false + - id: 6-1 + desc: 两个子查询lastjoin-子查询带窗口特征-rtidb不支持 + mode: offline-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",23,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",24,34,1.2,2.2,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select id,t2.c1,t2.c3,t1.c4, + sum(t1.c4) OVER w1 as w1_c4_sum, + sum(t2.w2_c3_sum) OVER w1 as w2_c3_sum, + sum(t1.w3_c4_sum) OVER w1 as w3_c4_sum + 
from (select id,c1,c3,c4,c7,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)) as t2 + last join (select c1,c4,c7,sum({1}.c4) OVER w3 as w3_c4_sum from {1} WINDOW w3 AS (PARTITION BY {1}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1 + ORDER BY t1.c7 on t2.c1=t1.c1 + WINDOW + w1 AS (PARTITION BY t2.c1 ORDER BY t2.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + success: false + - id: 6-2 + desc: 两个子查询lastjoin-子查询带窗口特征-离线场景 + tags: ["TODO", "@chenjing", "0.3.0", ""] + mode: rtidb-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",23,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",24,34,1.2,2.2,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select id,t2.c1,t2.c3,t1.c4, + sum(t1.c4) OVER w1 as w1_c4_sum, + sum(t2.w2_c3_sum) OVER w1 as w2_c3_sum, + sum(t1.w3_c4_sum) OVER w1 as w3_c4_sum + from (select id,c1,c3,c4,c7,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)) as t2 + last join (select c1,c4,c7,sum({1}.c4) OVER w3 as w3_c4_sum from {1} WINDOW w3 AS (PARTITION BY {1}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1 
+ ORDER BY t1.c7 on t2.c1=t1.c1 + WINDOW + w1 AS (PARTITION BY t2.c1 ORDER BY t2.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + success: true + - id: 7 + desc: lastjoin-ts-null + tags: ["TODO", "@chendihao", "zhaowei", "lastjoin对null的处理现在跟在线无法一致"] + mode: offline-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,null,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c4 + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint"] + rows: + - [1,"aa",20,31] + - [2,"aa",21,31] + - [3,"aa",22,31] + - [4,"bb",23,34] + - [5,"bb",24,34] + - id: 8 + desc: lastjoin三张表 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: 
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",20,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",20,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.2,1606766400000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"ee",20,30,1.1,2.1,1606752000000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.2,1606838400000,"2020-05-01"] + - [3,"ee",20,32,1.1,2.3,1606924800000,"2020-05-01"] + - [4,"ee",20,33,1.1,2.4,1607011200000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.5,1606752000000,"2020-05-02"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c4,{2}.c5,{3}.c6 + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + last join {2} ORDER BY {2}.c7 on {0}.c1={2}.c1 + last join {3} ORDER BY {3}.c7 on {0}.c1={3}.c1 + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double"] + rows: + - [1,"aa",20,31,1.1,null] + - [2,"bb",21,null,1.4,2.2] + - [3,"cc",22,32,null,null] + - [4,"dd",23,33,null,null] + - [5,"ee",24,34,1.5,2.4] + - id: 9-1 + desc: lastjoin三张表-5个window, rtidb模式不支持 + mode: offline-unsupport + tags: ["TODO","边界外", "@zhaowei", "后面需要支持多张表lastjoin后作window"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - 
[3,"aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",20,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",20,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"bb",21,34,1.5,2.2,1606766400000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"bb",20,30,1.1,2.1,1606752000000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.2,1606838400000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.3,1606924800000,"2020-05-01"] + - [4,"ee",20,33,1.1,2.4,1607011200000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.5,1606752000000,"2020-05-02"] + sql: | + select id,{0}.c1,{0}.c3,{1}.c4,{2}.c5,{3}.c6, + sum({0}.c4) OVER w1 as w1_c4_sum, + sum({1}.c4) OVER w2 as w2_c4_sum, + sum({2}.c4) OVER w3 as w3_c4_sum, + sum({3}.c4) OVER w4 as w4_c4_sum, + count({3}.c4) OVER w5 as w5_c4_count + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + last join {2} ORDER BY {2}.c7 on {0}.c1={2}.c1 + last join {3} ORDER BY {3}.c7 on {0}.c1={3}.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1m PRECEDING AND CURRENT ROW), + w3 
AS (PARTITION BY {0}.c1 ORDER BY {2}.c7 ROWS_RANGE BETWEEN 1h PRECEDING AND CURRENT ROW), + w4 AS (PARTITION BY {0}.c1 ORDER BY {3}.c7 ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW), + w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 9-2 + desc: lastjoin三张表-5个window-离线场景支持 + mode: rtidb-unsupport + tags: ["TODO", "@chendihao", "last join 多张表后做window离线支持有问题"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",20,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",20,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"bb",21,34,1.5,2.2,1606766400000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"bb",20,30,1.1,2.1,1606752000000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.2,1606838400000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.3,1606924800000,"2020-05-01"] + - 
[4,"ee",20,33,1.1,2.4,1607011200000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.5,1606752000000,"2020-05-02"] + sql: | + select id,{0}.c1,{0}.c3,{1}.c4,{2}.c5,{3}.c6, + sum({0}.c4) OVER w1 as w1_c4_sum, + sum({1}.c4) OVER w2 as w2_c4_sum, + sum({2}.c4) OVER w3 as w3_c4_sum, + sum({3}.c4) OVER w4 as w4_c4_sum, + count({3}.c4) OVER w5 as w5_c4_count + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + last join {2} ORDER BY {2}.c7 on {0}.c1={2}.c1 + last join {3} ORDER BY {3}.c7 on {0}.c1={3}.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1m PRECEDING AND CURRENT ROW), + w3 AS (PARTITION BY {0}.c1 ORDER BY {2}.c7 ROWS_RANGE BETWEEN 1h PRECEDING AND CURRENT ROW), + w4 AS (PARTITION BY {0}.c1 ORDER BY {3}.c7 ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW), + w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true +# order: id +# columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","w1_c4_sum bigint","w2_c4_sum bigint","w3_c4_sum bigint","w4_c4_sum bigint","w5_c4_count bigint"] +# rows: +# - [1,"aa",20,31,1.1,null,30,32,31,null,0] +# - [2,"aa",21,null,1.4,2.2,61,64,62,null,0] +# - [3,"aa",22,32,null,null,63,64,62,null,0] +# - [4,"bb",23,33,null,null,33,34,34,31,1] +# - [5,"bb",24,34,1.5,2.4,67,68,68,62,2] + - id: 10 + desc: t1 join t2 join t3,t2的key产出为null + mode: offline-unsupport + tags: ["@chendihao", "这个场景离线的预期不正确,需要迪豪看看"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id 
int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c3,{2}.c4 + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + last join {2} ORDER BY {2}.c7 on {1}.c3={2}.c3 + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c3 int","c4 bigint"] + rows: + - [1,"aa",20,21,34] + - [2,"bb",21,null,32] + - [3,"cc",22,21,34] + - [4,"dd",23,21,34] + - [5,"ee",24,24,null] + - id: 11 + desc: (t1 join t2) join t3 + mode: offline-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"] + - 
[3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"] + sql: | + select + t1.id,t1.c1,t1.c3,{2}.c4 + from ( + select {0}.id,{0}.c1,{1}.c3 + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + ) as t1 last join {2} ORDER BY {2}.c7 on t1.c3={2}.c3 + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint"] + rows: + - [1,"aa",21,34] + - [2,"bb",null,32] + - [3,"cc",21,34] + - [4,"dd",21,34] + - [5,"ee",24,null] + - id: 11-2 + desc: (t1 join t2) join t3 error column resolved + tags: ["TODO", "@baoxinqi 重复列没有报错,理论上要计划编译失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 
bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"] + sql: | + select + id,t1.c1,t1.c3,{2}.c4 + from ( + select id,{0}.c1,{1}.c3 + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + ) as t1 last join {2} ORDER BY {2}.c7 on t1.c3={2}.c3 + ; + expect: + success: false + - id: 12 + desc: t1 join (t2 join t3) + mode: rtidb-unsupport + tags: ["@zhaowei RITDB边界外的能力join的时候主表只有一张","http://jira.4paradigm.com/browse/FEX-1014"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"] + - 
[5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"] + sql: | + select + {0}.id,{0}.c1,t1.c3,t1.c4 + from + {0} last join + (select {1}.c1,{1}.c3,{1}.c7,{2}.c4 from {1} last join {2} order by {2}.c7 on {1}.c3={2}.c3) as t1 + order by t1.c7 on {0}.c1=t1.c1; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint"] + rows: + - [1,"aa",21,34] + - [2,"bb",null,null] + - [3,"cc",21,34] + - [4,"dd",21,34] + - [5,"ee",24,null] + - id: 13-1 + desc: t1 join (t2 join t3)-rtidb功能边界外的查询, join包含两张主表 + mode: request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"] + sql: | + select + {0}.id,{0}.c1,t1.c3,t1.c4 + from + {0} last join + (select {1}.c1,{1}.c3,{2}.c7,{2}.c4 from {1} last join {2} order by {2}.c7 on {1}.c3={2}.c3) as t1 + order by 
t1.c7 on {0}.c1=t1.c1; + expect: + success: true + order: id + columns: [ "id int", "c1 string", "c3 int", "c4 bigint"] + rows: + - [ 1, aa, 21, 34 ] + - [ 2, bb, NULL, NULL ] + - [ 3, cc, 21, 34 ] + - [ 4, dd, 21, 34 ] + - [ 5, ee, 24, NULL ] + + - id: 13-2 + desc: t1 join (t2 join t3)-key和ts不是来自同一个主表 + tags: ["TODO", "@zhaowei", "case构造不合理,需要构造(t1 join t2) join t3 - key 和 ts不来自同一个主表"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"] + - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"] + - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"] + sql: | + select + {0}.id,{0}.c1,t1.c3,t1.c4 + from + {0} last join + (select {1}.c1,{1}.c3,{2}.c7,{2}.c4 from {1} last join {2} order by {2}.c7 on {1}.c3={2}.c3) as t1 + order by t1.c7 on {0}.c1=t1.c1; + expect: + success: false + - id: 14 + desc: lastjoin-重名 + tags: ["TODO", "@baoxinqi select c3 预期column resolved error"] + 
inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,null,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + sql: | + select {0}.id,{0}.c1,c3,c3,{1}.c4 + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + ; + expect: + success: false + - id: 15 + desc: lastjoin-重名,指定不同的表名-在线场景 + tags: ["TODO", "@zhaowei", "离线场景预期不一样,后面和可能需要针对离线在线配置不同预期"] + mode: offline-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,null,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c3,{1}.c4 + from {0} + last join {1} 
ORDER BY {1}.c7 on {0}.c1={1}.c1 + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c3 int","c4 bigint"] + rows: + - [1,"aa",20,20,31] + - [2,"aa",21,20,31] + - [3,"aa",22,20,31] + - [4,"bb",23,21,34] + - [5,"bb",24,21,34] + + - id: 16 + desc: 两个子查询lastjoin,拼接条件不是主表的索引 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + select t1.id,t2.c8,t2.c3,t1.c4 + from + (select id,c1,c3,c4,c7,c8 from {0}) as t2 + last join + (select c1,c4,c7,c8 from {1}) as t1 + ORDER BY t1.c7 on t2.c8=t1.c8 + ; + expect: + success: false + - id: 17-1 + desc: 两个子查询lastjoin,order不是主表的ts-rtidb不支持 + mode: request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + select id,t2.c8,t2.c3,t1.c4 + from + (select id,c1,c3,c4,c7,c8 from {0}) as t2 + last join + (select c1,c4,c7,c8 from {1}) as t1 + ORDER BY t1.c4 on t2.c1=t1.c1 + ; + expect: + success: true + columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ] + rows: + - [ 1, '2020-05-01', 20, 30 ] + + - id: 17-2 + desc: 两个子查询lastjoin,order不是主表的ts-离线支持 + mode: rtidb-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - + columns: ["id int","c1 
string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + select id,t2.c8,t2.c3,t1.c4 + from + (select id,c1,c3,c4,c7,c8 from {0}) as t2 + last join + (select c1,c4,c7,c8 from {1}) as t1 + ORDER BY t1.c4 on t2.c1=t1.c1 + ; + expect: + success: true + - id: 18-1 + desc: 两个子查询lastjoin,拼接条件不是主表的索引-不带orderby-rtidb边界外 + mode: request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + select id,t2.c8,t2.c3,t1.c4 + from + (select id,c1,c3,c4,c7,c8 from {0}) as t2 + last join + (select c1,c4,c7,c8 from {1}) as t1 + on t2.c8=t1.c8 + ; + expect: + success: true + columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ] + rows: + - [ 1, '2020-05-01', 20, 30 ] + + - id: 18-2 + desc: 两个子查询lastjoin,拼接条件不是主表的索引-不带orderby-离线支持 + mode: request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + select id,t2.c8,t2.c3,t1.c4 + from + (select id,c1,c3,c4,c7,c8 from {0}) as t2 + last join + (select c1,c4,c7,c8 from {1}) as t1 + on t2.c8=t1.c8 + ; + expect: + success: true + columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ] + rows: + - [ 1, '2020-05-01', 20, 30 ] + + - id: 19-1 + desc: 两个子查询lastjoin-子查询带窗口特征-没有使用索引-不带orderby + mode: 
request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-02"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-02"] + sql: | + select id,t2.c1,t2.c3,t1.c4, t2.w2_c3_sum, t1.w3_c4_sum + from (select id,c1,c3,c4,c7,c8,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)) as t2 + last join (select c1,c4,c7,c8,sum({0}.c4) OVER w3 as w3_c4_sum from {0} WINDOW w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1 + on t2.c7=t1.c7 + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint", "w2_c3_sum int", "w3_c4_sum bigint"] + rows: + - [1,"aa",20,30, 20, 30] + - [2,"aa",21,31, 41, 61] + - [3,"aa",22,32, 63, 63] + - [4,"bb",23,33, 23, 33] + - [5,"bb",24,34, 47, 67] + - id: 20 + desc: 两个子查询lastjoin-子查询带窗口特征-没有使用索引-带orderby + mode: request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + select id,t2.c1,t2.c3,t1.c4 + from (select id,c1,c3,c4,c7,c8,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)) as t2 + last join (select c1,c4,c7,c8,sum({1}.c4) OVER w3 as w3_c4_sum from {1} WINDOW w3 AS (PARTITION BY {1}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND 
CURRENT ROW)) as t1 + ORDER BY t1.c7 on t2.c8=t1.c8 + ; + expect: + success: true + columns: [ "id int", "c1 string", "c3 int", "c4 bigint" ] + rows: + - [ 1, aa, 20, 30 ] + + - id: 21 + desc: lastjoin列名重复-窗口没有指定表名 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"bb",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c4, + sum({1}.c4) OVER w1 as w1_c4_sum + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,31,31] + - [2,"aa",21,31,62] + - [3,"aa",22,31,62] + - [4,"bb",23,34,34] + - [5,"bb",24,34,68] + - id: 22 + desc: lastjoin后group by + mode: request-unsupport, cluster-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ 
"bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 group by {0}.c1; + expect: + order: c1 + columns: [ "c1 string","v1 bigint" ] + rows: + - [ "aa",26 ] + - [ "cc",151 ] + - id: 23 + desc: lastjoin后group by, left key is match with left table index + mode: request-unsupport, cluster-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 group by {0}.c1; + expect: + order: c1 + columns: [ "c1 string","v1 bigint" ] + rows: + - [ "aa",26 ] + - [ "cc",151 ] + - id: 24 + desc: lastjoin后group by with left key and index key + mode: request-unsupport, cluster-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: | + select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 and {0}.c2 = + {1}.c2 group by {0}.c1; + expect: + order: c1 + columns: [ "c1 string","v1 bigint" ] + rows: + - [ "aa",13 ] + - [ "cc",151 ] diff --git a/cases/integration_test/join/test_lastjoin_simple.yaml b/cases/integration_test/join/test_lastjoin_simple.yaml new file mode 100644 index 00000000000..05a27164047 --- /dev/null +++ 
b/cases/integration_test/join/test_lastjoin_simple.yaml @@ -0,0 +1,1068 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 1 + desc: 正常拼接 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1; + expect: + order: c1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + - id: 2 + desc: 右表没有匹配 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "dd",41,151,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1; + expect: + columns: [ "c1 
string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,null,null ] + - id: 3 + desc: 右表匹配了多条-bigint + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - id: 4-1 + desc: Last Join 无order by, 拼表条件命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "dd", 41, NULL, NULL ] + - id: 4-2 + desc: Last Join 无order by, 拼表条件没有命中索引-performance-sensitive环境下编译失败 + mode: non-performance-sensitive-unsupport, offline-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ 
"aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + success: false + - id: 4-2 + desc: Last Join 无order by, 部分拼表条件命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",3,14,1590738990000 ] + - [ "aa",4,15,1590738991000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c2 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",20,15,1590738991000 ] + - [ "bb",21,131,1590738990000 ] + - [ "dd", 41, NULL, NULL ] + - id: 4-3 + desc: Last Join 无order by, 拼表条件命中部分的组合索引(前缀索引), performance-sensitive下失败 + mode: non-performance-sensitive-unsupport, offline-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1|c2:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",3,14,1590738990000 ] + - [ "aa",4,15,1590738991000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4; + expect: + success: false + - id: 4-4 + desc: Last Join 无order by, 拼表条件命中部分的组合索引(后缀索引) + mode: non-performance-sensitive-unsupport, offline-unsupport 
+ inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2|c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",3,14,1590738990000 ] + - [ "aa",4,15,1590738991000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4; + expect: + success: false + - id: 4-5 + desc: Last Join 无order by, 拼表条件命中索引, 副表多条命中 + tags: [ "注意offline随机拼接最后一条,改变结果顺序可能导致Spark结果不符合预期" ] + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",51,130,1590738992000 ] + - [ "bb",31,132,1590738989000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,132,1590738989000 ] + - [ "dd", 41, NULL, NULL ] + - id: 4-6 + desc: Last Join 无order by, 拼表条件没有命中索引-离线支持 + mode: rtidb-unsupport,cli-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select 
{0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 2, 13, 1590738989000 ] + - [ "bb", 21, 131, 1590738990000 ] + - id: 4-7 + desc: Last Join 无order by, 部分拼表条件命中索引(常量条件=右边索引key) + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",3,14,1590738990000 ] + - [ "aa",4,15,1590738991000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2, {1}.c1 as t2_c1, {1}.c3,{1}.c4 from {0} last join {1} on {1}.c1="aa" and {0}.c4={1}.c4; + expect: + columns: [ "c1 string","c2 int", "t2_c1 string", "c3 bigint","c4 timestamp" ] + order: c2 + rows: + - [ "aa",2, "aa", 13,1590738989000 ] + - [ "aa",20,"aa", 15,1590738991000 ] + - [ "bb",21, "aa", 14,1590738990000 ] + - [ "dd", 41, "aa", 14, 1590738990000 ] + - id: 5 + desc: orderby-timestamp + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",41,121,1590738992000 ] + - [ "bb",41,141,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,121,1590738992000 ] + - id: 6 + desc: orderby-int without index optimized, request-unsupport + mode: request-unsupport + 
inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",41,121,1590738991000 ] + - [ "bb",31,141,1590738992000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1; + expect: + success: true + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ aa, 2, 13, 1590738989000 ] + - [ bb, 21, 121, 1590738991000 ] + + - id: 6 + desc: orderby-int-离线支持 + mode: rtidb-unsupport,cli-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",41,121,1590738991000 ] + - [ "bb",31,141,1590738992000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,121,1590738991000 ] + - id: 7 + desc: orderby-float + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ] + - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ] + - [ "bb",2.4,3.1,1590738992000,"2020-05-02","bb" ] + sql: select 
{0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1; + expect: + success: false + - id: 8 + desc: orderby-double + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ] + - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ] + - [ "bb",2.4,3.1,1590738992000,"2020-05-02","bb" ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1; + expect: + success: false + - id: 9 + desc: orderby-date + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ] + - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ] + - [ "bb",2.4,3.1,1590738992000,"2020-05-02","bb" ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c5 on {0}.c1={1}.c1; + expect: + success: false + - id: 10 + desc: orderby-string + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ] + - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ] + - [ 
"bb",2.4,3.1,1590738992000,"2020-05-02","bb" ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c6 on {0}.c1={1}.c1; + expect: + success: false + - id: 11 + desc: 拼接条件-bigint + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",41,31,1590738992000 ] + - [ "bb",41,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738992000 ] + - id: 12 + desc: 拼接条件-int + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738992000 ] + - id: 13 + desc: 拼接条件-float-未命中索引 + mode: rtidb-unsupport, performance-sensitive-unsupport + tags: ["TODO", "v0.3.0", "@chenjing, fix join on double/float equal condition"] + inputs: + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-02","aa" ] + - columns: [ "c1 
string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-03","ab" ] + - [ "bb",2.2,3.2,1590738992000,"2020-05-04","bc" ] + - [ "bb",2.2,3.2,1590738991000,"2020-05-02","bb" ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2; + expect: + columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2.1,3.1,1590738989000 ] + - [ "bb",2.2,3.2,1590738992000 ] + - id: 13-2 + desc: 拼接条件-double + mode: rtidb-unsupport, performance-sensitive-unsupport + tags: ["TODO", "v0.3.0", "@chenjing, fix join on double/float equal condition"] + inputs: + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-02","aa" ] + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-03","ab" ] + - [ "bb",2.2,3.2,1590738992000,"2020-05-04","bc" ] + - [ "bb",2.2,3.2,1590738991000,"2020-05-02","bb" ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3; + expect: + columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2.1,3.1,1590738989000 ] + - [ "bb",2.2,3.2,1590738992000 ] + - id: 14 + desc: 拼接条件-date + inputs: + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c5:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-02","aa" ] + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ] + indexs: [ "index1:c5:c4" ] + rows: + - 
[ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-02","ab" ] + - [ "bb",2.2,3.2,1590738992000,"2020-05-02","bc" ] + - [ "bb",2.2,3.2,1590738991000,"2020-05-02","bb" ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c5={1}.c5; + expect: + columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2.1,3.1,1590738989000 ] + - [ "bb",2.2,3.2,1590738992000 ] + - id: 14 + desc: 拼接条件-timestamp + inputs: + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 timestamp" ] + indexs: [ "index1:c6:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01",1590738989000 ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-02",1590738990000 ] + - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 timestamp" ] + indexs: [ "index1:c6:c4" ] + rows: + - [ "aa",2.1,3.1,1590738989000,"2020-05-01",1590738989000 ] + - [ "bb",2.2,3.2,1590738990000,"2020-05-02",1590738990000 ] + - [ "bb",2.2,3.2,1590738992000,"2020-05-02",1590738990000 ] + - [ "bb",2.2,3.2,1590738991000,"2020-05-02",1590738990000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c6={1}.c6; + expect: + columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2.1,3.1,1590738989000 ] + - [ "bb",2.2,3.2,1590738992000 ] + - id: 15 + desc: 不同类型的列作为拼接条件 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,2,1590738989000 ] + - [ "bb",21,21,1590738990000 ] + - [ "bb",21,21,1590738992000 ] + - [ "bb",21,21,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c3; + expect: + columns: [ "c1 string","c2 int","c3 
bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,2,1590738989000 ] + - [ "bb",21,21,1590738992000 ] + - id: 16 + desc: 多个拼接条件 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",21,32,1590738993000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2 and {0}.c3={1}.c3; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738992000 ] + - id: 17 + desc: 不等值拼接 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",21,32,1590738993000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2 = {1}.c2 and {0}.c3 <= {1}.c3; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,32,1590738993000 ] + - id: 17-1 + desc: 不等值拼接-未命中索引 + mode: rtidb-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ 
"bb",21,32,1590738993000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3<{1}.c3; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,32,1590738993000 ] + - [ "bb",21,32,1590738993000 ] + - id: 17-2 + desc: order by 限定列的范围-常量 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",21,32,1590738993000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3 and {1}.c3>10; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,null,null ] + - [ "bb",21,31,1590738992000 ] + - id: 18 + desc: order by 限定列的范围-变量 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",22,31,1590738990000 ] + - [ "bb",21,32,1590738993000 ] + - [ "bb",22,31,1590738992000 ] + - [ "bb",22,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3 and {0}.c2<{1}.c2; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,null,null ] + - [ "bb",21,31,1590738992000 ] + - id: 19 + desc: 拼接条件中有空串 + mode: cli-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + 
rows: + - [ "",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "",2,3,1590738989000 ] + - [ "bb",22,31,1590738990000 ] + - [ "ab",21,32,1590738993000 ] + - [ "bb",22,31,1590738992000 ] + - [ "bb",22,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "",2,3,1590738989000 ] + - [ "bb",21,31,1590738992000 ] + - id: 19 + desc: 拼接条件中有null + tags: [ "TODO", "@chenjing, @baoxinqi" ] + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ NULL,2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ NULL,2,3,1590738989000 ] + - [ "bb",22,31,1590738990000 ] + - [ "ab",21,32,1590738993000 ] + - [ "bb",22,31,1590738992000 ] + - [ "bb",22,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ null,2,3,1590738989000 ] + - [ "bb",21,31,1590738992000 ] + - id: 20 + desc: 结合limit + tags: [ "TODO", "remove @zhaowei" ] + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",41,31,1590738992000 ] + - [ "bb",41,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3 limit 1; + expect: + rows: + - [ "aa",2,3,1590738989000 ] + - id: 21 + 
desc: 三表拼表 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",41,121,1590738992000 ] + - [ "bb",41,121,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",21,131,1590738992000 ] + - [ "aa",41,121,1590738991000 ] + - [ "bb",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c4,{2}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} order by {2}.c4 on {0}.c1={2}.c1; + expect: + columns: [ "c1 string","c2 int","c4 timestamp","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,1590738989000,1590738992000 ] + - [ "bb",21,1590738992000,1590738991000 ] + - id: 22 + desc: 拼接条件不是索引列 + mode: rtidb-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738992000 ] + - id: 23 + desc: 使用表别名 + inputs: + - columns: [ "c1 string","c2 bigint","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 bigint","c3 bigint","c4 timestamp" ] + indexs: [ 
"index1:c1:c2" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",41,121,1590738991000 ] + - [ "bb",31,141,1590738992000 ] + sql: select t1.c1,t1.c2,t2.c3,t2.c4 from {0} as t1 last join {1} as t2 ORDER BY t2.c2 on t1.c1=t2.c1; + expect: + columns: [ "c1 string","c2 bigint","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,121,1590738991000 ] + - id: 25 + desc: LAST JOIN with rename table + mode: python-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4", "index2:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",21,32,1590738993000 ] + - [ "cc",21,31,1590738992000 ] + - [ "dd",21,31,1590738991000 ] + sql: | + select + {0}.c1, {0}.c2, {0}.c3, + t2.c1 as t2_c1, t2.c2 as t2_c2, t2.c3 as t2_c3,t2.c4 as t2_c4, + t3.c1 as t3_c1, t3.c4 as t3_c4 from {0} + last join {1} as t2 ORDER BY t2.c4 on {0}.c2 = t2.c2 and {0}.c3 <= t2.c3 + last join {1} as t3 ORDER BY t3.c4 on {0}.c1 = t3.c1 and {0}.c3 <= t3.c3; + expect: + columns: [ "c1 string","c2 int", "c3 bigint", "t2_c1 string", "t2_c2 int", "t2_c3 bigint","t2_c4 timestamp", "t3_c1 string", "t3_c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3, "aa", 2, 3,1590738989000, "aa", 1590738989000 ] + - [ "bb",21, 31, "cc", 21, 32,1590738993000, "bb", 1590738990000 ] + - id: 26 + desc: LAST JOIN subquery with rename table + mode: python-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "col1 string","col2 int","col3 bigint","col4 timestamp" ] + indexs: [ "index1:col2:col4", "index2:col1:col4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ 
"bb",21,31,1590738990000 ] + - [ "cc",21,32,1590738993000 ] + - [ "cc",21,31,1590738992000 ] + - [ "dd",21,31,1590738991000 ] + sql: | + select + {0}.c1, {0}.c2, {0}.c3, + t2.c1 as t2_c1, t2.c2 as t2_c2, t2.c3 as t2_c3,t2.c4 as t2_c4, + t3.c1 as t3_c1, t3.c4 as t3_c4 from {0} + last join (select col1 as c1, col2 as c2, col3 as c3, col4 as c4 from {1}) as t2 ORDER BY t2.c4 on {0}.c2 = t2.c2 and {0}.c3 <= t2.c3 + last join (select col1 as c1, col3 as c3, col4 as c4 from {1}) as t3 ORDER BY t3.c4 on {0}.c1 = t3.c1 and {0}.c3 <= t3.c3; + expect: + columns: [ "c1 string","c2 int", "c3 bigint", "t2_c1 string", "t2_c2 int", "t2_c3 bigint","t2_c4 timestamp", "t3_c1 string", "t3_c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3, "aa", 2, 3,1590738989000, "aa", 1590738989000 ] + - [ "bb",21, 31, "cc", 21, 32,1590738993000, "bb", 1590738990000 ] + - id: 27 + desc: LAST JOIN subquery with rename table 2 + mode: python-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "col1 string","col2 int","col3 bigint","col4 timestamp" ] + indexs: [ "index1:col2:col4", "index2:col1:col4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",21,32,1590738993000 ] + - [ "cc",21,31,1590738992000 ] + - [ "dd",21,31,1590738991000 ] + sql: | + select + {0}.c1, {0}.c2, {0}.c3, + t2.c1 as t2_c1, t2.c2 as t2_c2, t2.c3 as t2_c3,t2.c4 as t2_c4, + t3.c1 as t3_c1, t3.c4 as t3_c4 from {0} + last join (select col1 as c1, col2 as c2, col3 as c3, col4 as c4 from {1}) as t2 ORDER BY t2.c4 on {0}.c2 = t2.c2 and {0}.c3 <= t2.c3 + last join (select col1 as c1, col2 as c2, col3 as c3, col4 as c4 from {1}) as t3 ORDER BY t3.c4 on {0}.c1 = t3.c1 and {0}.c3 <= t3.c3; + expect: + columns: [ "c1 string","c2 int", "c3 bigint", "t2_c1 string", "t2_c2 int", "t2_c3 bigint","t2_c4 timestamp", "t3_c1 string", "t3_c4 timestamp" ] + order: c1 + 
rows: + - ["aa",2,3, "aa", 2, 3,1590738989000, "aa", 1590738989000] + - ["bb",21, 31, "cc", 21, 32,1590738993000, "bb", 1590738990000] + + - id: 28 + desc: orderby-smallint + tags: ["offline-unsupport", "TODO"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - columns: ["c1 string","c2 smallint","c3 double","c4 timestamp","c5 date","c6 string"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",1,3.1,1590738989000,"2020-05-01","aa"] + - ["bb",2,3.3,1590738990000,"2020-05-03","ab"] + - ["bb",5,3.6,1590738991000,"2020-05-04","bc"] + - ["bb",4,3.1,1590738992000,"2020-05-02","bb"] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1; + expect: + success: false + + - id: 29 + desc: orderby-bool + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - columns: ["c1 string","c2 bool","c3 double","c4 timestamp","c5 date","c6 string"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",true,3.1,1590738989000,"2020-05-01","aa"] + - ["bb",true,3.3,1590738990000,"2020-05-03","ab"] + - ["bb",false,3.6,1590738991000,"2020-05-04","bc"] + - ["bb",true,3.1,1590738992000,"2020-05-02","bb"] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1; + expect: + success: false + - id: 30 + desc: 拼接条件-smallint + inputs: + - columns: ["c1 string","c2 smallint","c3 bigint","c4 timestamp"] + indexs: ["index1:c2:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - columns: ["c1 string","c2 smallint","c3 bigint","c4 timestamp"] + indexs: ["index1:c2:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["bb",21,31,1590738992000] + - ["bb",21,31,1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on 
{0}.c2={1}.c2; + expect: + columns: ["c1 string","c2 smallint","c3 bigint","c4 timestamp"] + order: c1 + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738992000] + - id: 31 + desc: 拼接条件-bool + tags: ["TODO", "bug"] + inputs: + - columns: ["c1 string","c2 bool","c3 bigint","c4 timestamp"] + indexs: ["index1:c2:c4"] + rows: + - ["aa",true,3,1590738989000] + - ["bb",false,31,1590738990000] + - columns: ["c1 string","c2 bool","c3 bigint","c4 timestamp"] + indexs: ["index1:c2:c4"] + rows: + - ["aa",true,3,1590738989000] + - ["bb",false,31,1590738990000] + - ["bb",false,31,1590738992000] + - ["bb",false,31,1590738991000] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2; + expect: + columns: ["c1 string","c2 bool","c3 bigint","c4 timestamp"] + order: c1 + rows: + - ["aa",true,3,1590738989000] + - ["bb",false,31,1590738992000] + - id: 4-6 + desc: lastjoin-拼表条件没有命中索引 + mode: performance-sensitive-unsupport,cli-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} order by {1}.c4 on {0}.c1={1}.c1; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + order: c1 + rows: + - [ "aa", 2, 13, 1590738989000 ] + - [ "bb", 21, 131, 1590738990000 ] + - + id: 12 + desc: 不指定索引,进行lastjoin + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ 
"cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "dd", 41, NULL, NULL ] + - + id: 13 + desc: 不指定索引,进行lastjoin,匹配多行 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,131,1590738990000 ] + - [ "bb",21,NULL,NULL ] + - [ "dd", 41, NULL, NULL ] \ No newline at end of file diff --git a/cases/integration_test/long_window/long_window.yaml b/cases/integration_test/long_window/long_window.yaml new file mode 100644 index 00000000000..7344aca2cce --- /dev/null +++ b/cases/integration_test/long_window/long_window.yaml @@ -0,0 +1,357 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: ["options(long_window='w1:2h')"] +cases: + - + id: 0 + desc: options(long_window='w1:2') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 1 + desc: options(long_window='w1:2d') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7::latest"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 2 + desc: options(long_window='w1:2h') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7::latest"] +# rows: +# - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] +# - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] +# - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] +# - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] +# - 
[5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sqls: + - deploy deploy_{0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + - show deployment deploy_{0}; + expect: + deployment : + name: deploy_{0} + dbName: test_zw + sql: | + DEPLOY {0} SELECT + id, + c1, + sum(c4) OVER (w1) AS w1_c4_sum + FROM + {0} + WINDOW w1 AS (PARTITION BY {0}.c1 + ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) + ; + inColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,c3,kInt32,NO + - 4,c4,kInt64,NO + - 5,c5,kFloat,NO + - 6,c6,kDouble,NO + - 7,c7,kTimestamp,NO + - 8,c8,kDate,NO + outColumns: + - 1,id,kInt32,NO + - 2,c1,kVarchar,NO + - 3,w1_c4_sum,kInt64,NO + - + id: 3 + desc: options(long_window='w1:2m') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2m') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 4 + desc: options(long_window='w1:2s') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - 
[5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2s') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 5 + desc: avg算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, avg(c4) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 6 + desc: min算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2d') SELECT id, c1, min(c4) OVER w1 as w1_c4_min FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 7 + desc: max算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 
double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2h') SELECT id, c1, max(c4) OVER w1 as w1_c4_max FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 8 + desc: count算子(smallint, int, bigint, float, double, string) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2m') SELECT id, c1, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 9 + desc: 相同的PARTITION BY和ORDER BY,长窗口和短窗口可合并 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, avg(c4) 
OVER w1 as w1_c4_avg from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 10 + desc: 相同的PARTITION BY和ORDER BY,长窗口之间可合并 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2,w2:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 11 + desc: 相同的PARTITION BY和ORDER BY,-短窗口之间可合并(三个窗口 一个长窗口,俩个短窗口) + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg, count(c3) OVER w3 as w3_c3_count from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + 
w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 12 + desc: 不同的PARTITION BY和ORDER BY,长窗口和短窗口混合-不可合并窗口 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c5) OVER w1 as w1_c5_sum, + avg(c5) OVER w2 as w2_c5_avg from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 13 + desc: 窗口名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w9090:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 14 + desc: options(long_window='w1:2y') + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w9090:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true + - + id: 15 + desc: options格式错误 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + deploy {0} options(long_windows='w1:100') SELECT id, c1, avg(c5) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true diff --git a/cases/integration_test/long_window/test_count_where.yaml b/cases/integration_test/long_window/test_count_where.yaml new file mode 100644 index 00000000000..84740eaa889 --- /dev/null +++ b/cases/integration_test/long_window/test_count_where.yaml @@ -0,0 +1,540 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: ["长窗口count_where,date类型","长窗口count_where,rows"] +cases: + - + id: 0 + desc: 长窗口count_where,date类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,2,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,3,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,4,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,5,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 0-1 + desc: 长窗口count_where,rows + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 1 + desc: 长窗口count_where,smallint类型 + longWindow: w1:2 + 
inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 2 + desc: 长窗口count_where,int类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 3 + desc: 长窗口count_where,bigint类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - 
[2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 4 + desc: 长窗口count_where,string类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 5 + desc: 长窗口count_where,timestamp类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - 
[5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 6 + desc: 长窗口count_where,row类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 7 + desc: 长窗口count_where,bool类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] 
BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 8 + desc: 长窗口count_where,float类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 9 + desc: 长窗口count_where,double类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 10 + desc: 
长窗口count_where,第二个参数使用bool列 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 11 + desc: 长窗口count_where,第二个参数使用= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",0] + - [2,"aa",0] + - [3,"aa",0] + - [4,"aa",1] + - [5,"aa",1] + - + id: 12 + desc: 长窗口count_where,第二个参数使用!= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - 
[3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",2] + - + id: 13 + desc: 长窗口count_where,第二个参数使用>= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",0] + - [2,"aa",1] + - [3,"aa",2] + - [4,"aa",3] + - [5,"aa",3] + - + id: 14 + desc: 长窗口count_where,第二个参数使用<= + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - 
["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 15 + desc: 长窗口count_where,第二个参数使用> + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",0] + - [2,"aa",1] + - [3,"aa",2] + - [4,"aa",3] + - [5,"aa",3] + - + id: 17 + desc: 长窗口count_where,第二个参数使用and + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: 
false + msg: fail + - + id: 18 + desc: 长窗口count_where,第二个参数使用两个列 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 19 + desc: 长窗口count_where,第二个参数使用嵌套 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 20 + desc: 长窗口count_where,第二个参数常量在前 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - 
[4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + + + diff --git a/cases/integration_test/multiple_databases/test_multiple_databases.yaml b/cases/integration_test/multiple_databases/test_multiple_databases.yaml new file mode 100644 index 00000000000..208270b4ae5 --- /dev/null +++ b/cases/integration_test/multiple_databases/test_multiple_databases.yaml @@ -0,0 +1,383 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: Last Join tables from two databases 1 - default db is db1 + db: db1 + inputs: + - db: db1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - db: db2 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1; + expect: + order: c1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + - id: 1 + desc: Last Join tables from two databases 2 - default db is db, explicit db1 and db2 + db: db + inputs: + - db: db1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - db: db2 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1; + expect: + order: c1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + - id: 2 + desc: Last join tables from 2 databases fail 1 - db2 is not exist + db: db1 + inputs: + - db: db1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ 
"bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - db: db3 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from {0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1; + expect: + success: false + - id: 3 + desc: Last join tables from 2 databases fail 2 - fail to resolve column {1}.c3 default db + db: db1 + inputs: + - db: db1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - db: db2 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select db1.{0}.c1, db1.{0}.c2, {1}.c3, {1}.c4 from {0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1; + expect: + success: false + - id: 4 + desc: 全部使用默认库 + db: test_zw + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1; + expect: + order: c1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + - id: 5 + desc: 指定当前库查询 + db: test_zw + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: 
[ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1; + expect: + order: c1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + - id: 6 + desc: 查询使用其他库 + db: test_zw + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + db: db1 + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select * from (select c1, c2+1 as v1,c3+1 as v2 from db1.{0}) as t1; + expect: + columns: ["c1 string", "v1 int", "v2 bigint"] + order: c1 + rows: + - ["aa", 3,4] + - ["bb", 22,32] + - ["cc", 42,52] + - id: 7 + desc: 子查询后的表使用默认库 + db: db + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + db: db1 + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select db.t1.c1 from (select c1, c2+1,c3+1 from db1.{0}) as t1; + expect: + columns: ["c1 string"] + order: c1 + rows: + - ["aa"] + - ["bb"] + - ["cc"] + - id: 8 + desc: 子查询后的表使用其他库 + db: db + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + db: db1 + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select db1.t1.c1 from (select c1, c2+1,c3+1 from db1.{0}) as t1; + expect: + success: false + - id: 9 + desc: 使用子查询查不同库的数据然后lastjoin + tags: ["request 模式有问题,@chenjing"] + db: db + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + 
db: db1 + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + db: db2 + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select db.t1.c1,db.t1.c2,db.t2.c3,db.t2.c4 from (select * from db1.{0}) as t1 last join (select * from db2.{1}) as t2 ORDER BY db.t2.c3 on db.t1.c1=db.t2.c1; + expect: + order: c1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + - id: 10 + desc: 三表三个库拼表 + db: db + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + db: db1 + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + db: db2 + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "bb",41,121,1590738992000 ] + - [ "bb",41,121,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + db: db3 + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",21,131,1590738992000 ] + - [ "aa",41,121,1590738991000 ] + - [ "bb",41,121,1590738991000 ] + sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c4,db3.{2}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c4 on db1.{0}.c1=db2.{1}.c1 last join db3.{2} order by db3.{2}.c4 on db1.{0}.c1=db3.{2}.c1; + expect: + columns: [ "c1 string","c2 int","c4 timestamp","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,1590738989000,1590738992000 ] + - [ "bb",21,1590738992000,1590738991000 ] + - id: 11 + desc: 不等值拼接 + db: db + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + db: db1 + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ 
"bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + db: db2 + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",21,32,1590738993000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c4 on db1.{0}.c2 = db2.{1}.c2 and db1.{0}.c3 <= db2.{1}.c3; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,32,1590738993000 ] + - id: 12 + desc: 不同库相同表lastjoin + db: db + inputs: + - db: db1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + name: t1 + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - db: db2 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + name: t1 + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select db1.t1.c1,db1.t1.c2,db2.t1.c3,db2.t1.c4 from db1.t1 last join db2.t1 ORDER BY db2.t1.c3 on db1.t1.c1=db2.t1.c1; + expect: + order: c1 + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + - + id: 13 + desc: window rows使用其他库 + db: db + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + db: db1 + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM db1.{0} WINDOW w1 AS (PARTITION BY 
db1.{0}.c1 ORDER BY db1.{0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] + - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] + - id: 14 + desc: window ROWS_RANGE 使用其他库 + db: db + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + db: db1 + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM db1.{0} WINDOW w1 AS (PARTITION BY db1.{0}.c1 ORDER BY db1.{0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,30 ] + - [ "aa",21,61 ] + - [ "aa",22,93 ] + - [ "aa",23,96 ] + - [ "bb",24,34 ] + + diff --git a/cases/integration_test/out_in/test_out_in.yaml b/cases/integration_test/out_in/test_out_in.yaml new file mode 100644 index 00000000000..e7ac9134dfd --- /dev/null +++ b/cases/integration_test/out_in/test_out_in.yaml @@ -0,0 +1,894 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: ['数据里有null、空串、特殊字符'] +cases: + - + id: 0 + desc: 数据里有null、空串、特殊字符 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + count: 6 + - + id: 1 + desc: 全部数据类型测试 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 2 + desc: 复杂sql结果导出 + inputs: 
+ - + columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, 1590738990000, 2.2] + - [3, "bb",10, 1590738990000, 3.3] + - + columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"] + indexs: ["index2:crd_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, "aaaaaaaaaa"] + - [1590738990000, "aaaaaaaaaa"] + - [1590738989000, "cc"] + - [1590738992000, "cc"] + - + columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"] + sqls: + - select * from + (select + id, + card_no, + trx_time, + substr(card_no, 1, 6) as card_no_prefix, + sum(trx_amt) over w30d as sum_trx_amt, + count(merchant_id) over w10d as count_merchant_id + from {0} + window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW), + w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe + last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte + into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {2}; + - select * from {2}; + expect: + columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"] + order: id + rows: + - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 1, 1590738988000, "aaaaaaaaaa"] + - [2, "aaaaaaaaaa", 1590738990000, "aaaaaa", 3.3, 2, 1590738990000, "aaaaaaaaaa"] + - [3, "bb", 1590738990000, "bb", 3.3, 1, null, null] + - + id: 3 + desc: 全部数据类型测试 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 4 + desc: 执行其他库查询 + inputs: + - + db: db1 + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from db1.{0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 5 + desc: 导出insert结果 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + sqls: + - insert into {0} values 
(1,"aa",1590738989000) outfile '{0}.csv'; + expect: + success: false + - + id: 6 + desc: sql执行错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from db1.{0} into outfile '{0}.csv'; + expect: + success: false + - + id: 7 + desc: mode默认值,文件已经存在 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {0} into outfile '{0}.csv'; + expect: + success: false + - + id: 8 + desc: mode=overwrite,先到处大数据量,再到处小数据量 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {1} into outfile '{0}.csv' options(mode='overwrite'); + - load data infile '{0}.csv' into table {2}; + - select * from {2}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 9 + desc: mode=append,相同的表到处两次 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: 
["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {0} into outfile '{0}.csv' options(mode='append',header=false); + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 10 + desc: mode=append,不同的表导出,第二次header=false + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {1} into outfile '{0}.csv' options(mode='append',header=false); + - load data infile 
'{0}.csv' into table {2}; + - select * from {2}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 11 + desc: mode=append,不同的表导出,第二次header=true + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {1} into outfile '{0}.csv' options(mode='append',header=true); + expect: + cat: + path: "{0}.csv" + lines: + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true + - 2,bb,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 3,cc,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - + id: 12 + desc: option key错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv' options(head=true); + expect: + success: false + - + id: 13 + desc: option header 值错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + 
sqls: + - select * from {0} into outfile '{0}.csv' options(header='true'); + expect: + success: false + - + id: 14 + desc: format 其他格式 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv' options(format='txt'); + expect: + success: false + - + id: 15 + desc: delimiter为一些特殊字符 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(delimiter='@'); + - load data infile '{0}.csv' into table {1} options(delimiter='@'); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 16 + desc: null_value为特殊字符 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - 
select * from {0} into outfile '{0}.csv' options(null_value='~!@#$%^&*()_+'); + - load data infile '{0}.csv' into table {1} options(null_value='~!@#$%^&*()_+'); + - select * from {1}; + expect: + count: 3 + - + id: 17 + desc: String 有null 空串 ”null“ null_value为”“ + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value=''); + expect: + cat: + path: "{0}.csv" + lines: + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 5,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03, + - 4,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true + - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03, + - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false + - + id: 18 + desc: String 有null 空串 ”null“ null_value为”null“ + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id 
int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value='null'); + expect: + cat: + path: "{0}.csv" + lines: + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 5,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null + - 4,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true + - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null + - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false + - + id: 19 + desc: header=false导出数据 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=false); + - load data infile '{0}.csv' into table {1} options(header=false); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 20 + desc: format=csv,导出数据 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(format='csv'); + - load data infile '{0}.csv' into table {1} options(format='csv'); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 21 + desc: 路径文件夹不存在 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '/{0}/{0}.csv'; + expect: + success: false + - + id: 22 + desc: 数据类型不匹配 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 int","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + expect: + success: false + - + id: 23 + desc: header=true导出数据 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=true); + - load data infile '{0}.csv' into table {1} options(header=true); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 24 + desc: header=true,csv没有header + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=false); + - load data infile '{0}.csv' into table {1} options(header=true); + expect: + success: false + - + id: 25 + desc: header=false,csv有header + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : 
["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=true); + - load data infile '{0}.csv' into table {1} options(header=false); + expect: + success: false + - + id: 26 + desc: 表不存在 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=true); + - load data infile '{0}.csv' into table {1}11 options(header=true); + expect: + success: false + - + id: 27 + desc: format=csv,csv格式的文件,文件名不是csv结尾 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.txt' ; + - load data infile '{0}.txt' into table {1} options(format='csv'); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - 
[2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 28 + desc: format=其他值 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1} options(format='txt'); + expect: + success: false + - + id: 29 + desc: 路径错误 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}1.csv' into table {1}; + expect: + success: false + - + id: 30 + desc: 导入其他库的表 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + db: db1 + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 
timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table db1.{1}; + - select * from db1.{1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 31 + desc: 导出后导入 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {0}; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 32 + desc: 创建表的列和csv对不上 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","cc 
smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}1.csv' into table {1}; + expect: + success: false + - + id: 33 + desc: 表中已经有数据,然后导入 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 34 + desc: delimiter为,数据中有, + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"b,b",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - 
[3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1} options(delimiter=','); + expect: + success: false + - + id: 35 + desc: 导入-null_value=null + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value='null'); + - load data infile '{0}.csv' into table {1} options(null_value='null'); + - select * from {1}; + expect: + count: 3 + - + id: 36 + desc: 导入-null_value=空串 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value=''); + - load data infile '{0}.csv' into table {1} options(null_value=''); + - select * from {1}; + expect: + count: 3 + - + id: 37 + desc: 表删除后再次导入 +# tags: ["TODO","下个版本修复,@huangwei"] + inputs: + - + columns : ["id int","c1 string","c7 
timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + - [2,"bb",1590738990000] + - [3,"cc",1590738991000] + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - drop table {1}; + - create table {1}( + id int, + c1 string, + c7 timestamp, + index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1); + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c7 timestamp"] + order: id + rows: + - [1,"aa",1590738989000] + - [2,"bb",1590738990000] + - [3,"cc",1590738991000] + - + id: 38 + desc: mode 值错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv' options(mode='true'); + expect: + success: false + + + diff --git a/cases/integration_test/select/test_select_sample.yaml b/cases/integration_test/select/test_select_sample.yaml new file mode 100644 index 00000000000..97c47194cac --- /dev/null +++ b/cases/integration_test/select/test_select_sample.yaml @@ -0,0 +1,307 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: 查询所有列 + inputs: + - columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select c1,c3,c4,c5,c6,c7,c8 from {0}; + expect: + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + - id: 1 + desc: 查询部分列 + inputs: + - columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select c1,c3,c4 from {0}; + expect: + columns: ["c1 string","c3 int","c4 bigint"] + rows: + - ["aa",2,3] + - id: 2 + desc: 查询* + inputs: + - columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + sql: select * from {0}; + expect: + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + rows: + - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"] + - id: 3 + desc: 查询列中部分重命名 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + sql: select c1 as name,c2,c3,c4 from {0}; + expect: + columns: ["name string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa",2,3,1590738989000] + - id: 4 + desc: 查询列中全部重命名 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + sql: select c1 as name,c2 as v2,c3 as v3 ,c4 as v4 from {0}; + expect: + columns: ["name string","v2 int","v3 bigint","v4 timestamp"] + rows: + - ["aa",2,3,1590738989000] + - id: 5 + desc: 查询的列部分带表名 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - 
["aa",2,3,1590738989000] + sql: select c1 as name,{0}.c2,c3,c4 from {0}; + expect: + columns: ["name string","c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa",2,3,1590738989000] + - id: 6 + desc: 查询的表不存在 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + sql: select c1,c2,c3,c4 from {0}1; + expect: + success: false + - id: 7 + desc: 查询的列不存在 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + sql: select c1,c2,c3,c5 from {0}; + expect: + success: false + - id: 8 + desc: 查询的数据中有空串 + mode: cli-unsupport + inputs: + - columns: ["c1 string","c2 int","c3 string","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["",2,"",1590738989000] + sql: select c1,c2,c3,c4 from {0}; + expect: + columns: ["c1 string","c2 int","c3 string","c4 timestamp"] + rows: + - ["",2,"",1590738989000] + - id: 9 + desc: 查询的数据中有null + inputs: + - columns: ["c1 string","c2 int","c3 string","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [NULL,2,NULL,1590738989000] + sql: select c1,c2,c3,c4 from {0}; + expect: + columns: ["c1 string","c2 int","c3 string","c4 timestamp"] + rows: + - [NULL,2,NULL,1590738989000] + - id: 10 + desc: 结合limit + tags: ["TODO", "@zhaowei"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select c1,c2,c3,c4 from {0} limit 2; + expect: + rows: + - ["aa",2,3,1590738989000] + - ["cc",41,51,1590738991000] + - id: 11 + desc: limit 1 + tags: ["TODO", "@zhaowei"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select c1,c2,c3,c4 from {0} limit 1; + expect: + rows: + - 
["aa",2,3,1590738989000] + - id: 12 + desc: limit 0 + tags: ["TODO", "@zhaowei"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select c1,c2,c3,c4 from {0} limit 0; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + order: c1 + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + - id: 13 + desc: limit条数大于表的条数 + tags: ["TODO","@zhaoweiLIMIT单独测,现在先别测"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select c1,c2,c3,c4 from {0} limit 4; + expect: + columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + order: c1 + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + - id: 14 + desc: 查询常量 + sqlDialect: ["HybridSQL","SQLITE3"] + tags: ["常量fesql和mysql类型不配"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + sql: select c1 as name,c2,c3,c4,1 from {0}; + expect: + columns: ["name string","c2 int","c3 bigint","c4 timestamp","1 int"] + rows: + - ["aa",2,3,1590738989000,1] + - id: 15 + desc: 查询的列带表名和别名 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + sql: select {0}.c1 as name,{0}.c2 as t_c2,{0}.c3 as t_c3,{0}.c4 as t_c4 from {0}; + expect: + columns: ["name string","t_c2 int","t_c3 bigint","t_c4 timestamp"] + rows: + - ["aa",2,3,1590738989000] + - id: 16 + desc: 查询表达式使用表名 + sqlDialect: ["HybridSQL","SQLITE3"] + tags: ["表达式计算结果fesql和mysql类型不配"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - 
["aa",2,3,1590738989000] + sql: select c1 as name,{0}.c2+1 as t_c2,c3,c4 from {0}; + expect: + columns: ["name string","t_c2 int","c3 bigint","c4 timestamp"] + rows: + - ["aa",3,3,1590738989000] + - id: 17 + desc: 查询函数表达式使用表名 + sqlDialect: ["HybridSQL","SQLITE3"] + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["123456789",2,3,1590738989000] + sql: select substr({0}.c1, 3, 6) as name,{0}.c2+1 as t_c2,c3,c4 from {0}; + expect: + columns: ["name string","t_c2 int","c3 bigint","c4 timestamp"] + rows: + - ["345678",3,3,1590738989000] + - id: 18 + desc: column name prefix with _ + mode: offline-unsupport + sqlDialect: ["HybridSQL"] + tags: ["@chendihao, @baoxinqi, support simple project node with column cast"] + inputs: + - columns: ["_c1 int", "_c2 string", "_c5 bigint"] + indexs: ["index1:_c1:_c5"] + rows: + - [1, "2020-05-22 10:43:40", 1] + sql: | + select _c1, bigint(_c2) DIV 1000 as _c2_sec from (select _c1, timestamp(_c2) as _c2 from {0}); + expect: + columns: ["_c1 int", "_c2_sec bigint"] + rows: + - [1, 1590115420] + - id: 19 + desc: 全表聚合 + mode: rtidb-unsupport,offline-unsupport,cli-unsupport + db: db1 + sqlDialect: ["HybridSQL", "MYSQL"] + sql: | + SELECT SUM(col1) as sum_col1, COUNT(col1) as cnt_col1, MAX(col1) as max_col1, + MIN(col1) as min_col1, AVG(col1) as avg_col1 FROM {0}; + inputs: + - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"] + indexs: ["index1:col2:col5"] + rows: + - [0, 1, 5, 1.1, 11.1, 1, 1] + - [0, 2, 5, 2.2, 22.2, 2, 22] + - [1, 3, 55, 3.3, 33.3, 1, 333] + - [1, 4, 55, 4.4, 44.4, 2, 4444] + - [2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] + batch_plan: | + PROJECT(type=Aggregation) + DATA_PROVIDER(table=auto_t0) + expect: + columns: ["sum_col1 int32", "cnt_col1 int64", "max_col1 int32", "min_col1 int32", "avg_col1 double"] + order: sum_col1 + rows: + - [15, 5, 5, 1, 3] 
+ - + id: 14 + desc: 不指定索引,插入数据,可查询 + inputs: + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] + sql: select * from {0}; + expect: + columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true] \ No newline at end of file diff --git a/cases/integration_test/select/test_sub_select.yaml b/cases/integration_test/select/test_sub_select.yaml new file mode 100644 index 00000000000..292b6d35d2a --- /dev/null +++ b/cases/integration_test/select/test_sub_select.yaml @@ -0,0 +1,359 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 正常使用子查询 + sqlDialect: ["HybridSQL","SQLITE3"] + mode: cli-unsupport + tags: ["mysql要求派生表必须有别名"] + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select * from (select c1, c2+1,c3+1 from {0}); + expect: + columns: ["c1 string", "c2 + 1 int", "c3 + 1 bigint"] + order: c1 + rows: + - ["aa", 3,4] + - ["bb", 22,32] + - ["cc", 42,52] + - + id: 1 + desc: 子查询使列别名 + sqlDialect: ["HybridSQL","SQLITE3"] + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t; + expect: + columns: ["v2 int","v3 bigint"] + order: v2 + rows: + - [3,4] + - [22,32] + - [42,52] + - + id: 2 + desc: 子查询使用常量 + sqlDialect: ["HybridSQL","SQLITE3"] + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select * from (select c2+1 as v2,c3+1 as v3,1 as v4 from {0}) as t; + expect: + columns: ["v2 int","v3 bigint","v4 int"] + order: v2 + rows: + - [3,4,1] + - [22,32,1] + - [42,52,1] + - + id: 3 + desc: 子查询中有空串 + mode: cli-unsupport + sqlDialect: ["HybridSQL","SQLITE3"] + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select * from (select c1,c2+1 as v2,c3+1 as v3,1 as v4 from {0}) as t; + expect: + columns: ["c1 string","v2 int","v3 bigint","v4 int"] + order: c1 + rows: + - ["",22,32,1] + - ["aa",3,4,1] + - ["cc",42,52,1] + - + id: 4 + desc: 子查询中有null + inputs: + 
- + columns : ["id int","c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa",2,3,1590738989000] + - [2,NULL,21,31,1590738990000] + - [3,"cc",41,51,1590738991000] + sql: select * from (select id,c1,c3+1 as v3 from {0}) as t; + expect: + columns: ["id int","c1 string","v3 bigint"] + order: id + rows: + - [1,"aa",4] + - [2,null,32] + - [3,"cc",52] + - + id: 5 + desc: 查询时列不在子查询中 + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + - ["cc",41,51,1590738991000] + sql: select v5 from (select c1,c2+1 as v2,c3+1 as v3,1 as v4 from {0}); + expect: + success: false + - + id: 6 + desc: last join 子查询和副表, 子查询包含window + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, 1590738990000, 2.2] + - [3, "bb",10, 1590738990000, 3.3] + - + columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"] + indexs: ["index2:crd_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, "aaaaaaaaaa"] + - [1590738990000, "aaaaaaaaaa"] + - [1590738989000, "cc"] + - [1590738992000, "cc"] + sql: select * from + (select + id, + card_no, + trx_time, + substr(card_no, 1, 6) as card_no_prefix, + sum(trx_amt) over w30d as sum_trx_amt, + count(merchant_id) over w10d as count_merchant_id + from {0} + window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW), + w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe + last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte; + expect: + columns: ["id int", "card_no string", "trx_time timestamp", 
"card_no_prefix string", + "sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp", + "crd_nbr string"] + order: id + rows: + - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 1, 1590738988000, "aaaaaaaaaa"] + - [2, "aaaaaaaaaa", 1590738990000, "aaaaaa", 3.3, 2, 1590738990000, "aaaaaaaaaa"] + - [3, "bb", 1590738990000, "bb", 3.3, 1, null, null] + - + id: 7 + desc: window样本表和副表都作子查询 + sqlDialect: ["HybridSQL"] + mode: python-unsupport, cluster-unsupport,cli-unsupport + inputs: + - + columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, 1590738991000, 2.2] + - [3, "bb",10, 1590738990000, 3.3] + - + columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"] + indexs: ["index2:crd_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, "aaaaaaaaaa"] + - [1590738990000, "aaaaaaaaaa"] + - [1590738989000, "cc"] + - [1590738992000, "cc"] + sql: | + select + id, + card_no, + trx_time, + substr(card_no, 1, 6) as card_no_prefix, + sum(trx_amt) over w30d as w30d_amt_sum, + count(id) over w10d as w10d_id_cnt + from (select id, card_no, trx_time, trx_amt from {0}) as t_instance + window w30d as (PARTITION BY card_no ORDER BY trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW), + w10d as (UNION (select 0 as id, crd_nbr as card_no, crd_lst_isu_dte as trx_time, 0.0f as trx_amt from {1}) PARTITION BY card_no ORDER BY trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW); + expect: + columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string", + "w30d_amt_sum float", "w10d_id_cnt int64"] + order: id + rows: + - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 2] + - [2, "aaaaaaaaaa", 1590738991000, "aaaaaa", 3.3, 4] + - [3, "bb", 1590738990000, "bb", 3.3, 1] + - + id: 8 + desc: window样本表和副表都作子查询,INSTANCE_NOT_IN_WINDOW + sqlDialect: ["HybridSQL"] + mode: python-unsupport + inputs: + 
- + columns : ["id int", "user_id string", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + + indexs: ["index1:user_id:trx_time"] + rows: + - [1, "aaaaaaaaaa", "xxx", 1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa", "xxx", 1, 1590738991000, 2.2] + - [3, "bb", "000", 10, 1590738990000, 3.3] + - [4, "cc", "zzz", 20, 1590738993000, 4.4] + - + columns : ["crd_lst_isu_dte timestamp", "crd_nbr string", "account_amt double"] + indexs: ["index2:crd_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, "xxx", 100.0] + - [1590738990000, "xxx", 200.0] + - [1590738990000, "yyy", 300.0] + - [1590738989000, "zzz", 400.0] + - [1590738992000, "zzz", 500.0] + sql: | + select id as out2_id, + crd_nbr, + count(id) over w10d as w10d_id_cnt, + sum(account_amt) over w10d as w10d_total_account_amt + from (select id as id, trx_time as crd_lst_isu_dte, card_no as crd_nbr, 0.0 as account_amt from {0}) as t_instance + window w10d as (UNION (select 0 as id, crd_lst_isu_dte, crd_nbr, account_amt from {1}) + PARTITION BY crd_nbr ORDER BY crd_lst_isu_dte ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + + expect: + columns: ["out2_id int", "crd_nbr string", "w10d_id_cnt int64", "w10d_total_account_amt double"] + order: out2_id + rows: + - [1, "xxx", 2, 100.0] + - [2, "xxx", 3, 300.0] + - [3, "000", 1, 0.0] + - [4, "zzz", 3, 900.0] + - + id: 9 + desc: 特征拼接 + mode: offline-unsupport, python-unsupport,cli-unsupport + sqlDialect: ["HybridSQL"] + inputs: + - + columns : ["id int", "user_id string", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + + indexs: ["index1:user_id:trx_time"] + rows: + - [1, "aaaaaaaaaa", "xxx", 1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa","xxx", 1, 1590738991000, 2.2] + - [3, "bb", "000", 10, 1590738990000, 3.3] + - [4, "cc", "zzz", 20, 1590738993000, 4.4] + - + columns : ["crd_lst_isu_dte timestamp", "crd_nbr string", "account_amt double"] + indexs: ["index2:crd_nbr:crd_lst_isu_dte"] + rows: + - 
[1590738988000, "xxx", 100.0] + - [1590738990000, "xxx", 200.0] + - [1590738990000, "yyy", 300.0] + - [1590738989000, "zzz", 400.0] + - [1590738992000, "zzz", 500.0] + sql: | + select * from + ( select + id as out1_id, + user_id, + trx_time, + sum(trx_amt) over w30d as w30d_amt_sum + from (select id, user_id, trx_time, trx_amt from {0}) as t_instance + window w30d as (PARTITION BY user_id ORDER BY trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW) + ) as out1 last join + ( select id as out2_id, + crd_nbr, + count(id) over w10d as w10d_id_cnt, + sum(account_amt) over w10d as w10d_total_account_amt + from (select id as id, trx_time as crd_lst_isu_dte, card_no as crd_nbr, 0.0 as account_amt from {0}) as t_instance + window w10d as (UNION (select 0 as id, crd_lst_isu_dte, crd_nbr, account_amt from {1}) + PARTITION BY crd_nbr ORDER BY crd_lst_isu_dte ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW)) as out2 + on out1.out1_id=out2.out2_id; + + expect: + columns: ["out1_id int", "user_id string", "trx_time timestamp", + "w30d_amt_sum float", "out2_id int", "crd_nbr string", "w10d_id_cnt int64", "w10d_total_account_amt double"] + order: out1_id + rows: + - [1, "aaaaaaaaaa", 1590738989000, 1.1, 1, "xxx", 2, 100.0] + - [2, "aaaaaaaaaa", 1590738991000, 3.3, 2, "xxx", 3, 300.0] + - [3, "bb", 1590738990000, 3.3, 3, "000", 1, 0.0] + - [4, "cc", 1590738993000, 4.4, 4, "zzz", 3, 900.0] + - + id: 10 + desc: 子查询使列别名重名 + sqlDialect: ["HybridSQL"] + tags: ["mysql报错"] + inputs: + - + columns : ["id int","c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa",2,3,1590738989000] + - [2,"bb",21,31,1590738990000] + - [3,"cc",41,51,1590738991000] + sql: select * from (select id,c2+1 as v2,c3+1 as v2 from {0}) as t; + expect: + columns: ["id int","v2 int","v2 bigint"] + order: id + rows: + - [1,3,4] + - [2,22,32] + - [3,42,52] + - + id: 11 + desc: 子查询使列别名重名,并同时select + tags: ["TODO", "@baoxinqi", 
"bug-期望子查询别名不可以一致FEX-1009"] + inputs: + - + columns : ["id int","c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa",2,3,1590738989000] + - [2,"bb",21,31,1590738990000] + - [3,"cc",41,51,1590738991000] + sql: select id,v2,v2 from (select id,c2+1 as v2,c3+1 as v2 from {0}); + expect: + success: false + - + id: 15 + desc: 不指定索引,进行子查询操作 + inputs: + - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ] + rows: + - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ] + sql: select c1,c2 from (select id as c1,c1 as c2,c7 as c3 from {0}); + expect: + columns : ["c1 int","c2 int"] + order: id + rows: + - [1,1] diff --git a/cases/integration_test/select/test_where.yaml b/cases/integration_test/select/test_where.yaml new file mode 100644 index 00000000000..8a2f8d26387 --- /dev/null +++ b/cases/integration_test/select/test_where.yaml @@ -0,0 +1,252 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+sqlDialect: ["HybridSQL"] +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: Where条件命中索引 + mode: request-unsupport + db: db1 + sql: | + SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=5; + inputs: + - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"] + indexs: ["index1:col2:col5"] + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + batch_plan: | + SIMPLE_PROJECT(sources=(col0, col1, col2, col3, col4, col5, col6)) + FILTER_BY(condition=, left_keys=(), right_keys=(), index_keys=(5)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + order: col1 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + - id: 1-1 + desc: Where部分条件命中索引, col1>3条件未命中 + mode: request-unsupport, offline-unsupport + db: db1 + sql: | + SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=55 and col1 > 3; + inputs: + - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + index: index1:col2:col5 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + batch_plan: | + SIMPLE_PROJECT(sources=(col0, col1, col2, col3, col4, col5, col6)) + FILTER_BY(condition=col1 > 3, left_keys=(), right_keys=(), index_keys=(55)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + order: col1 + data: | + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + - id: 1-1 + desc: Where部分条件命中索引, col1=3条件未命中 + mode: request-unsupport, offline-unsupport + db: db1 + sql: | + SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=55 and col1 = 3; + inputs: + - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + index: index1:col2:col5 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + batch_plan: | + SIMPLE_PROJECT(sources=(col0, col1, col2, col3, col4, col5, col6)) + FILTER_BY(condition=3 = col1, left_keys=(), right_keys=(), index_keys=(55)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + order: col1 + data: | + 1, 3, 55, 3.3, 33.3, 1, 333 + - id: 2-1 + desc: Where条件未命中索引 + mode: request-unsupport + tags: ["OnlineServing不支持,Training可以支持"] + db: db1 + sql: | + SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=55 and col1 > 1; + inputs: + - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + index: index1:col6:col5 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + expect: + success: true + columns: [ "col0 string", "col1 int", "col2 smallint", "col3 float", "col4 double", "col5 bigint", "col6 string" ] + order: col1 + rows: + - [ 1, 3, 55, 3.300000, 33.300000, 1, 333 ] + - [ 1, 4, 55, 4.400000, 44.400000, 2, 4444 ] + - [ 2, 5, 55, 5.500000, 55.500000, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ] + + - id: 2-2 + desc: Where条件未命中索引-离线支持 + mode: 
rtidb-unsupport,cli-unsupport + db: db1 + sql: | + SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=5 and col1 < 2; + inputs: + - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + index: index1:col6:col5 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + expect: + schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + order: col1 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + - id: 3-1 + desc: Where条件未命中索引示例2 + mode: request-unsupport + db: db1 + sql: | + SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=col3 and col1 < 2; + inputs: + - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"] + indexs: ["index1:col2:col5"] + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + expect: + success: true + columns: [ "col0 string", "col1 int", "col2 smallint", "col3 float", "col4 double", "col5 bigint", "col6 string" ] + rows: + + - id: 3-2 + desc: Where条件未命中索引示例2 + mode: rtidb-unsupport,cli-unsupport + db: db1 + sql: | + SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col1=col5 and col1 > 1; + inputs: + - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + index: index1:col2:col5 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + expect: + schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + order: col1 + 
data: | + 0, 2, 5, 2.2, 22.2, 2, 22 + - id: 4 + desc: Where条件命中索引,索引穿透简单子查询 + mode: request-unsupport + db: db1 + sql: | + SELECT c0, c1, c2, c3, c4, c5, c6, c1+c4 as c14 FROM + (select col0 as c0, col1 as c1, col2 as c2, 0.0f as c3, col4 as c4, col5 as c5, "empty_str" as c6 from {0}) as t1 where t1.c2=5; + inputs: + - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string + index: index1:col2:col5 + data: | + 0, 1, 5, 1.1, 11.1, 1, 1 + 0, 2, 5, 2.2, 22.2, 2, 22 + 1, 3, 55, 3.3, 33.3, 1, 333 + 1, 4, 55, 4.4, 44.4, 2, 4444 + 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + batch_plan: | + PROJECT(type=TableProject) + FILTER_BY(condition=, left_keys=(), right_keys=(), index_keys=(5)) + RENAME(name=t1) + SIMPLE_PROJECT(sources=(col0 -> c0, col1 -> c1, col2 -> c2, 0.000000 -> c3, col4 -> c4, col5 -> c5, empty_str -> c6)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + schema: c0:string, c1:int32, c2:int16, c3:float, c4:double, c5:int64, c6:string, c14:double + order: c1 + data: | + 0, 1, 5, 0.0, 11.1, 1, empty_str, 12.1 + 0, 2, 5, 0.0, 22.2, 2, empty_str, 24.2 + - id: 5 + desc: lastjoin+Where,包含重复列名 + mode: request-unsupport, rtidb-unsupport + tags: ["TODO", "@chenjing", "0.3.0", "fail to compute where condition bug"] + db: db1 + inputs: + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp", "c5 int"] + indexs: ["index1:c5:c4"] + rows: + - ["aa",2,3,1590738989000, 100] + - ["bb",21,31,1590738990000, 200] + - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: ["index1:c3:c4"] + rows: + - ["aa",2,3,1590738989000] + - ["bb",21,31,1590738990000] + sql: select {0}.c1,{1}.c1,{0}.c2,{1}.c3,{1}.c4,{0}.c5 from {0} last join {1} on {0}.c3={1}.c3 where c5 = 100; + expect: + columns: ["c1 string","c1 string", "c2 int","c3 bigint", "c4 timestamp", "c5 int"] + rows: + - ["aa","aa",2,3,1590738989000, 100] + - id: 6-1 + desc: Where条件后全表聚合 + tags: 
["TODO","batch exec failed"] + mode: request-unsupport + db: db1 + sql: | + SELECT SUM(col1) as sum_col1, COUNT(col1) as cnt_col1, MAX(col1) as max_col1, + MIN(col1) as min_col1, AVG(col1) as avg_col1 FROM {0} where col2=5; + inputs: + - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"] + indexs: ["index1:col2:col5"] + rows: + - [0, 1, 5, 1.1, 11.1, 1, 1] + - [0, 2, 5, 2.2, 22.2, 2, 22] + - [1, 3, 55, 3.3, 33.3, 1, 333] + - [1, 4, 55, 4.4, 44.4, 2, 4444] + - [2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] + batch_plan: | + PROJECT(type=Aggregation) + FILTER_BY(condition=, left_keys=(), right_keys=(), index_keys=(5)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + + expect: + columns: ["sum_col1 int32", "cnt_col1 int64", "max_col1 int32", "min_col1 int32", "avg_col1 double"] + order: sum_col1 + rows: + - [3, 2, 2, 1, 1.5] diff --git a/cases/integration_test/spark/generate_yaml_case.py b/cases/integration_test/spark/generate_yaml_case.py new file mode 100755 index 00000000000..de8551cc70c --- /dev/null +++ b/cases/integration_test/spark/generate_yaml_case.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# -*- coding: utf-8 -*-
+
+# pip3 install -U ruamel.yaml pyspark first
+import argparse
+from datetime import date
+import random
+import string
+import time
+import sys
+
+import pyspark
+import pyspark.sql
+from pyspark.sql.types import *
+import ruamel.yaml as yaml
+from ruamel.yaml import RoundTripDumper, RoundTripLoader
+
+from ruamel.yaml.scalarstring import LiteralScalarString, DoubleQuotedScalarString
+
+YAML_TEST_TEMPLATE = """
+db: test_db
+cases:
+  - id: 1
+    desc: yaml 测试用例模版
+    inputs: []
+    sql: |
+      select * from t1
+    expect:
+      success: true
+"""
+
+INPUT_TEMPLATE = """
+  columns: []
+  indexs: []
+  rows: []
+"""
+
+
+def random_string(prefix, n):
+    return "{}_{}".format(prefix, ''.join(random.choices(string.ascii_letters + string.digits, k=n)))
+
+# random date in current year
+def random_date():
+    start_dt = date.today().replace(day=1, month=1).toordinal()
+    end_dt = date.today().toordinal()
+    random_day = date.fromordinal(random.randint(start_dt, end_dt))
+    return random_day
+
+def to_column_str(field):
+    tp = '{unknown_type}'
+    if isinstance(field.dataType, BooleanType):
+        tp = 'bool'
+    elif isinstance(field.dataType, ShortType):
+        tp = 'int16'
+    elif isinstance(field.dataType, IntegerType):
+        tp = 'int32'
+    elif isinstance(field.dataType, LongType):
+        tp = 'int64'
+    elif isinstance(field.dataType, StringType):
+        tp = 'string'
+    elif isinstance(field.dataType, TimestampType):
+        tp = 'timestamp'
+    elif isinstance(field.dataType, DateType):
+        tp = 'date'
+    elif isinstance(field.dataType, DoubleType):
+        tp = 'double'
+    elif isinstance(field.dataType, FloatType):
+        tp = 'float'
+
+    return "%s %s" % (field.name, tp)
+
+def random_row(schema):
+    row = []
+    for field_schema in schema.fields:
+        field_type = field_schema.dataType
+        if isinstance(field_type, BooleanType):
+            row.append(random.choice([True, False]))
+        elif isinstance(field_type, ShortType):
+            # (1 << 15) - 1: << binds looser than -, so "1 << 15 - 1" would be 1 << 14
+            row.append(random.randint(-(1 << 15), (1 << 15) - 1))
+        elif isinstance(field_type, IntegerType):
+            row.append(random.randint(-(1 << 31), (1 << 31) - 1))
+        elif isinstance(field_type, LongType):
+            row.append(random.randint(-(1 << 63), (1 << 63) - 1))
+        elif isinstance(field_type, StringType):
+            row.append(random_string(field_schema.name, 10))
+        elif isinstance(field_type, TimestampType):
+            # in milliseconds
+            row.append(int(time.time()) * 1000)
+        elif isinstance(field_type, DateType):
+            row.append(random_date())
+        elif isinstance(field_type, DoubleType):
+            row.append(random.uniform(-128.0, 128.0))
+        elif isinstance(field_type, FloatType):
+            row.append(random.uniform(-128.0, 128.0))
+        else:
+            row.append('{unknown}')
+
+    return row
+
+
+def to_string(value):
+    if isinstance(value, date):
+        return DoubleQuotedScalarString(value.strftime("%Y-%m-%d"))
+    if isinstance(value, float):
+        return float("%.2f" % value)
+    if isinstance(value, str):
+        return DoubleQuotedScalarString(value)
+    return value
+
+
+sess = None
+def gen_inputs_column_and_rows(parquet_file, table_name=''):
+    global sess
+    if sess is None:
+        sess = pyspark.sql.SparkSession(pyspark.SparkContext())
+    dataframe = sess.read.parquet(parquet_file)
+    hdfs_schema = dataframe.schema
+    schema = [DoubleQuotedScalarString(to_column_str(f)) for f in hdfs_schema.fields]
+
+    table = yaml.load(INPUT_TEMPLATE, Loader=RoundTripLoader)
+
+    if table_name:
+        table['name'] = table_name
+
+    table['columns'] = schema
+
+    data_set = []
+    row_cnt = random.randint(1, 10)
+    for _ in range(row_cnt):
+        data_set.append(random_row(hdfs_schema))
+
+    table['rows'] = [list(map(to_string, row)) for row in data_set]
+    return table
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--sql", required=True, help="sql text path")
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument("--schema-file", help="path to hdfs content(in parquet format), used to detect table schema")
+    group.add_argument("--schema-list-file", help="list file containing a list of hdfs files, \"table_name: file path\" per line")
+    parser.add_argument("--output", required=True, help="path to output yaml file")
+    args = parser.parse_args()
+
+    sql = args.sql
+    schema_file = args.schema_file
+    schema_list_file = args.schema_list_file
+    output = args.output
+
+    yaml_test = yaml.load(YAML_TEST_TEMPLATE, Loader=RoundTripLoader, preserve_quotes=True)
+
+    if schema_file:
+        tb = gen_inputs_column_and_rows(schema_file)
+        yaml_test['cases'][0]['inputs'].append(tb)
+    elif schema_list_file:
+        with open(schema_list_file, 'r') as l:
+            for schema_file in l:
+                sf = schema_file.strip()
+                if not sf:
+                    continue
+                table_name, parquet_file, *_ = sf.split(':')
+
+                parquet_file = parquet_file.strip()
+                if parquet_file:
+                    tb = gen_inputs_column_and_rows(parquet_file, table_name)
+                    yaml_test['cases'][0]['inputs'].append(tb)
+    else:
+        print("error")
+        sys.exit(1)
+
+
+    with open(sql, 'r') as f:
+        yaml_test['cases'][0]['sql'] = LiteralScalarString(f.read().strip())
+
+    with open(output, 'w') as f:
+        f.write(yaml.dump(yaml_test, Dumper=RoundTripDumper, allow_unicode=True))
+
diff --git a/cases/integration_test/spark/requirements.txt b/cases/integration_test/spark/requirements.txt
new file mode 100644
index 00000000000..257735c8ec6
--- /dev/null
+++ b/cases/integration_test/spark/requirements.txt
@@ -0,0 +1,3 @@
+py4j==0.10.9
+pyspark==3.1.3
+ruamel.yaml==0.16.12
diff --git a/cases/integration_test/spark/test_ads.yaml b/cases/integration_test/spark/test_ads.yaml
new file mode 100644
index 00000000000..43d889969ff
--- /dev/null
+++ b/cases/integration_test/spark/test_ads.yaml
@@ -0,0 +1,176 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: template_name +cases: +- id: 1 + desc: 单表-广告场景 + inputs: + - columns: + - "id string" + - "time timestamp" + - "C1 string" + - "banner_pos int32" + - "site_id string" + - "site_domain string" + - "site_category string" + - "app_id string" + - "app_domain string" + - "app_category string" + - "device_id string" + - "device_ip string" + - "device_model string" + - "device_type string" + - "device_conn_type string" + - "C14 string" + - "C15 string" + - "C16 string" + - "C17 string" + - "C18 string" + - "C19 string" + - "C20 string" + - "C21 string" + - "click int32" + indexs: ["index1:device_ip:time"] + rows: + - - "id_XfRHH4kXfh" + - 1609398202000 + - "C1_AXkRcXx3Kw" + - -2136663223 + - "site_id_eDHW3HhKq1" + - "site_domain_BiGZfMhPi4" + - "site_category_fRuxhKkzG7" + - "app_id_qU7KTLhbfd" + - "app_domain_89LBfwJOX6" + - "app_category_6ZYuZwBFU8" + - "device_id_wblCHgZ5XS" + - "device_ip_QghSozyTkL" + - "device_model_npId0EBZlF" + - "device_type_FC6ZCotmB0" + - "device_conn_type_ZDYT1Ax9Ms" + - "C14_fp4R2g2zVQ" + - "C15_uMIOpZgomo" + - "C16_mdReYZ82da" + - "C17_BHAroEq4Oa" + - "C18_tg1duoMypp" + - "C19_Bk6GldZeSl" + - "C20_LHuXYsBnVj" + - "C21_JasNjK98O3" + - 13560844 + - - "id_CcZoKjZdWh" + - 1609398202000 + - "C1_xu9l18vaoM" + - -2064473435 + - "site_id_JTwfcebGpx" + - "site_domain_DrGpN7fHxB" + - "site_category_VnKBVLPjCN" + - "app_id_fFOUOMIZb2" + - "app_domain_WEH14cif3z" + - "app_category_5SDJL3MMbz" + - "device_id_BYRnezWSFI" + - "device_ip_UzE2rMHw3i" + - "device_model_eEvfxxZu2H" + - "device_type_WSyKKMDHzw" + - "device_conn_type_ImtQtq1M0h" + 
- "C14_N6KNpoRxB7" + - "C15_NoqO6r3LI0" + - "C16_5SkwZizokc" + - "C17_Ubxmmk7l7D" + - "C18_mhmpWVGnvx" + - "C19_MEZPm43rbw" + - "C20_20PAS4g6pi" + - "C21_jBaglxDzWN" + - -1234570441 + sql: |- + select + id as id_1, + id as t1_id_original_0, + `time` as t1_time_original_1, + C1 as t1_C1_original_2, + banner_pos as t1_banner_pos_original_3, + site_id as t1_site_id_original_4, + site_domain as t1_site_domain_original_5, + site_category as t1_site_category_original_6, + app_id as t1_app_id_original_7, + app_domain as t1_app_domain_original_8, + app_category as t1_app_category_original_9, + device_id as t1_device_id_original_10, + device_ip as t1_device_ip_original_11, + device_model as t1_device_model_original_12, + device_type as t1_device_type_original_13, + device_conn_type as t1_device_conn_type_original_14, + C14 as t1_C14_original_15, + C15 as t1_C15_original_16, + C16 as t1_C16_original_17, + C17 as t1_C17_original_18, + C18 as t1_C18_original_19, + C19 as t1_C19_original_20, + C20 as t1_C20_original_21, + C21 as t1_C21_original_22, + click as t1_click_original_23, + device_ip as t1_device_ip_combine_24, + device_model as t1_device_model_combine_24, + C17 as t1_C17_combine_24, + device_ip as t1_device_ip_combine_25, + device_model as t1_device_model_combine_25, + C19 as t1_C19_combine_25, + device_ip as t1_device_ip_combine_26, + device_model as t1_device_model_combine_26, + C21 as t1_C21_combine_26, + banner_pos as t1_banner_pos_combine_27, + device_ip as t1_device_ip_combine_27, + device_model as t1_device_model_combine_27, + C1 as t1_C1_combine_28, + banner_pos as t1_banner_pos_combine_28, + site_domain as t1_site_domain_combine_29, + device_ip as t1_device_ip_combine_29, + device_model as t1_device_model_combine_29, + site_id as t1_site_id_combine_30, + device_ip as t1_device_ip_combine_30, + device_model as t1_device_model_combine_30, + app_domain as t1_app_domain_combine_31, + device_ip as t1_device_ip_combine_31, + device_model as 
t1_device_model_combine_31, + site_category as t1_site_category_combine_32, + device_ip as t1_device_ip_combine_32, + device_model as t1_device_model_combine_32, + device_ip as t1_device_ip_combine_33, + device_model as t1_device_model_combine_33, + C18 as t1_C18_combine_33, + fz_top1_ratio(id) over t1_device_ip_time_0s_7200s as t1_id_window_top1_ratio_34, + fz_top1_ratio(id) over t1_device_ip_time_0s_36000s as t1_id_window_top1_ratio_35, + case when !isnull(lag(app_domain, 0)) over t1_device_ip_time_0s_7200s then count(app_domain) over t1_device_ip_time_0s_7200s else null end as t1_app_domain_window_count_36, + case when !isnull(lag(app_category, 0)) over t1_device_ip_time_0s_7200s then count(app_category) over t1_device_ip_time_0s_7200s else null end as t1_app_category_window_count_37, + case when !isnull(lag(device_model, 0)) over t1_device_ip_time_0s_36000s then count(device_model) over t1_device_ip_time_0s_36000s else null end as t1_device_model_window_count_38, + case when !isnull(lag(app_id, 0)) over t1_device_ip_time_0s_7200s then count(app_id) over t1_device_ip_time_0s_7200s else null end as t1_app_id_window_count_39, + case when !isnull(lag(C17, 0)) over t1_device_ip_time_0s_7200s then count(C17) over t1_device_ip_time_0s_7200s else null end as t1_C17_window_count_40, + case when !isnull(lag(C19, 0)) over t1_device_ip_time_0s_7200s then count(C19) over t1_device_ip_time_0s_7200s else null end as t1_C19_window_count_41, + case when !isnull(lag(banner_pos, 0)) over t1_device_ip_time_0s_7200s then count(banner_pos) over t1_device_ip_time_0s_7200s else null end as t1_banner_pos_window_count_42, + fz_top1_ratio(C14) over t1_device_ip_time_0s_7200s as t1_C14_window_top1_ratio_43, + case when !isnull(lag(app_id, 0)) over t1_device_ip_time_0s_36000s then count(app_id) over t1_device_ip_time_0s_36000s else null end as t1_app_id_window_count_44, + case when !isnull(lag(site_id, 0)) over t1_device_ip_time_0s_36000s then count(site_id) over 
t1_device_ip_time_0s_36000s else null end as t1_site_id_window_count_45, + case when !isnull(lag(site_domain, 0)) over t1_device_ip_time_0s_36000s then count(site_domain) over t1_device_ip_time_0s_36000s else null end as t1_site_domain_window_count_46, + case when !isnull(lag(site_category, 0)) over t1_device_ip_time_0s_36000s then count(site_category) over t1_device_ip_time_0s_36000s else null end as t1_site_category_window_count_47, + case when !isnull(lag(app_domain, 0)) over t1_device_ip_time_0s_36000s then count(app_domain) over t1_device_ip_time_0s_36000s else null end as t1_app_domain_window_count_48, + case when !isnull(lag(app_category, 0)) over t1_device_ip_time_0s_36000s then count(app_category) over t1_device_ip_time_0s_36000s else null end as t1_app_category_window_count_49, + case when !isnull(lag(device_id, 0)) over t1_device_ip_time_0s_36000s then count(device_id) over t1_device_ip_time_0s_36000s else null end as t1_device_id_window_count_50, + case when !isnull(lag(C18, 0)) over t1_device_ip_time_0s_36000s then count(C18) over t1_device_ip_time_0s_36000s else null end as t1_C18_window_count_51, + case when !isnull(lag(device_conn_type, 0)) over t1_device_ip_time_0s_36000s then count(device_conn_type) over t1_device_ip_time_0s_36000s else null end as t1_device_conn_type_window_count_52, + case when !isnull(lag(C19, 0)) over t1_device_ip_time_0s_36000s then count(C19) over t1_device_ip_time_0s_36000s else null end as t1_C19_window_count_53 + from + {0} + window t1_device_ip_time_0s_7200s as ( partition by device_ip order by `time` rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_device_ip_time_0s_36000s as ( partition by device_ip order by `time` rows_range between 36000s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + expect: + success: true diff --git a/cases/integration_test/spark/test_credit.yaml b/cases/integration_test/spark/test_credit.yaml new file mode 100644 index 00000000000..4e466ad44d0 --- /dev/null 
+++ b/cases/integration_test/spark/test_credit.yaml @@ -0,0 +1,1012 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_db +cases: +- id: 1 + desc: 多表-信用卡用户转借记卡预测场景 + inputs: + - columns: + - "id int32" + - "cust_id_an int32" + - "ins_date timestamp" + - "Label int32" + indexs: ["index1:id:ins_date"] + rows: + - - -1985437192 + - -1317908971 + - 1611144215000 + - -506221216 + - - -906012118 + - 122153399 + - 1611144215000 + - -2073586764 + - - 192540564 + - -1868891884 + - 1611144215000 + - -1291765609 + - - -12339370 + - -1068593442 + - 1611144215000 + - -1218544053 + - - -1346508105 + - 389329057 + - 1611144215000 + - 72100927 + - - -1563970013 + - 64743832 + - 1611144215000 + - -1456087176 + - - -420456303 + - 83758185 + - 1611144215000 + - 593328038 + name: t1 + - columns: + - "cust_id_an int32" + - "agmt_id_an int32" + - "atta_acct_ind string" + - "stmt_dt date" + - "open_acct_dt timestamp" + - "clos_acct_dt timestamp" + indexs: ["indext8:cust_id_an:open_acct_dt"] + rows: + - - -103578926 + - -2129345374 + - "atta_acct_ind_QSKoYcBykS" + - "2021-01-19" + - 1611144215000 + - 1611144215000 + - - -1738231442 + - -1827648982 + - "atta_acct_ind_YCzSZpWh36" + - "2021-01-01" + - 1611144215000 + - 1611144215000 + - - -313899349 + - -620524833 + - "atta_acct_ind_b06NdQiRiE" + - "2021-01-14" + - 1611144215000 + - 1611144215000 + - - -412596205 + - -1082468256 + - "atta_acct_ind_4rFa5IVSF4" + - "2021-01-02" + - 1611144215000 + - 
1611144215000 + - - -48236232 + - -170343294 + - "atta_acct_ind_NU8FhCMOiL" + - "2021-01-11" + - 1611144215000 + - 1611144215000 + - - -1455816949 + - 403926185 + - "atta_acct_ind_yIDKZcJSaj" + - "2021-01-04" + - 1611144215000 + - 1611144215000 + - - 225487286 + - 834608659 + - "atta_acct_ind_xOG219V8NP" + - "2021-01-01" + - 1611144215000 + - 1611144215000 + name: t8 + - columns: + - "cust_id_an int32" + - "agmt_id_an int32" + - "curr_ovrd_stat_cd string" + - "curr_yr_ovrd_cnt int32" + - "curr_yr_crdt_card_point double" + - "crdt_card_point double" + - "acct_stat_cd string" + - "consm_od_bal double" + - "cash_od_bal double" + - "amtbl_od_bal double" + - "spl_pay_bal double" + - "ovrd_bal double" + - "last_mth_stmt_amt double" + - "last_mth_consm_cnt int32" + - "m_consm_amt_accm double" + - "m_cash_amt_accm double" + - "m_amtbl_amt_accm double" + - "m_spl_pay_amt_accm double" + - "m_ovrd_bal_accm double" + - "data_date timestamp" + indexs: ["indext9:cust_id_an:data_date"] + rows: + - - -1965865733 + - 181943904 + - "curr_ovrd_stat_cd_pSWF7Z7UVZ" + - 288301759 + - 10.03 + - 57.38 + - "acct_stat_cd_cTTBtj3JnQ" + - 30.94 + - -53.93 + - -81.51 + - -111.3 + - -101.78 + - 68.7 + - -1929310650 + - 121.96 + - -35.3 + - -1.68 + - 109.97 + - 89.78 + - 1611144215000 + - - 305578483 + - 594627092 + - "curr_ovrd_stat_cd_KH7JIGfFuM" + - -583313456 + - -109.77 + - 22.53 + - "acct_stat_cd_nrBFWkaCSO" + - -14.29 + - 126.7 + - 40.33 + - 120.44 + - -73.54 + - 17.18 + - -337679856 + - -81.93 + - -19.57 + - -11.83 + - 80.59 + - 75.35 + - 1611144215000 + - - -501231072 + - 22230390 + - "curr_ovrd_stat_cd_Mwu1mCxGqn" + - 1039709568 + - -113.24 + - -108.36 + - "acct_stat_cd_co20Q23EM8" + - -58.61 + - -73.54 + - -98.85 + - -43.24 + - 33.71 + - -11.95 + - -1818947456 + - -59.67 + - -62.73 + - -51.21 + - 50.64 + - 90.51 + - 1611144215000 + - - -1832175587 + - -991415524 + - "curr_ovrd_stat_cd_H1NkAqnwqe" + - -1908516905 + - -27.17 + - 102.83 + - "acct_stat_cd_pq3jTUtjF0" + - 91.15 + - -83.81 
+ - -69.61 + - 127.86 + - -86.14 + - 56.68 + - -1995257141 + - 6.71 + - 83.5 + - -32.51 + - -94.43 + - 8.3 + - 1611144215000 + - - 611330902 + - 679194351 + - "curr_ovrd_stat_cd_HIlzlZymnH" + - -254111972 + - 3.04 + - 9.18 + - "acct_stat_cd_PhHHTvGLTL" + - -75.39 + - 15.09 + - -18.1 + - -104.29 + - -49.22 + - -100.48 + - 730288655 + - 58.18 + - 8.3 + - 11.78 + - -91.13 + - 6.87 + - 1611144215000 + - - 826069039 + - 470439749 + - "curr_ovrd_stat_cd_8JQvcEi7yJ" + - 811087014 + - 85.17 + - -97.16 + - "acct_stat_cd_AFju4WMCgx" + - -108.14 + - 117.13 + - -93.99 + - 70.68 + - 107.57 + - 98.27 + - -891433275 + - 35.0 + - -33.36 + - 127.18 + - 25.36 + - -64.98 + - 1611144215000 + - - -784663900 + - -1192305947 + - "curr_ovrd_stat_cd_U4Ophb2kIQ" + - 515010670 + - 105.76 + - 3.51 + - "acct_stat_cd_Z1Kyb1mz7y" + - 9.64 + - -28.33 + - 60.18 + - 117.39 + - -24.18 + - -0.82 + - -1458522076 + - 105.11 + - -68.3 + - -16.45 + - -29.62 + - 47.34 + - 1611144215000 + - - 808471893 + - -2029597450 + - "curr_ovrd_stat_cd_NMy2UGhIrf" + - -551211114 + - -29.29 + - -92.54 + - "acct_stat_cd_HMcl6pIDg4" + - 6.99 + - -111.57 + - -124.1 + - 85.09 + - 113.05 + - -25.19 + - -928477688 + - 110.96 + - 14.01 + - 95.6 + - 4.15 + - -56.27 + - 1611144215000 + - - -414811981 + - -106781549 + - "curr_ovrd_stat_cd_z5gVcFFs0m" + - -1846401879 + - 11.12 + - -56.57 + - "acct_stat_cd_pbubmnmn1M" + - -63.85 + - -47.45 + - 124.76 + - -120.79 + - -70.46 + - -42.95 + - -1432475728 + - -123.98 + - 25.41 + - -95.39 + - -76.1 + - 50.44 + - 1611144215000 + - - 352609173 + - 748553820 + - "curr_ovrd_stat_cd_qgOUkDJ1rQ" + - -932519461 + - -80.07 + - 75.8 + - "acct_stat_cd_9AdRp2Spps" + - -102.28 + - 88.3 + - -15.75 + - 108.03 + - -127.15 + - 94.95 + - -1288349027 + - 100.95 + - 2.77 + - 81.25 + - -26.63 + - 70.67 + - 1611144215000 + name: t9 + - columns: + - "cust_id_an int32" + - "card_agmt_id_an int32" + - "pri_acct_id_an int32" + - "atta_card_ind string" + - "camp_org_id string" + - "prod_id string" + - 
"snp_gage_cd string" + - "crdt_card_lvl_cd string" + - "pin_card_dt date" + - "card_matr_yr_mth string" + - "sell_chnl_cd string" + - "card_org_cd string" + - "actv_chnl_cd string" + - "free_annl_fee_ind string" + - "annl_fee double" + - "bus_card_ind string" + - "matr_contn_card_ind string" + - "issu_card_dt timestamp" + - "actv_dt timestamp" + indexs: ["indext6:cust_id_an:actv_dt"] + rows: + - - 756930160 + - -1362270267 + - 820739577 + - "atta_card_ind_4oS8b63mVd" + - "camp_org_id_BFbsLHpdSR" + - "prod_id_3m2TZ0si7Z" + - "snp_gage_cd_onOB021pP1" + - "crdt_card_lvl_cd_vQuD1gTTwe" + - "2021-01-12" + - "card_matr_yr_mth_tDIUWOk5ia" + - "sell_chnl_cd_FLfurUmdfR" + - "card_org_cd_piAFoPGMLH" + - "actv_chnl_cd_mTHr98b5Es" + - "free_annl_fee_ind_Lq3eblqZFw" + - 68.08 + - "bus_card_ind_5KK6nTjOxr" + - "matr_contn_card_ind_S4hHwHdJNH" + - 1611144215000 + - 1611144215000 + - - 394465803 + - -1469812793 + - 46768555 + - "atta_card_ind_MEbCAC4sCs" + - "camp_org_id_gV8Zs3vkri" + - "prod_id_Pk1B3xv6JA" + - "snp_gage_cd_ZgHDu3hZbx" + - "crdt_card_lvl_cd_Etc9TpL5u7" + - "2021-01-02" + - "card_matr_yr_mth_AMweyZaygN" + - "sell_chnl_cd_dV661JROf4" + - "card_org_cd_8nvfaf471b" + - "actv_chnl_cd_nmjPCpzA37" + - "free_annl_fee_ind_0yvInU4aXe" + - -4.02 + - "bus_card_ind_gDjvmuKOo9" + - "matr_contn_card_ind_MgCwGwHYy4" + - 1611144215000 + - 1611144215000 + - - -1915196249 + - 715245555 + - -1037414536 + - "atta_card_ind_NBFRDWsXul" + - "camp_org_id_LUgZQkavDC" + - "prod_id_5HHVvMevjR" + - "snp_gage_cd_TLVPPbmIqP" + - "crdt_card_lvl_cd_f1khBG0oFM" + - "2021-01-08" + - "card_matr_yr_mth_0AoPAu7blU" + - "sell_chnl_cd_gmGs4O8BsG" + - "card_org_cd_fCbMNmDc7W" + - "actv_chnl_cd_SkuX9MfN7Z" + - "free_annl_fee_ind_oEUcJ2azyx" + - 108.44 + - "bus_card_ind_NWfBj4nd18" + - "matr_contn_card_ind_6ieA1VpR6O" + - 1611144215000 + - 1611144215000 + - - -1937671087 + - -1386163364 + - 936709843 + - "atta_card_ind_SchOBM3ADn" + - "camp_org_id_iIcs5gi51w" + - "prod_id_pNeYvSsCK9" + - 
"snp_gage_cd_qs3ZQWlyfm" + - "crdt_card_lvl_cd_Nzbp7Cy4v2" + - "2021-01-02" + - "card_matr_yr_mth_24GI4NhCum" + - "sell_chnl_cd_e6sZGx0UIr" + - "card_org_cd_mEaWKOr2eK" + - "actv_chnl_cd_5jHnIHbODx" + - "free_annl_fee_ind_mNxB0OUuqB" + - -94.58 + - "bus_card_ind_9twM1Sm8N6" + - "matr_contn_card_ind_Ze6N7bLuqc" + - 1611144215000 + - 1611144215000 + - - -1897243199 + - -1931817796 + - 390672335 + - "atta_card_ind_mN5Mw55PCb" + - "camp_org_id_Zn4STXeUD6" + - "prod_id_4uoNNgMc0p" + - "snp_gage_cd_fNOXthNs7J" + - "crdt_card_lvl_cd_ynL4AtIJa3" + - "2021-01-11" + - "card_matr_yr_mth_XROF2DVFVq" + - "sell_chnl_cd_0QLdMs0ENq" + - "card_org_cd_odnosB8A0R" + - "actv_chnl_cd_AjThMogiEt" + - "free_annl_fee_ind_Eem4dzghME" + - -72.53 + - "bus_card_ind_7AD96Q3i6Z" + - "matr_contn_card_ind_35MrxB5cXA" + - 1611144215000 + - 1611144215000 + - - -1853796531 + - -1258445777 + - -1547814111 + - "atta_card_ind_oeDA6We5EC" + - "camp_org_id_S7pZ2RJ4HP" + - "prod_id_DHeuN53pSv" + - "snp_gage_cd_aW92GS2DMu" + - "crdt_card_lvl_cd_tzSehkdxa8" + - "2021-01-17" + - "card_matr_yr_mth_bIlFSqWgT9" + - "sell_chnl_cd_SQE3eVhOwn" + - "card_org_cd_GiXhH8Ilw1" + - "actv_chnl_cd_BBCwH068cK" + - "free_annl_fee_ind_t5sz5QGjAq" + - 59.91 + - "bus_card_ind_2HCWPtpDe5" + - "matr_contn_card_ind_vAViU3mnTF" + - 1611144215000 + - 1611144215000 + - - 599351765 + - -2026344167 + - 406435567 + - "atta_card_ind_0Dc8HKmpeg" + - "camp_org_id_jY2qjsi2yM" + - "prod_id_nn1lrj5ZFX" + - "snp_gage_cd_SDeBM6a51B" + - "crdt_card_lvl_cd_LfX4N7yXil" + - "2021-01-12" + - "card_matr_yr_mth_ORvNy6K6TO" + - "sell_chnl_cd_sUJHlnXZS4" + - "card_org_cd_SzZoSXxmYR" + - "actv_chnl_cd_FuTmvFJMGv" + - "free_annl_fee_ind_00i8JxFXcx" + - 68.1 + - "bus_card_ind_pWrx4XVAKK" + - "matr_contn_card_ind_MgjGK92EfE" + - 1611144215000 + - 1611144215000 + - - 129929182 + - -812735353 + - -776403184 + - "atta_card_ind_caXhPAUCSn" + - "camp_org_id_wvDdQBr0bh" + - "prod_id_6OrANg0pDT" + - "snp_gage_cd_qZhYdtg1EX" + - "crdt_card_lvl_cd_WLmc0oczDJ" + - 
"2021-01-13" + - "card_matr_yr_mth_fJ7zh8PWuu" + - "sell_chnl_cd_vA3H163pUi" + - "card_org_cd_se7PxoQEWW" + - "actv_chnl_cd_IuJot5ylAH" + - "free_annl_fee_ind_PlRcZHiwDg" + - 40.89 + - "bus_card_ind_Q6vTzxFs7N" + - "matr_contn_card_ind_M8fvOjy5B0" + - 1611144215000 + - 1611144215000 + - - -1696305996 + - -178589482 + - 788546600 + - "atta_card_ind_MkfeU6kAPv" + - "camp_org_id_4Bn9Zgg4eM" + - "prod_id_1ah3kydsh7" + - "snp_gage_cd_ySl8kkcGst" + - "crdt_card_lvl_cd_L8aZAMygq2" + - "2021-01-07" + - "card_matr_yr_mth_ZXmdyVXukr" + - "sell_chnl_cd_UXPdm0d9B6" + - "card_org_cd_3QYp5QfEG6" + - "actv_chnl_cd_uRXCNeSnzt" + - "free_annl_fee_ind_WyScZ3hmyM" + - 5.45 + - "bus_card_ind_taVaX634Mh" + - "matr_contn_card_ind_ppVD5sqBfA" + - 1611144215000 + - 1611144215000 + name: t6 + - columns: + - "cust_id_an int32" + - "card_agmt_id_an int32" + - "fst_use_card_dt date" + - "ltst_use_card_dt date" + - "card_stat_cd string" + - "data_date timestamp" + indexs: ["indext7:cust_id_an:data_date"] + rows: + - - -1416323258 + - 1062068004 + - "2021-01-15" + - "2021-01-10" + - "card_stat_cd_I5RUbf7xEL" + - 1611144216000 + - - 433240030 + - 729717634 + - "2021-01-19" + - "2021-01-17" + - "card_stat_cd_wFB0gUWKQI" + - 1611144216000 + - - -1880955883 + - -1807838612 + - "2021-01-03" + - "2021-01-19" + - "card_stat_cd_rG5nhnzcV5" + - 1611144216000 + name: t7 + - columns: + - "cust_id_an int32" + - "crdt_card_net_incom_amt double" + - "int_incom_amt double" + - "annl_fee_incom_amt double" + - "cash_incom_amt double" + - "commsn_incom_amt double" + - "late_chrg_incom_amt double" + - "extras_fee_incom_amt double" + - "oth_incom_amt double" + - "amtbl_comm_fee double" + - "cap_cost_amt double" + - "provs_cost_amt double" + - "data_date timestamp" + indexs: ["indext5:cust_id_an:data_date"] + rows: + - - -586341746 + - -91.38 + - -103.8 + - -91.79 + - 77.09 + - -39.25 + - -104.55 + - -25.37 + - -42.69 + - 20.24 + - 121.05 + - 40.71 + - 1611144216000 + - - -903799431 + - 82.69 + - 56.49 + - -105.1 + 
- -126.73 + - 91.97 + - -113.83 + - -119.99 + - 126.4 + - 107.63 + - -1.88 + - 54.72 + - 1611144216000 + - - -2006396570 + - 101.8 + - -63.94 + - 7.75 + - 41.46 + - -42.03 + - 52.33 + - 39.98 + - 10.07 + - -29.53 + - 126.03 + - -63.56 + - 1611144216000 + - - -2035678095 + - -99.5 + - 83.92 + - -63.44 + - -45.01 + - -16.37 + - 105.96 + - -82.37 + - -76.09 + - -120.12 + - -116.56 + - 22.47 + - 1611144216000 + - - 634869109 + - -38.91 + - -0.08 + - 25.59 + - -80.43 + - -23.8 + - 127.24 + - 72.18 + - -84.52 + - -91.3 + - -64.03 + - -117.28 + - 1611144216000 + name: t5 + - columns: + - "cust_id_an int32" + - "crdt_lmt_cust double" + - "aval_lmt_cust double" + - "crdt_lmt_cash double" + - "aval_lmt_cash double" + - "data_date timestamp" + indexs: ["indext3:cust_id_an:data_date"] + rows: + - - -2001222170 + - -4.23 + - -101.67 + - 76.28 + - -83.94 + - 1611144216000 + - - -1514280701 + - -32.77 + - -73.6 + - -17.73 + - 118.89 + - 1611144216000 + - - 5866653 + - 25.81 + - 109.68 + - 62.1 + - -121.53 + - 1611144216000 + - - 10968234 + - 94.03 + - -27.92 + - 37.07 + - -42.7 + - 1611144216000 + - - -537371887 + - -120.6 + - 3.15 + - -22.5 + - -115.86 + - 1611144216000 + - - -904433195 + - 116.03 + - -44.09 + - 65.5 + - 100.47 + - 1611144216000 + - - -358019130 + - -74.14 + - 127.09 + - 30.8 + - 100.9 + - 1611144216000 + name: t3 + - columns: + - "cust_id_an int32" + - "cert_typ_cd string" + - "cert_area_cd string" + - "birth_dt date" + - "gender_typ_cd string" + - "nation_cd string" + - "marrrg_situ_cd string" + - "rsdnt_ind string" + - "citic_grp_emp_typ_cd string" + - "cust_stat_cd string" + - "open_cust_dt date" + - "open_cust_org_id string" + - "open_cust_chnl_typ_cd string" + - "cust_belg_bank_cd string" + indexs: ["indext2:cust_id_an"] + rows: + - - -164930359 + - "cert_typ_cd_cGpwz0DGMQ" + - "cert_area_cd_HecqmfKfQ7" + - "2021-01-09" + - "gender_typ_cd_HlbTDsKxLx" + - "nation_cd_IcAmK6iCHk" + - "marrrg_situ_cd_JzdSTSvnI2" + - "rsdnt_ind_qV6EO9H2E4" + - 
"citic_grp_emp_typ_cd_mZjOs6AvEm" + - "cust_stat_cd_pL86avtzOm" + - "2021-01-12" + - "open_cust_org_id_TgCKG40Joz" + - "open_cust_chnl_typ_cd_cBUBu2Wm6D" + - "cust_belg_bank_cd_UBZAxmSLUW" + - - -43274786 + - "cert_typ_cd_QetmS9wxcU" + - "cert_area_cd_rrltclnYQU" + - "2021-01-05" + - "gender_typ_cd_DzQCyg6Ui2" + - "nation_cd_tasmOg7NAe" + - "marrrg_situ_cd_t43rdVAhR5" + - "rsdnt_ind_qZOBkBtacn" + - "citic_grp_emp_typ_cd_Xp6gvlxr7o" + - "cust_stat_cd_R9lp6oM2x8" + - "2021-01-03" + - "open_cust_org_id_7rnyNbu4Yu" + - "open_cust_chnl_typ_cd_mu1leQa1Gx" + - "cust_belg_bank_cd_XLIXJnEtRf" + name: t2 + - columns: + - "cust_id_an int32" + - "tx_time timestamp" + - "crdt_card_tx_cd string" + - "tx_amt_to_rmb double" + - "mercht_typ_cd string" + - "cross_bord_ind string" + - "tx_desc_an int32" + indexs: ["indext4:cust_id_an:tx_time"] + rows: + - - 951632459 + - 1611144216000 + - "crdt_card_tx_cd_6j6bjhDy9o" + - 110.73 + - "mercht_typ_cd_feZu3kqy1P" + - "cross_bord_ind_j5RBoKax1g" + - -1752891717 + - - 1033871191 + - 1611144216000 + - "crdt_card_tx_cd_bDs5fzy7vx" + - -20.85 + - "mercht_typ_cd_Ponis59I95" + - "cross_bord_ind_3ErQHlOtLq" + - 24112845 + - - 19144738 + - 1611144216000 + - "crdt_card_tx_cd_G2CZyldEgg" + - -94.15 + - "mercht_typ_cd_xM8BN1jxf5" + - "cross_bord_ind_MuFWwfgxqi" + - -1625982017 + - - -709159498 + - 1611144216000 + - "crdt_card_tx_cd_SWmMk5bGbe" + - -104.9 + - "mercht_typ_cd_F8SmujshlU" + - "cross_bord_ind_Cja6dv7mJt" + - 734595537 + - - 407401011 + - 1611144216000 + - "crdt_card_tx_cd_Q2bYofa0LV" + - 118.56 + - "mercht_typ_cd_raO5rr5AZW" + - "cross_bord_ind_FtZc0Pd2e8" + - -347783598 + - - -274181216 + - 1611144216000 + - "crdt_card_tx_cd_SrvekEh3VO" + - -36.7 + - "mercht_typ_cd_wkQggxQwfB" + - "cross_bord_ind_lIkIIKdrmU" + - -1929744820 + - - -1693120077 + - 1611144216000 + - "crdt_card_tx_cd_crzOFQUvEV" + - -63.78 + - "mercht_typ_cd_gyHnXWDCcr" + - "cross_bord_ind_lSjZJSUzjz" + - -1367456280 + - - -1441604939 + - 1611144216000 + - 
"crdt_card_tx_cd_gLqQvmRyub" + - 58.01 + - "mercht_typ_cd_ltgNcE28wj" + - "cross_bord_ind_ruileQrE9G" + - -26181260 + name: t4 + sql: |- + select * from + ( + select + id as id_1, + `id` as t1_id_original_0, + `cust_id_an` as t1_cust_id_an_original_1, + `ins_date` as t1_ins_date_original_2, + `Label` as t1_Label_original_3, + dayofweek(timestamp(`ins_date`)) as t1_ins_date_dayofweek_138 + from + `t1` + ) + as out0 + last join + ( + select + t1.id as id_5, + `t2_cust_id_an`.`birth_dt` as t2_birth_dt_multi_direct_4, + `t2_cust_id_an`.`cert_area_cd` as t2_cert_area_cd_multi_direct_5, + `t2_cust_id_an`.`cert_typ_cd` as t2_cert_typ_cd_multi_direct_6, + `t2_cust_id_an`.`citic_grp_emp_typ_cd` as t2_citic_grp_emp_typ_cd_multi_direct_7, + `t2_cust_id_an`.`cust_belg_bank_cd` as t2_cust_belg_bank_cd_multi_direct_8, + `t2_cust_id_an`.`cust_stat_cd` as t2_cust_stat_cd_multi_direct_9, + `t2_cust_id_an`.`gender_typ_cd` as t2_gender_typ_cd_multi_direct_10, + `t2_cust_id_an`.`marrrg_situ_cd` as t2_marrrg_situ_cd_multi_direct_11, + `t2_cust_id_an`.`nation_cd` as t2_nation_cd_multi_direct_12, + `t2_cust_id_an`.`open_cust_chnl_typ_cd` as t2_open_cust_chnl_typ_cd_multi_direct_13, + `t2_cust_id_an`.`open_cust_dt` as t2_open_cust_dt_multi_direct_14, + `t2_cust_id_an`.`open_cust_org_id` as t2_open_cust_org_id_multi_direct_15, + `t2_cust_id_an`.`rsdnt_ind` as t2_rsdnt_ind_multi_direct_16, + `t3_cust_id_an__ins_date_0_10`.`aval_lmt_cash` as t3_aval_lmt_cash_multi_last_value_17, + `t3_cust_id_an__ins_date_0_10`.`aval_lmt_cust` as t3_aval_lmt_cust_multi_last_value_18, + `t3_cust_id_an__ins_date_0_10`.`crdt_lmt_cash` as t3_crdt_lmt_cash_multi_last_value_19, + `t3_cust_id_an__ins_date_0_10`.`crdt_lmt_cust` as t3_crdt_lmt_cust_multi_last_value_20, + `t3_cust_id_an__ins_date_0_10`.`data_date` as t3_data_date_multi_last_value_21, + `t5_cust_id_an__ins_date_0_10`.`amtbl_comm_fee` as t5_amtbl_comm_fee_multi_last_value_22, + `t5_cust_id_an__ins_date_0_10`.`annl_fee_incom_amt` as 
t5_annl_fee_incom_amt_multi_last_value_23, + `t5_cust_id_an__ins_date_0_10`.`cap_cost_amt` as t5_cap_cost_amt_multi_last_value_24, + `t5_cust_id_an__ins_date_0_10`.`cash_incom_amt` as t5_cash_incom_amt_multi_last_value_25, + `t5_cust_id_an__ins_date_0_10`.`commsn_incom_amt` as t5_commsn_incom_amt_multi_last_value_26, + `t5_cust_id_an__ins_date_0_10`.`crdt_card_net_incom_amt` as t5_crdt_card_net_incom_amt_multi_last_value_27, + `t5_cust_id_an__ins_date_0_10`.`data_date` as t5_data_date_multi_last_value_28, + `t5_cust_id_an__ins_date_0_10`.`extras_fee_incom_amt` as t5_extras_fee_incom_amt_multi_last_value_29, + `t5_cust_id_an__ins_date_0_10`.`int_incom_amt` as t5_int_incom_amt_multi_last_value_30, + `t5_cust_id_an__ins_date_0_10`.`late_chrg_incom_amt` as t5_late_chrg_incom_amt_multi_last_value_31, + `t5_cust_id_an__ins_date_0_10`.`oth_incom_amt` as t5_oth_incom_amt_multi_last_value_32, + `t5_cust_id_an__ins_date_0_10`.`provs_cost_amt` as t5_provs_cost_amt_multi_last_value_33 + from + `t1` + last join `t2` as `t2_cust_id_an` on `t1`.`cust_id_an` = `t2_cust_id_an`.`cust_id_an` + last join `t3` as `t3_cust_id_an__ins_date_0_10` order by t3_cust_id_an__ins_date_0_10.`data_date` on `t1`.`cust_id_an` = `t3_cust_id_an__ins_date_0_10`.`cust_id_an` + last join `t5` as `t5_cust_id_an__ins_date_0_10` order by t5_cust_id_an__ins_date_0_10.`data_date` on `t1`.`cust_id_an` = `t5_cust_id_an__ins_date_0_10`.`cust_id_an`) + as out1 + on out0.id_1 = out1.id_5 + last join + ( + select + id as id_35, + min(`tx_amt_to_rmb`) over t4_cust_id_an_tx_time_0s_1d as t4_tx_amt_to_rmb_multi_min_34, + avg(`tx_amt_to_rmb`) over t4_cust_id_an_tx_time_0s_1d as t4_tx_amt_to_rmb_multi_avg_35, + fz_topn_frequency(`crdt_card_tx_cd`, 3) over t4_cust_id_an_tx_time_0_100 as t4_crdt_card_tx_cd_multi_top3frequency_36, + distinct_count(`crdt_card_tx_cd`) over t4_cust_id_an_tx_time_0_100 as t4_crdt_card_tx_cd_multi_unique_count_37, + distinct_count(`cross_bord_ind`) over t4_cust_id_an_tx_time_0_100 as 
t4_cross_bord_ind_multi_unique_count_38, + fz_topn_frequency(`cross_bord_ind`, 3) over t4_cust_id_an_tx_time_0_100 as t4_cross_bord_ind_multi_top3frequency_39, + distinct_count(`mercht_typ_cd`) over t4_cust_id_an_tx_time_0_10 as t4_mercht_typ_cd_multi_unique_count_40, + distinct_count(`mercht_typ_cd`) over t4_cust_id_an_tx_time_0_100 as t4_mercht_typ_cd_multi_unique_count_41, + distinct_count(`tx_desc_an`) over t4_cust_id_an_tx_time_0_10 as t4_tx_desc_an_multi_unique_count_42, + distinct_count(`tx_desc_an`) over t4_cust_id_an_tx_time_0_100 as t4_tx_desc_an_multi_unique_count_43 + from + (select `cust_id_an` as `cust_id_an`, `ins_date` as `tx_time`, '' as `crdt_card_tx_cd`, double(0) as `tx_amt_to_rmb`, '' as `mercht_typ_cd`, '' as `cross_bord_ind`, int(0) as `tx_desc_an`, id from `t1`) + window t4_cust_id_an_tx_time_0s_1d as ( + UNION (select `cust_id_an`, `tx_time`, `crdt_card_tx_cd`, `tx_amt_to_rmb`, `mercht_typ_cd`, `cross_bord_ind`, `tx_desc_an`, int(0) as id from `t4`) partition by `cust_id_an` order by `tx_time` rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t4_cust_id_an_tx_time_0_100 as ( + UNION (select `cust_id_an`, `tx_time`, `crdt_card_tx_cd`, `tx_amt_to_rmb`, `mercht_typ_cd`, `cross_bord_ind`, `tx_desc_an`, int(0) as id from `t4`) partition by `cust_id_an` order by `tx_time` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW), + t4_cust_id_an_tx_time_0_10 as ( + UNION (select `cust_id_an`, `tx_time`, `crdt_card_tx_cd`, `tx_amt_to_rmb`, `mercht_typ_cd`, `cross_bord_ind`, `tx_desc_an`, int(0) as id from `t4`) partition by `cust_id_an` order by `tx_time` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW)) + as out2 + on out0.id_1 = out2.id_35 + last join + ( + select + id as id_45, + min(`annl_fee`) over t6_cust_id_an_actv_dt_0_100 as t6_annl_fee_multi_min_44, + min(`annl_fee`) over t6_cust_id_an_actv_dt_0_10 as t6_annl_fee_multi_min_45, + min(`card_agmt_id_an`) over t6_cust_id_an_actv_dt_0_10 as 
t6_card_agmt_id_an_multi_min_46, + avg(`card_agmt_id_an`) over t6_cust_id_an_actv_dt_0_10 as t6_card_agmt_id_an_multi_avg_47, + min(`pri_acct_id_an`) over t6_cust_id_an_actv_dt_0_100 as t6_pri_acct_id_an_multi_min_48, + min(`pri_acct_id_an`) over t6_cust_id_an_actv_dt_0_10 as t6_pri_acct_id_an_multi_min_49, + fz_topn_frequency(`actv_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_actv_chnl_cd_multi_top3frequency_50, + fz_topn_frequency(`actv_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_actv_chnl_cd_multi_top3frequency_51, + distinct_count(`atta_card_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_atta_card_ind_multi_unique_count_52, + fz_topn_frequency(`atta_card_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_atta_card_ind_multi_top3frequency_53, + fz_topn_frequency(`bus_card_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_bus_card_ind_multi_top3frequency_54, + distinct_count(`bus_card_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_bus_card_ind_multi_unique_count_55, + distinct_count(`camp_org_id`) over t6_cust_id_an_actv_dt_0_100 as t6_camp_org_id_multi_unique_count_56, + distinct_count(`camp_org_id`) over t6_cust_id_an_actv_dt_0_10 as t6_camp_org_id_multi_unique_count_57, + fz_topn_frequency(`card_matr_yr_mth`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_card_matr_yr_mth_multi_top3frequency_58, + fz_topn_frequency(`card_matr_yr_mth`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_card_matr_yr_mth_multi_top3frequency_59, + fz_topn_frequency(`card_org_cd`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_card_org_cd_multi_top3frequency_60, + distinct_count(`card_org_cd`) over t6_cust_id_an_actv_dt_0s_32d as t6_card_org_cd_multi_unique_count_61, + distinct_count(`crdt_card_lvl_cd`) over t6_cust_id_an_actv_dt_0_100 as t6_crdt_card_lvl_cd_multi_unique_count_62, + distinct_count(`crdt_card_lvl_cd`) over t6_cust_id_an_actv_dt_0_10 as t6_crdt_card_lvl_cd_multi_unique_count_63, + fz_topn_frequency(`free_annl_fee_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as 
t6_free_annl_fee_ind_multi_top3frequency_64, + distinct_count(`free_annl_fee_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_free_annl_fee_ind_multi_unique_count_65, + fz_topn_frequency(`matr_contn_card_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_matr_contn_card_ind_multi_top3frequency_66, + distinct_count(`matr_contn_card_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_matr_contn_card_ind_multi_unique_count_67, + distinct_count(`prod_id`) over t6_cust_id_an_actv_dt_0_100 as t6_prod_id_multi_unique_count_68, + fz_topn_frequency(`prod_id`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_prod_id_multi_top3frequency_69, + fz_topn_frequency(`sell_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_sell_chnl_cd_multi_top3frequency_70, + fz_topn_frequency(`sell_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_sell_chnl_cd_multi_top3frequency_71, + fz_topn_frequency(`snp_gage_cd`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_snp_gage_cd_multi_top3frequency_72, + fz_topn_frequency(`snp_gage_cd`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_snp_gage_cd_multi_top3frequency_73 + from + (select `cust_id_an` as `cust_id_an`, int(0) as `card_agmt_id_an`, int(0) as `pri_acct_id_an`, '' as `atta_card_ind`, '' as `camp_org_id`, '' as `prod_id`, '' as `snp_gage_cd`, '' as `crdt_card_lvl_cd`, date('2019-07-18') as `pin_card_dt`, '' as `card_matr_yr_mth`, '' as `sell_chnl_cd`, '' as `card_org_cd`, '' as `actv_chnl_cd`, '' as `free_annl_fee_ind`, double(0) as `annl_fee`, '' as `bus_card_ind`, '' as `matr_contn_card_ind`, timestamp('2019-07-18 09:20:20') as `issu_card_dt`, `ins_date` as `actv_dt`, id from `t1`) + window t6_cust_id_an_actv_dt_0_100 as ( + UNION (select `cust_id_an`, `card_agmt_id_an`, `pri_acct_id_an`, `atta_card_ind`, `camp_org_id`, `prod_id`, `snp_gage_cd`, `crdt_card_lvl_cd`, `pin_card_dt`, `card_matr_yr_mth`, `sell_chnl_cd`, `card_org_cd`, `actv_chnl_cd`, `free_annl_fee_ind`, `annl_fee`, `bus_card_ind`, `matr_contn_card_ind`, `issu_card_dt`, `actv_dt`, int(0) as id from `t6`) 
partition by `cust_id_an` order by `actv_dt` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW), + t6_cust_id_an_actv_dt_0_10 as ( + UNION (select `cust_id_an`, `card_agmt_id_an`, `pri_acct_id_an`, `atta_card_ind`, `camp_org_id`, `prod_id`, `snp_gage_cd`, `crdt_card_lvl_cd`, `pin_card_dt`, `card_matr_yr_mth`, `sell_chnl_cd`, `card_org_cd`, `actv_chnl_cd`, `free_annl_fee_ind`, `annl_fee`, `bus_card_ind`, `matr_contn_card_ind`, `issu_card_dt`, `actv_dt`, int(0) as id from `t6`) partition by `cust_id_an` order by `actv_dt` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW), + t6_cust_id_an_actv_dt_0s_32d as ( + UNION (select `cust_id_an`, `card_agmt_id_an`, `pri_acct_id_an`, `atta_card_ind`, `camp_org_id`, `prod_id`, `snp_gage_cd`, `crdt_card_lvl_cd`, `pin_card_dt`, `card_matr_yr_mth`, `sell_chnl_cd`, `card_org_cd`, `actv_chnl_cd`, `free_annl_fee_ind`, `annl_fee`, `bus_card_ind`, `matr_contn_card_ind`, `issu_card_dt`, `actv_dt`, int(0) as id from `t6`) partition by `cust_id_an` order by `actv_dt` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW)) + as out3 + on out0.id_1 = out3.id_45 + last join + ( + select + id as id_75, + fz_topn_frequency(`card_agmt_id_an`, 3) over t7_cust_id_an_data_date_0s_1d as t7_card_agmt_id_an_multi_top3frequency_74, + fz_topn_frequency(`card_agmt_id_an`, 3) over t7_cust_id_an_data_date_0s_32d as t7_card_agmt_id_an_multi_top3frequency_75, + fz_topn_frequency(`card_stat_cd`, 3) over t7_cust_id_an_data_date_0_100 as t7_card_stat_cd_multi_top3frequency_76, + distinct_count(`card_stat_cd`) over t7_cust_id_an_data_date_0_100 as t7_card_stat_cd_multi_unique_count_77 + from + (select `cust_id_an` as `cust_id_an`, int(0) as `card_agmt_id_an`, date('2019-07-18') as `fst_use_card_dt`, date('2019-07-18') as `ltst_use_card_dt`, '' as `card_stat_cd`, `ins_date` as `data_date`, id from `t1`) + window t7_cust_id_an_data_date_0s_1d as ( + UNION (select `cust_id_an`, `card_agmt_id_an`, `fst_use_card_dt`, 
`ltst_use_card_dt`, `card_stat_cd`, `data_date`, int(0) as id from `t7`) partition by `cust_id_an` order by `data_date` rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t7_cust_id_an_data_date_0s_32d as ( + UNION (select `cust_id_an`, `card_agmt_id_an`, `fst_use_card_dt`, `ltst_use_card_dt`, `card_stat_cd`, `data_date`, int(0) as id from `t7`) partition by `cust_id_an` order by `data_date` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t7_cust_id_an_data_date_0_100 as ( + UNION (select `cust_id_an`, `card_agmt_id_an`, `fst_use_card_dt`, `ltst_use_card_dt`, `card_stat_cd`, `data_date`, int(0) as id from `t7`) partition by `cust_id_an` order by `data_date` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW)) + as out4 + on out0.id_1 = out4.id_75 + last join + ( + select + id as id_79, + min(`agmt_id_an`) over t8_cust_id_an_open_acct_dt_0_10 as t8_agmt_id_an_multi_min_78, + max(`agmt_id_an`) over t8_cust_id_an_open_acct_dt_0_10 as t8_agmt_id_an_multi_max_79, + fz_topn_frequency(`atta_acct_ind`, 3) over t8_cust_id_an_open_acct_dt_0s_32d as t8_atta_acct_ind_multi_top3frequency_80, + distinct_count(`atta_acct_ind`) over t8_cust_id_an_open_acct_dt_0s_32d as t8_atta_acct_ind_multi_unique_count_81 + from + (select `cust_id_an` as `cust_id_an`, int(0) as `agmt_id_an`, '' as `atta_acct_ind`, date('2019-07-18') as `stmt_dt`, `ins_date` as `open_acct_dt`, timestamp('2019-07-18 09:20:20') as `clos_acct_dt`, id from `t1`) + window t8_cust_id_an_open_acct_dt_0_10 as ( + UNION (select `cust_id_an`, `agmt_id_an`, `atta_acct_ind`, `stmt_dt`, `open_acct_dt`, `clos_acct_dt`, int(0) as id from `t8`) partition by `cust_id_an` order by `open_acct_dt` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW), + t8_cust_id_an_open_acct_dt_0s_32d as ( + UNION (select `cust_id_an`, `agmt_id_an`, `atta_acct_ind`, `stmt_dt`, `open_acct_dt`, `clos_acct_dt`, int(0) as id from `t8`) partition by `cust_id_an` order by 
`open_acct_dt` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW)) + as out5 + on out0.id_1 = out5.id_79 + last join + ( + select + id as id_83, + min(`amtbl_od_bal`) over t9_cust_id_an_data_date_0s_32d as t9_amtbl_od_bal_multi_min_82, + avg(`amtbl_od_bal`) over t9_cust_id_an_data_date_0_10 as t9_amtbl_od_bal_multi_avg_83, + min(`cash_od_bal`) over t9_cust_id_an_data_date_0_100 as t9_cash_od_bal_multi_min_84, + min(`cash_od_bal`) over t9_cust_id_an_data_date_0_10 as t9_cash_od_bal_multi_min_85, + min(`consm_od_bal`) over t9_cust_id_an_data_date_0_100 as t9_consm_od_bal_multi_min_86, + min(`consm_od_bal`) over t9_cust_id_an_data_date_0_10 as t9_consm_od_bal_multi_min_87, + max(`crdt_card_point`) over t9_cust_id_an_data_date_0_100 as t9_crdt_card_point_multi_max_88, + max(`crdt_card_point`) over t9_cust_id_an_data_date_0_10 as t9_crdt_card_point_multi_max_89, + min(`curr_yr_crdt_card_point`) over t9_cust_id_an_data_date_0_100 as t9_curr_yr_crdt_card_point_multi_min_90, + min(`curr_yr_crdt_card_point`) over t9_cust_id_an_data_date_0_10 as t9_curr_yr_crdt_card_point_multi_min_91, + max(`last_mth_consm_cnt`) over t9_cust_id_an_data_date_0_100 as t9_last_mth_consm_cnt_multi_max_92, + max(`last_mth_consm_cnt`) over t9_cust_id_an_data_date_0_10 as t9_last_mth_consm_cnt_multi_max_93, + min(`last_mth_stmt_amt`) over t9_cust_id_an_data_date_0_100 as t9_last_mth_stmt_amt_multi_min_94, + min(`last_mth_stmt_amt`) over t9_cust_id_an_data_date_0_10 as t9_last_mth_stmt_amt_multi_min_95, + min(`m_amtbl_amt_accm`) over t9_cust_id_an_data_date_0s_32d as t9_m_amtbl_amt_accm_multi_min_96, + avg(`m_amtbl_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_amtbl_amt_accm_multi_avg_97, + min(`m_cash_amt_accm`) over t9_cust_id_an_data_date_0_100 as t9_m_cash_amt_accm_multi_min_98, + min(`m_cash_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_cash_amt_accm_multi_min_99, + min(`m_consm_amt_accm`) over t9_cust_id_an_data_date_0_100 as 
t9_m_consm_amt_accm_multi_min_100, + min(`m_consm_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_consm_amt_accm_multi_min_101, + avg(`m_ovrd_bal_accm`) over t9_cust_id_an_data_date_0_100 as t9_m_ovrd_bal_accm_multi_avg_102, + avg(`m_ovrd_bal_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_ovrd_bal_accm_multi_avg_103, + max(`m_spl_pay_amt_accm`) over t9_cust_id_an_data_date_0_100 as t9_m_spl_pay_amt_accm_multi_max_104, + max(`m_spl_pay_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_spl_pay_amt_accm_multi_max_105, + avg(`ovrd_bal`) over t9_cust_id_an_data_date_0_100 as t9_ovrd_bal_multi_avg_106, + avg(`ovrd_bal`) over t9_cust_id_an_data_date_0_10 as t9_ovrd_bal_multi_avg_107, + max(`spl_pay_bal`) over t9_cust_id_an_data_date_0_100 as t9_spl_pay_bal_multi_max_108, + max(`spl_pay_bal`) over t9_cust_id_an_data_date_0_10 as t9_spl_pay_bal_multi_max_109, + fz_topn_frequency(`acct_stat_cd`, 3) over t9_cust_id_an_data_date_0_100 as t9_acct_stat_cd_multi_top3frequency_110, + distinct_count(`acct_stat_cd`) over t9_cust_id_an_data_date_0_100 as t9_acct_stat_cd_multi_unique_count_111, + fz_topn_frequency(`agmt_id_an`, 3) over t9_cust_id_an_data_date_0s_1d as t9_agmt_id_an_multi_top3frequency_112, + fz_topn_frequency(`agmt_id_an`, 3) over t9_cust_id_an_data_date_0s_32d as t9_agmt_id_an_multi_top3frequency_113, + fz_topn_frequency(`curr_ovrd_stat_cd`, 3) over t9_cust_id_an_data_date_0_100 as t9_curr_ovrd_stat_cd_multi_top3frequency_114, + fz_topn_frequency(`curr_ovrd_stat_cd`, 3) over t9_cust_id_an_data_date_0_10 as t9_curr_ovrd_stat_cd_multi_top3frequency_115, + fz_topn_frequency(`curr_yr_ovrd_cnt`, 3) over t9_cust_id_an_data_date_0_100 as t9_curr_yr_ovrd_cnt_multi_top3frequency_116, + fz_topn_frequency(`curr_yr_ovrd_cnt`, 3) over t9_cust_id_an_data_date_0_10 as t9_curr_yr_ovrd_cnt_multi_top3frequency_117 + from + (select `cust_id_an` as `cust_id_an`, int(0) as `agmt_id_an`, '' as `curr_ovrd_stat_cd`, int(0) as `curr_yr_ovrd_cnt`, double(0) as 
`curr_yr_crdt_card_point`, double(0) as `crdt_card_point`, '' as `acct_stat_cd`, double(0) as `consm_od_bal`, double(0) as `cash_od_bal`, double(0) as `amtbl_od_bal`, double(0) as `spl_pay_bal`, double(0) as `ovrd_bal`, double(0) as `last_mth_stmt_amt`, int(0) as `last_mth_consm_cnt`, double(0) as `m_consm_amt_accm`, double(0) as `m_cash_amt_accm`, double(0) as `m_amtbl_amt_accm`, double(0) as `m_spl_pay_amt_accm`, double(0) as `m_ovrd_bal_accm`, `ins_date` as `data_date`, id from `t1`) + window t9_cust_id_an_data_date_0s_32d as ( + UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, `m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t9_cust_id_an_data_date_0_10 as ( + UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, `m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW), + t9_cust_id_an_data_date_0_100 as ( + UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, 
`m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW), + t9_cust_id_an_data_date_0s_1d as ( + UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, `m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW)) + as out6 + on out0.id_1 = out6.id_83 + ; + expect: + success: true diff --git a/cases/integration_test/spark/test_fqz_studio.yaml b/cases/integration_test/spark/test_fqz_studio.yaml new file mode 100644 index 00000000000..cbbbaf5a5ec --- /dev/null +++ b/cases/integration_test/spark/test_fqz_studio.yaml @@ -0,0 +1,363 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# 单表反欺诈场景 +db: test_fqz +cases: + - id: 1 + desc: 单表-反欺诈场景 + inputs: + - columns: [ "id int32", "bilabel int32", "D_TXN_TIME_std timestamp", "C_SK_SEQ string", "D_RHX_DATE_KEY string", "N_CMX_TRAN_ID int32", "D_TXN_DATE string", "D_TXN_TIME string", "C_ACCT_TYPE string" , "C_ACCT_CARD_NO string", "C_ACCT_CARD_FLAG string", "C_ACCT_ZONE string", "N_ISSUE_AMT double", "N_TXN_AMT_RMB double", "C_ISSUE_CURR string", "C_CUSTOMER_ID string", "N_TRXCODE string", "C_DAILY_OPENBUY double", "C_INDI_OPENBUY double", "N_PHONE_NO string", "N_BOUND_PHONE_NO string", "C_MAC_ADDR string", "C_TXN_IP string", "C_MERCH_ID string", "C_MAC_STAT string", "C_IP_STAT string", "C_GREYLIST_FLAG string", "C_RULE_ACT string", "RRF_RULE_DATA string", "RRF_BUILD_NUM int32", "C_PAY_NAME string", "C_TXN_TYPE string", "C_PAYEE_ACCT string", "C_PAYEE_NAME string", "C_PAYEE_BANK_NAME string", "C_TXN_CHANNEL string", "C_SERIAL_NO string", "D_REGISTER_DATETIME double", "C_PAYEE_ACCT_ZONE string", "C_COMMONLY_PAYEE_FLAG string", "C_TRUST_PAYEE_FLAG string", "C_MEDIUM_NO string", "C_TRUST_CLIENT_FLAG string", "C_VERIFY_TYPE string", "C_PAYEE_CUSTOMER_ID string", "C_CPU_ID string", "C_MEMORY_CAPACITY double", "C_SYSTEM_VERSION string", "C_BROWSER_VERSION string", "C_BROWSER_LANG string", "C_SCREEN_RESOLUTION double", "C_APP_VERSION string", "C_FACTORY_INFO string", "C_WHITE_CARD_FLAG string", "C_ACCOUNT_BALANCE double", "C_MOBILE_LOCATION double", "C_DEAL_RESULT string", "C_FINAL_DEAL_TYPE string", "N_MODEL_SCORE double", "C_TXN_TYPE_TMP string", "N_UNIT_PRICE double", "N_TXN_COUNT double", "PROV string", "CITY string", "MER_OPTIMESP string", "MER_GROUPID int32", "MER_ZONENO string", "MER_BRNO string", "MER_SHOP_BASE string", "MER_PACCTYPE int32", "MER_FRATEPC string", "MER_SPECACCT int32", "MER_SWITCH int32", "MER_AMTLIMI1 int32", "MER_RATLIMI1 int32", "MER_BUSIAREA string", "CUS_Gender_Cd int32", "CUS_Ethnic_Cd int32", "CUS_Birth_Cty_Cd string", "CUS_Edu_Degree_Cd int32", "CUS_Marriage_Status_Cd 
int32", "CUS_Vip_Cust_Ind int32", "CUS_Icbc_Emply_Ind int32", "CUS_Dom_Resdnt_Ind int32", "CUS_Belong_Corp_Type_Cd int32", "CUS_Proper_Career_Cd string", "CUS_Proper_Industry_Cd int32", "CUS_Integrality_Ind_Cd int32", "CUS_Integrality_Check_Result int32", "CUS_Identity_Actl_Result_Type_Cd int32", "CUS_Cert_Provi_Situ_Type_Cd int32", "CUS_Invalid_Acct_Cert_Actl_Result int32", "CUS_Start_Dt string", "CUS_Birth_Dt string", "CUS_Career_Cd string", "CARDSTAT int32", "CARDKIND int32", "SYNFLAG int32", "GOLDFLAG int32", "OPENDATE date", "CDSQUOTA int64", "CDTQUOTA int64", "BT_CARDSTAT int32", "BT_ACTCHANEL int32", "BT_ACTDATE date", "BT_SALECODE string" ] + indexs: ["index1:C_ACCT_CARD_NO:D_TXN_TIME_std", "index2:N_BOUND_PHONE_NO:D_TXN_TIME_std", "index3:N_PHONE_NO:D_TXN_TIME_std", "index4:C_CUSTOMER_ID:D_TXN_TIME_std"] + rows: + - [33, 250, 1609236827000, "c_sk_seq", "d_rhx_date_key", 11, "d_txn_date", "d_txn_time", "c_acct_type" , "c_acct_card_no", "c_acct_card_flag", "c_acct_zone", 12.00, 13.14, "c_issue_curr", "c_customer_id", "n_trxcode", 14.12, 128.99, "n_phone_no", "n_bound_phone_no", "c_mac_addr", "c_txn_ip", "c_merch_id", "c_mac_stat", "c_ip_stat", "c_greylist_flag", "c_rule_act", "rrf_rule_data", 19, "c_pay_name", "c_txn_type", "c_payee_acct", "c_payee_name", "c_payee_bank_name", "c_txn_channel", "c_serial_no", 88.88, "c_payee_acct_zone", "c_commonly_payee_flag", "c_trust_payee_flag", "c_medium_no", "c_trust_client_flag", "c_verify_type", "c_payee_customer_id", "c_cpu_id", 77.07, "c_system_version", "c_browser_version", "c_browser_lang", 100.00, "c_app_version", "c_factory_info", "c_white_card_flag", 99.19, 67.81, "c_deal_result", "c_final_deal_type", 34.43, "c_txn_type_tmp", 88.08, 128.12, "prov", "city", "mer_optimesp", 939, "mer_zoneno", "mer_brno", "mer_shop_base", 477, "mer_fratepc", 122, 355, 223, 211, "mer_busiarea", 334, 444, "cus_birth_cty_cd", 555, 566, 577, 588, 42020, 314, "cus_proper_career_cd", 333, 41212, 666, 677, 688, 699, "cus_start_dt", 
"cus_birth_dt", "cus_career_cd", 61010, 777, 711, 733, "2020-12-22", 122, 999, 977, 432, "2021-01-02", "bt_salecode" ] + sql: | + select + id as id_1, + id as t1_id_original_0, + bilabel as t1_bilabel_original_1, + D_TXN_TIME_std as t1_D_TXN_TIME_std_original_2, + C_SK_SEQ as t1_C_SK_SEQ_original_3, + D_RHX_DATE_KEY as t1_D_RHX_DATE_KEY_original_4, + N_CMX_TRAN_ID as t1_N_CMX_TRAN_ID_original_5, + D_TXN_DATE as t1_D_TXN_DATE_original_6, + D_TXN_TIME as t1_D_TXN_TIME_original_7, + C_ACCT_TYPE as t1_C_ACCT_TYPE_original_8, + C_ACCT_CARD_NO as t1_C_ACCT_CARD_NO_original_9, + C_ACCT_CARD_FLAG as t1_C_ACCT_CARD_FLAG_original_10, + C_ACCT_ZONE as t1_C_ACCT_ZONE_original_11, + N_ISSUE_AMT as t1_N_ISSUE_AMT_original_12, + N_TXN_AMT_RMB as t1_N_TXN_AMT_RMB_original_13, + C_ISSUE_CURR as t1_C_ISSUE_CURR_original_14, + C_CUSTOMER_ID as t1_C_CUSTOMER_ID_original_15, + N_TRXCODE as t1_N_TRXCODE_original_16, + C_DAILY_OPENBUY as t1_C_DAILY_OPENBUY_original_17, + C_INDI_OPENBUY as t1_C_INDI_OPENBUY_original_18, + N_PHONE_NO as t1_N_PHONE_NO_original_19, + N_BOUND_PHONE_NO as t1_N_BOUND_PHONE_NO_original_20, + C_MAC_ADDR as t1_C_MAC_ADDR_original_21, + C_TXN_IP as t1_C_TXN_IP_original_22, + C_MERCH_ID as t1_C_MERCH_ID_original_23, + C_MAC_STAT as t1_C_MAC_STAT_original_24, + C_IP_STAT as t1_C_IP_STAT_original_25, + C_GREYLIST_FLAG as t1_C_GREYLIST_FLAG_original_26, + C_RULE_ACT as t1_C_RULE_ACT_original_27, + RRF_RULE_DATA as t1_RRF_RULE_DATA_original_28, + RRF_BUILD_NUM as t1_RRF_BUILD_NUM_original_29, + C_PAY_NAME as t1_C_PAY_NAME_original_30, + C_TXN_TYPE as t1_C_TXN_TYPE_original_31, + C_PAYEE_ACCT as t1_C_PAYEE_ACCT_original_32, + C_PAYEE_NAME as t1_C_PAYEE_NAME_original_33, + C_PAYEE_BANK_NAME as t1_C_PAYEE_BANK_NAME_original_34, + C_TXN_CHANNEL as t1_C_TXN_CHANNEL_original_35, + C_SERIAL_NO as t1_C_SERIAL_NO_original_36, + D_REGISTER_DATETIME as t1_D_REGISTER_DATETIME_original_37, + C_PAYEE_ACCT_ZONE as t1_C_PAYEE_ACCT_ZONE_original_38, + C_COMMONLY_PAYEE_FLAG as 
t1_C_COMMONLY_PAYEE_FLAG_original_39, + C_TRUST_PAYEE_FLAG as t1_C_TRUST_PAYEE_FLAG_original_40, + C_MEDIUM_NO as t1_C_MEDIUM_NO_original_41, + C_TRUST_CLIENT_FLAG as t1_C_TRUST_CLIENT_FLAG_original_42, + C_VERIFY_TYPE as t1_C_VERIFY_TYPE_original_43, + C_PAYEE_CUSTOMER_ID as t1_C_PAYEE_CUSTOMER_ID_original_44, + C_CPU_ID as t1_C_CPU_ID_original_45, + C_MEMORY_CAPACITY as t1_C_MEMORY_CAPACITY_original_46, + C_SYSTEM_VERSION as t1_C_SYSTEM_VERSION_original_47, + C_BROWSER_VERSION as t1_C_BROWSER_VERSION_original_48, + C_BROWSER_LANG as t1_C_BROWSER_LANG_original_49, + C_SCREEN_RESOLUTION as t1_C_SCREEN_RESOLUTION_original_50, + C_APP_VERSION as t1_C_APP_VERSION_original_51, + C_FACTORY_INFO as t1_C_FACTORY_INFO_original_52, + C_WHITE_CARD_FLAG as t1_C_WHITE_CARD_FLAG_original_53, + C_ACCOUNT_BALANCE as t1_C_ACCOUNT_BALANCE_original_54, + C_MOBILE_LOCATION as t1_C_MOBILE_LOCATION_original_55, + C_DEAL_RESULT as t1_C_DEAL_RESULT_original_56, + C_FINAL_DEAL_TYPE as t1_C_FINAL_DEAL_TYPE_original_57, + N_MODEL_SCORE as t1_N_MODEL_SCORE_original_58, + C_TXN_TYPE_TMP as t1_C_TXN_TYPE_TMP_original_59, + N_UNIT_PRICE as t1_N_UNIT_PRICE_original_60, + N_TXN_COUNT as t1_N_TXN_COUNT_original_61, + PROV as t1_PROV_original_62, + CITY as t1_CITY_original_63, + MER_OPTIMESP as t1_MER_OPTIMESP_original_64, + MER_GROUPID as t1_MER_GROUPID_original_65, + MER_ZONENO as t1_MER_ZONENO_original_66, + MER_BRNO as t1_MER_BRNO_original_67, + MER_SHOP_BASE as t1_MER_SHOP_BASE_original_68, + MER_PACCTYPE as t1_MER_PACCTYPE_original_69, + MER_FRATEPC as t1_MER_FRATEPC_original_70, + MER_SPECACCT as t1_MER_SPECACCT_original_71, + MER_SWITCH as t1_MER_SWITCH_original_72, + MER_AMTLIMI1 as t1_MER_AMTLIMI1_original_73, + MER_RATLIMI1 as t1_MER_RATLIMI1_original_74, + MER_BUSIAREA as t1_MER_BUSIAREA_original_75, + CUS_Gender_Cd as t1_CUS_Gender_Cd_original_76, + CUS_Ethnic_Cd as t1_CUS_Ethnic_Cd_original_77, + CUS_Birth_Cty_Cd as t1_CUS_Birth_Cty_Cd_original_78, + CUS_Edu_Degree_Cd as 
t1_CUS_Edu_Degree_Cd_original_79, + CUS_Marriage_Status_Cd as t1_CUS_Marriage_Status_Cd_original_80, + CUS_Vip_Cust_Ind as t1_CUS_Vip_Cust_Ind_original_81, + CUS_Icbc_Emply_Ind as t1_CUS_Icbc_Emply_Ind_original_82, + CUS_Dom_Resdnt_Ind as t1_CUS_Dom_Resdnt_Ind_original_83, + CUS_Belong_Corp_Type_Cd as t1_CUS_Belong_Corp_Type_Cd_original_84, + CUS_Proper_Career_Cd as t1_CUS_Proper_Career_Cd_original_85, + CUS_Proper_Industry_Cd as t1_CUS_Proper_Industry_Cd_original_86, + CUS_Integrality_Ind_Cd as t1_CUS_Integrality_Ind_Cd_original_87, + CUS_Integrality_Check_Result as t1_CUS_Integrality_Check_Result_original_88, + CUS_Identity_Actl_Result_Type_Cd as t1_CUS_Identity_Actl_Result_Type_Cd_original_89, + CUS_Cert_Provi_Situ_Type_Cd as t1_CUS_Cert_Provi_Situ_Type_Cd_original_90, + CUS_Invalid_Acct_Cert_Actl_Result as t1_CUS_Invalid_Acct_Cert_Actl_Result_original_91, + CUS_Start_Dt as t1_CUS_Start_Dt_original_92, + CUS_Birth_Dt as t1_CUS_Birth_Dt_original_93, + CUS_Career_Cd as t1_CUS_Career_Cd_original_94, + CARDSTAT as t1_CARDSTAT_original_95, + CARDKIND as t1_CARDKIND_original_96, + SYNFLAG as t1_SYNFLAG_original_97, + GOLDFLAG as t1_GOLDFLAG_original_98, + OPENDATE as t1_OPENDATE_original_99, + CDSQUOTA as t1_CDSQUOTA_original_100, + CDTQUOTA as t1_CDTQUOTA_original_101, + BT_CARDSTAT as t1_BT_CARDSTAT_original_102, + BT_ACTCHANEL as t1_BT_ACTCHANEL_original_103, + BT_ACTDATE as t1_BT_ACTDATE_original_104, + BT_SALECODE as t1_BT_SALECODE_original_105, + distinct_count(C_SERIAL_NO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_2764800s as t1_C_SERIAL_NO_window_unique_count_106, + distinct_count(C_FACTORY_INFO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_107, + distinct_count(C_FACTORY_INFO) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_108, + distinct_count(C_FACTORY_INFO) over t1_N_PHONE_NO_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_109, + 
fz_top1_ratio(C_SERIAL_NO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_top1_ratio_110, + fz_top1_ratio(C_FACTORY_INFO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_top1_ratio_111, + distinct_count(C_APP_VERSION) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_APP_VERSION_window_unique_count_112, + distinct_count(C_SERIAL_NO) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_unique_count_113, + distinct_count(C_FACTORY_INFO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_114, + distinct_count(C_SERIAL_NO) over t1_N_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_unique_count_115, + distinct_count(C_FACTORY_INFO) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_116, + max(C_SCREEN_RESOLUTION) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SCREEN_RESOLUTION_window_max_117, + max(C_SCREEN_RESOLUTION) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_2764800s as t1_C_SCREEN_RESOLUTION_window_max_118, + distinct_count(C_FACTORY_INFO) over t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_119, + distinct_count(C_FACTORY_INFO) over t1_N_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_120, + distinct_count(C_FACTORY_INFO) over t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_121, + distinct_count(C_SERIAL_NO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_unique_count_122, + max(C_SCREEN_RESOLUTION) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SCREEN_RESOLUTION_window_max_123, + C_IP_STAT as t1_C_IP_STAT_combine_124, + C_RULE_ACT as t1_C_RULE_ACT_combine_124, + CITY as t1_CITY_combine_124, + C_RULE_ACT as t1_C_RULE_ACT_combine_125, + C_FINAL_DEAL_TYPE as t1_C_FINAL_DEAL_TYPE_combine_125, + CITY as t1_CITY_combine_125, + C_IP_STAT 
as t1_C_IP_STAT_combine_126, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_126, + C_APP_VERSION as t1_C_APP_VERSION_combine_126, + C_RULE_ACT as t1_C_RULE_ACT_combine_127, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_127, + C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_127, + C_IP_STAT as t1_C_IP_STAT_combine_128, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_128, + C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_128, + C_RULE_ACT as t1_C_RULE_ACT_combine_129, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_129, + PROV as t1_PROV_combine_129, + C_MAC_STAT as t1_C_MAC_STAT_combine_130, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_130, + C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_130, + C_RULE_ACT as t1_C_RULE_ACT_combine_131, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_131, + C_FINAL_DEAL_TYPE as t1_C_FINAL_DEAL_TYPE_combine_131, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_132, + C_VERIFY_TYPE as t1_C_VERIFY_TYPE_combine_132, + C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_132, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_133, + C_MEDIUM_NO as t1_C_MEDIUM_NO_combine_133, + C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_133, + C_MAC_STAT as t1_C_MAC_STAT_combine_134, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_134, + PROV as t1_PROV_combine_134, + C_RULE_ACT as t1_C_RULE_ACT_combine_135, + RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_135, + C_VERIFY_TYPE as t1_C_VERIFY_TYPE_combine_135 + from + {0} + window t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_2764800s as ( partition by C_ACCT_CARD_NO order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as ( partition by C_ACCT_CARD_NO order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_2764800s as ( partition by N_BOUND_PHONE_NO order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_N_PHONE_NO_D_TXN_TIME_std_0s_2764800s as ( 
partition by N_PHONE_NO order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as ( partition by N_BOUND_PHONE_NO order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_N_PHONE_NO_D_TXN_TIME_std_0s_1209600s as ( partition by N_PHONE_NO order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_1209600s as ( partition by C_CUSTOMER_ID order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_2764800s as ( partition by C_CUSTOMER_ID order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + expect: + success: true + # columns: [ + # "id_1 int32", + # "t1_id_original_0 int32", + # "t1_bilabel_original_1 int32", + # "t1_D_TXN_TIME_std_original_2 timestamp", + # "t1_C_SK_SEQ_original_3 string", + # "t1_D_RHX_DATE_KEY_original_4 string", + # "t1_N_CMX_TRAN_ID_original_5 int32", + # "t1_D_TXN_DATE_original_6 string", + # "t1_D_TXN_TIME_original_7 string", + # "t1_C_ACCT_TYPE_original_8 string", + # "t1_C_ACCT_CARD_NO_original_9 string", + # "t1_C_ACCT_CARD_FLAG_original_10 string", + # "t1_C_ACCT_ZONE_original_11 string", + # "t1_N_ISSUE_AMT_original_12 double", + # "t1_N_TXN_AMT_RMB_original_13 double", + # "t1_C_ISSUE_CURR_original_14 string", + # "t1_C_CUSTOMER_ID_original_15 string", + # "t1_N_TRXCODE_original_16 string", + # "t1_C_DAILY_OPENBUY_original_17 double", + # "t1_C_INDI_OPENBUY_original_18 double", + # "t1_N_PHONE_NO_original_19 string", + # "t1_N_BOUND_PHONE_NO_original_20 string", + # "t1_C_MAC_ADDR_original_21 string", + # "t1_C_TXN_IP_original_22 string", + # "t1_C_MERCH_ID_original_23 string", + # "t1_C_MAC_STAT_original_24 string", + # "t1_C_IP_STAT_original_25 string", + # 
"t1_C_GREYLIST_FLAG_original_26 string", + # "t1_C_RULE_ACT_original_27 string", + # "t1_RRF_RULE_DATA_original_28 string", + # "t1_RRF_BUILD_NUM_original_29 int32", + # "t1_C_PAY_NAME_original_30 string", + # "t1_C_TXN_TYPE_original_31 string", + # "t1_C_PAYEE_ACCT_original_32 string", + # "t1_C_PAYEE_NAME_original_33 string", + # "t1_C_PAYEE_BANK_NAME_original_34 string", + # "t1_C_TXN_CHANNEL_original_35 string", + # "t1_C_SERIAL_NO_original_36 string", + # "t1_D_REGISTER_DATETIME_original_37 double", + # "t1_C_PAYEE_ACCT_ZONE_original_38 string", + # "t1_C_COMMONLY_PAYEE_FLAG_original_39 string", + # "t1_C_TRUST_PAYEE_FLAG_original_40 string", + # "t1_C_MEDIUM_NO_original_41 string", + # "t1_C_TRUST_CLIENT_FLAG_original_42 string", + # "t1_C_VERIFY_TYPE_original_43 string", + # "t1_C_PAYEE_CUSTOMER_ID_original_44 string", + # "t1_C_CPU_ID_original_45 string", + # "t1_C_MEMORY_CAPACITY_original_46 double", + # "t1_C_SYSTEM_VERSION_original_47 string", + # "t1_C_BROWSER_VERSION_original_48 string", + # "t1_C_BROWSER_LANG_original_49 string", + # "t1_C_SCREEN_RESOLUTION_original_50 double", + # "t1_C_APP_VERSION_original_51 string", + # "t1_C_FACTORY_INFO_original_52 string", + # "t1_C_WHITE_CARD_FLAG_original_53 string", + # "t1_C_ACCOUNT_BALANCE_original_54 double", + # "t1_C_MOBILE_LOCATION_original_55 double", + # "t1_C_DEAL_RESULT_original_56 string", + # "t1_C_FINAL_DEAL_TYPE_original_57 string", + # "t1_N_MODEL_SCORE_original_58 double", + # "t1_C_TXN_TYPE_TMP_original_59 string", + # "t1_N_UNIT_PRICE_original_60 double", + # "t1_N_TXN_COUNT_original_61 double", + # "t1_PROV_original_62 string", + # "t1_CITY_original_63 string", + # "t1_MER_OPTIMESP_original_64 string", + # "t1_MER_GROUPID_original_65 int32", + # "t1_MER_ZONENO_original_66 string", + # "t1_MER_BRNO_original_67 string", + # "t1_MER_SHOP_BASE_original_68 string", + # "t1_MER_PACCTYPE_original_69 int32", + # "t1_MER_FRATEPC_original_70 string", + # "t1_MER_SPECACCT_original_71 int32", + # 
"t1_MER_SWITCH_original_72 int32", + # "t1_MER_AMTLIMI1_original_73 int32", + # "t1_MER_RATLIMI1_original_74 int32", + # "t1_MER_BUSIAREA_original_75 string", + # "t1_CUS_Gender_Cd_original_76 int32", + # "t1_CUS_Ethnic_Cd_original_77 int32", + # "t1_CUS_Birth_Cty_Cd_original_78 string", + # "t1_CUS_Edu_Degree_Cd_original_79 int32", + # "t1_CUS_Marriage_Status_Cd_original_80 int32", + # "t1_CUS_Vip_Cust_Ind_original_81 int32", + # "t1_CUS_Icbc_Emply_Ind_original_82 int32", + # "t1_CUS_Dom_Resdnt_Ind_original_83 int32", + # "t1_CUS_Belong_Corp_Type_Cd_original_84 int32", + # "t1_CUS_Proper_Career_Cd_original_85 string", + # "t1_CUS_Proper_Industry_Cd_original_86 int32", + # "t1_CUS_Integrality_Ind_Cd_original_87 int32", + # "t1_CUS_Integrality_Check_Result_original_88 int32", + # "t1_CUS_Identity_Actl_Result_Type_Cd_original_89 int32", + # "t1_CUS_Cert_Provi_Situ_Type_Cd_original_90 int32", + # "t1_CUS_Invalid_Acct_Cert_Actl_Result_original_91 int32", + # "t1_CUS_Start_Dt_original_92 string", + # "t1_CUS_Birth_Dt_original_93 string", + # "t1_CUS_Career_Cd_original_94 string", + # "t1_CARDSTAT_original_95 int32", + # "t1_CARDKIND_original_96 int32", + # "t1_SYNFLAG_original_97 int32", + # "t1_GOLDFLAG_original_98 int32", + # "t1_OPENDATE_original_99 date", + # "t1_CDSQUOTA_original_100 int64", + # "t1_CDTQUOTA_original_101 int64", + # "t1_BT_CARDSTAT_original_102 int32", + # "t1_BT_ACTCHANEL_original_103 int32", + # "t1_BT_ACTDATE_original_104 date", + # "t1_BT_SALECODE_original_105 string", + # "t1_C_SERIAL_NO_window_unique_count_106 int", + # "t1_C_FACTORY_INFO_window_unique_count_107 int", + # "t1_C_FACTORY_INFO_window_unique_count_108 int", + # "t1_C_FACTORY_INFO_window_unique_count_109 int", + # "t1_C_SERIAL_NO_window_top1_ratio_110 double", + # "t1_C_FACTORY_INFO_window_top1_ratio_111 double", + # "t1_C_APP_VERSION_window_unique_count_112 int", + # "t1_C_SERIAL_NO_window_unique_count_113 int", + # "t1_C_FACTORY_INFO_window_unique_count_114 int", + # 
"t1_C_SERIAL_NO_window_unique_count_115 int", + # "t1_C_FACTORY_INFO_window_unique_count_116 int", + # "t1_C_SCREEN_RESOLUTION_window_max_117 double", + # "t1_C_SCREEN_RESOLUTION_window_max_118 double", + # "t1_C_FACTORY_INFO_window_unique_count_119 int", + # "t1_C_FACTORY_INFO_window_unique_count_120 int", + # "t1_C_FACTORY_INFO_window_unique_count_121 int", + # "t1_C_SERIAL_NO_window_unique_count_122 int", + # "t1_C_SCREEN_RESOLUTION_window_max_123 double", + # "t1_C_IP_STAT_combine_124 string", + # "t1_C_RULE_ACT_combine_124 string", + # "t1_CITY_combine_124 string", + # "t1_C_RULE_ACT_combine_125 string", + # "t1_C_FINAL_DEAL_TYPE_combine_125 string", + # "t1_CITY_combine_125 string", + # "t1_C_IP_STAT_combine_126 string", + # "t1_RRF_RULE_DATA_combine_126 string", + # "t1_C_APP_VERSION_combine_126 string", + # "t1_C_RULE_ACT_combine_127 string", + # "t1_RRF_RULE_DATA_combine_127 string", + # "t1_C_DEAL_RESULT_combine_127 string", + # "t1_C_IP_STAT_combine_128 string", + # "t1_RRF_RULE_DATA_combine_128 string", + # "t1_C_DEAL_RESULT_combine_128 string", + # "t1_C_RULE_ACT_combine_129 string", + # "t1_RRF_RULE_DATA_combine_129 string", + # "t1_PROV_combine_129 string", + # "t1_C_MAC_STAT_combine_130 string", + # "t1_RRF_RULE_DATA_combine_130 string", + # "t1_C_DEAL_RESULT_combine_130 string", + # "t1_C_RULE_ACT_combine_131 string", + # "t1_RRF_RULE_DATA_combine_131 string", + # "C_FINAL_DEt1_C_FINAL_DEAL_TYPE_combine_131 string", + # "t1_RRF_RULE_DATA_combine_132 string", + # "t1_C_VERIFY_TYPE_combine_132 string", + # "t1_C_DEAL_RESULT_combine_132 string", + # "t1_RRF_RULE_DATA_combine_133 string", + # "t1_C_MEDIUM_NO_combine_133 string", + # "t1_C_DEAL_RESULT_combine_133 string", + # "t1_C_MAC_STAT_combine_134 string", + # "t1_RRF_RULE_DATA_combine_134 string", + # "t1_PROV_combine_134 string", + # "t1_C_RULE_ACT_combine_135 string", + # "t1_RRF_RULE_DATA_combine_135 string", + # "t1_C_VERIFY_TYPE_combine_135 string" + # ] + # diff --git 
a/cases/integration_test/spark/test_jd.yaml b/cases/integration_test/spark/test_jd.yaml new file mode 100644 index 00000000000..02744f958f4 --- /dev/null +++ b/cases/integration_test/spark/test_jd.yaml @@ -0,0 +1,307 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_db +cases: +- id: 1 + desc: 多表-京东数据场景 + inputs: + - columns: + - "id int32" + - "user_id int32" + - "sku_id int32" + - "date timestamp" + - "label int32" + indexs: ["index1:user_id:date"] + rows: + - - 459992740 + - -1311478396 + - 659918340 + - 1611146000000 + - -1939588571 + - - -543207062 + - 507763171 + - 954458270 + - 1611146000000 + - -1603336561 + - - -1304001546 + - -769990921 + - -2013336026 + - 1611146000000 + - -159697690 + - - 158625020 + - -945166892 + - -74761189 + - 1611146000000 + - -93625855 + - - 658374105 + - -1246658137 + - -1487653472 + - 1611146000000 + - -2042844456 + - - -1036345552 + - -1145428983 + - -322971158 + - 1611146000000 + - -2141990920 + - - -1454270183 + - 653071136 + - -1843758289 + - 1611146000000 + - -685391703 + - - -27071105 + - 630100915 + - 314469207 + - 1611146000000 + - 993761881 + - - 38809088 + - -1539014266 + - 295127280 + - 1611146000000 + - -1518440147 + - - -1037180916 + - -1318776756 + - 244202015 + - 1611146000000 + - -2111130440 + name: all + - columns: + - "user_id int32" + - "age string" + - "sex int32" + - "user_lv_cd int32" + - "user_reg_tm timestamp" + indexs: ["index_user:user_id:user_reg_tm"] + rows: + - - 
-1275547367 + - "age_KGJgiSMgcx" + - -1321603784 + - 679568701 + - 1611146001000 + - - 193784185 + - "age_z7XwDlSdzE" + - -918521235 + - -1839640562 + - 1611146001000 + - - -1500008039 + - "age_UxLHj6n5iG" + - -490726213 + - -2044459492 + - 1611146001000 + name: user + - columns: + - "sku_id int32" + - "a1 int32" + - "a2 int32" + - "a3 int32" + - "cate int32" + - "brand int32" + indexs: ["index_pdt:sku_id"] + rows: + - - 200135598 + - 620202989 + - -1819873162 + - 944811254 + - -1016957005 + - -348886786 + - - -1812792532 + - -548438081 + - 408684499 + - -546175077 + - 18157988 + - -1619495426 + - - 740971942 + - -995983125 + - -74505618 + - 875561670 + - -1701622561 + - -2066012196 + - - -1953481289 + - 394506620 + - -871334434 + - -1883922132 + - 337664649 + - -678183716 + - - 690079825 + - -124658147 + - -2013081012 + - 514316543 + - -1892105452 + - -398640514 + - - -1357806486 + - -1866091467 + - -848394605 + - -1321197691 + - 1037826917 + - 576025216 + name: product + - columns: + - "user_id int32" + - "sku_id int32" + - "time timestamp" + - "model_id int32" + - "type int32" + - "cate int32" + - "brand int32" + indexs: ["index:user_id:time"] + rows: + - - -946359508 + - -784482204 + - 1611146001000 + - 831631177 + - 50026040 + - 125260267 + - -1212429112 + - - 674634423 + - -608174802 + - 1611146001000 + - -1094861038 + - -1421894956 + - -3671335 + - -1054215935 + - - 548059146 + - -271665164 + - 1611146001000 + - 81808312 + - -1996872304 + - 660746138 + - 786421686 + - - -1970341445 + - -900311277 + - 1611146001000 + - -107428720 + - 746853108 + - -805673533 + - -860397196 + name: action + - columns: + - "sku_id int32" + - "comment_num int32" + - "has_bad_comment int32" + - "bad_comment_rate double" + - "dt timestamp" + indexs: ["index1:sku_id:dt"] + rows: + - - -2009402124 + - -130694795 + - -377940874 + - -38.93 + - 1611146001000 + - - -284125685 + - 216789062 + - 520778695 + - -73.75 + - 1611146001000 + - - -2059682888 + - 865555637 + - -370172128 + - 
-62.3 + - 1611146001000 + - - -1747089957 + - -720960620 + - -113399911 + - -109.97 + - 1611146001000 + - - -1446988855 + - 964829781 + - -796129056 + - 43.56 + - 1611146001000 + - - -931224783 + - 784179322 + - -1570583655 + - 7.31 + - 1611146001000 + - - -986441723 + - -1938361365 + - -986946742 + - 98.82 + - 1611146001000 + name: comment + sql: |- + select * from + ( + select + id as id_1, + `id` as all_id_original_0, + `user_id` as all_user_id_original_1, + `sku_id` as all_sku_id_original_2, + `date` as all_date_original_3, + `label` as all_label_original_4, + fz_top1_ratio(`id`) over all_user_id_date_0s_2764800s as all_id_window_top1_ratio_28, + fz_top1_ratio(`sku_id`) over all_user_id_date_0s_2764800s as all_sku_id_window_top1_ratio_29, + distinct_count(`sku_id`) over all_user_id_date_0s_2764800s as all_sku_id_window_unique_count_30, + fz_top1_ratio(`sku_id`) over all_user_id_date_0s_5529600s as all_sku_id_window_top1_ratio_31, + fz_top1_ratio(`id`) over all_user_id_date_0s_5529600s as all_id_window_top1_ratio_32, + `sku_id` as all_sku_id_combine_33, + `sku_id` as all_sku_id_combine_34, + `sku_id` as all_sku_id_combine_35, + `sku_id` as all_sku_id_combine_36, + `sku_id` as all_sku_id_combine_37, + `sku_id` as all_sku_id_combine_38, + `sku_id` as all_sku_id_combine_39 + from + `all` + window all_user_id_date_0s_2764800s as (partition by `user_id` order by `date` rows_range between 2764800s preceding and 0s preceding), + all_user_id_date_0s_5529600s as (partition by `user_id` order by `date` rows_range between 5529600s preceding and 0s preceding)) + as out0 + last join + ( + select + `all`.id as id_6, + `comment_sku_id__date_0s_1209600s`.`bad_comment_rate` as comment_bad_comment_rate_multi_last_value_5, + `comment_sku_id__date_0s_1209600s`.`comment_num` as comment_comment_num_multi_last_value_6, + `comment_sku_id__date_0s_1209600s`.`dt` as comment_dt_multi_last_value_7, + `comment_sku_id__date_0s_1209600s`.`has_bad_comment` as 
comment_has_bad_comment_multi_last_value_8, + `product_sku_id`.`a1` as product_a1_multi_direct_9, + `product_sku_id`.`a2` as product_a2_multi_direct_10, + `product_sku_id`.`a3` as product_a3_multi_direct_11, + `product_sku_id`.`brand` as product_brand_multi_direct_12, + `product_sku_id`.`cate` as product_cate_multi_direct_13, + `user_user_id`.`age` as user_age_multi_direct_14, + `user_user_id`.`sex` as user_sex_multi_direct_15, + `user_user_id`.`user_lv_cd` as user_user_lv_cd_multi_direct_16, + `user_user_id`.`user_reg_tm` as user_user_reg_tm_multi_direct_17 + from + `all` + last join `comment` as `comment_sku_id__date_0s_1209600s` order by comment_sku_id__date_0s_1209600s.`dt` on `all`.`sku_id` = `comment_sku_id__date_0s_1209600s`.`sku_id` and comment_sku_id__date_0s_1209600s.`dt` < `all`.`date` - 0 and comment_sku_id__date_0s_1209600s.`dt` > `all`.`date` - 1209600000 + last join `product` as `product_sku_id` on `all`.`sku_id` = `product_sku_id`.`sku_id` + last join `user` as `user_user_id` on `all`.`user_id` = `user_user_id`.`user_id`) + as out1 + on out0.id_1 = out1.id_6 + last join + ( + select + id as id_19, + fz_topn_frequency(`brand`, 3) over action_user_id_time_0s_32d as action_brand_multi_top3frequency_18, + distinct_count(`brand`) over action_user_id_time_0_100 as action_brand_multi_unique_count_19, + distinct_count(`cate`) over action_user_id_time_0_100 as action_cate_multi_unique_count_20, + distinct_count(`cate`) over action_user_id_time_0s_32d as action_cate_multi_unique_count_21, + fz_topn_frequency(`model_id`, 3) over action_user_id_time_0s_32d as action_model_id_multi_top3frequency_22, + distinct_count(`model_id`) over action_user_id_time_0_100 as action_model_id_multi_unique_count_23, + distinct_count(`sku_id`) over action_user_id_time_0_100 as action_sku_id_multi_unique_count_24, + distinct_count(`sku_id`) over action_user_id_time_0s_32d as action_sku_id_multi_unique_count_25, + fz_topn_frequency(`type`, 3) over action_user_id_time_0s_32d as 
action_type_multi_top3frequency_26, + fz_topn_frequency(`type`, 3) over action_user_id_time_0_100 as action_type_multi_top3frequency_27 + from + (select `user_id` as `user_id`, int(0) as `sku_id`, `date` as `time`, int(0) as `model_id`, int(0) as `type`, int(0) as `cate`, int(0) as `brand`, id from `all`) + window action_user_id_time_0s_32d as ( + UNION (select `user_id`, `sku_id`, `time`, `model_id`, `type`, `cate`, `brand`, int(0) as id from `action`) partition by `user_id` order by `time` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + action_user_id_time_0_100 as ( + UNION (select `user_id`, `sku_id`, `time`, `model_id`, `type`, `cate`, `brand`, int(0) as id from `action`) partition by `user_id` order by `time` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW)) + as out2 + on out0.id_1 = out2.id_19 + ; + expect: + success: true diff --git a/cases/integration_test/spark/test_news.yaml b/cases/integration_test/spark/test_news.yaml new file mode 100644 index 00000000000..ff449b296c3 --- /dev/null +++ b/cases/integration_test/spark/test_news.yaml @@ -0,0 +1,439 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: template_db +cases: +- id: 1 + desc: 单表-新闻场景 + inputs: + - columns: + - "InstanceKey string" + - "RequestDatetime timestamp" + - "PageId string" + - "NewsId string" + - "CategoryId string" + - "TermScores string" + - "TitleTermScores string" + - "TagScores string" + - "UserTagScores string" + - "UserTermScores string" + - "MediaId string" + - "ContentWords int32" + - "TitleWords int32" + - "Tag string" + - "TotalLikes int32" + - "TotalDislikes int32" + - "TotalComments int32" + - "TotalImpressions int32" + - "TotalAdjustImpressions int32" + - "TotalClicks int32" + - "TotalShares int32" + - "UserId string" + - "RequestLatitude double" + - "RequestLongitude double" + - "DeviceId string" + - "UserIp string" + - "Clicked int32" + - "UserClickedMediaIdsIn1Times string" + - "UserClickedMediaIdsIn3Times string" + - "UserClickedMediaIdsIn10Times string" + - "UserClickedMediaIdsIn1Minutes string" + - "UserClickedMediaIdsIn5Minutes string" + - "UserClickedMediaIdsIn30Minutes string" + - "UserClickedMediaIdsIn360Minutes string" + - "UserClickedCatIdsIn1Times string" + - "UserClickedCatIdsIn3Times string" + - "UserClickedCatIdsIn10Times string" + - "UserClickedCatIdsIn1Minutes string" + - "UserClickedCatIdsIn5Minutes string" + - "UserClickedCatIdsIn30Minutes string" + - "UserClickedCatIdsIn360Minutes string" + - "UserClickedTagScoresIn1Times string" + - "UserClickedTagScoresIn3Times string" + - "UserClickedTagScoresIn10Times string" + - "UserClickedTagScoresIn1Minutes string" + - "UserClickedTagScoresIn5Minutes string" + - "UserClickedTagScoresIn30Minutes string" + - "UserClickedTagScoresIn360Minutes string" + - "UserClickedTermScoresIn1Times string" + - "UserClickedTermScoresIn3Times string" + - "UserClickedTermScoresIn10Times string" + - "UserClickedTermScoresIn1Minutes string" + - "UserClickedTermScoresIn5Minutes string" + - "UserClickedTermScoresIn30Minutes string" + - "UserClickedTermScoresIn360Minutes string" + - "UserClickedTitleTermScoresIn1Times string" + - 
"UserClickedTitleTermScoresIn3Times string" + - "UserClickedTitleTermScoresIn10Times string" + - "UserClickedTitleTermScoresIn1Minutes string" + - "UserClickedTitleTermScoresIn5Minutes string" + - "UserClickedTitleTermScoresIn30Minutes string" + - "UserClickedTitleTermScoresIn360Minutes string" + indexs: + - "index1:UserTermScores:RequestDatetime" + - "index2:UserTagScores:RequestDatetime" + - "index3:UserId:RequestDatetime" + - "index4:UserIp:RequestDatetime" + rows: + - - "InstanceKey_hdIp5qM957" + - 1609405780000 + - "PageId_2qfcb9EBP4" + - "NewsId_ErcZw6WqZC" + - "CategoryId_gWDyj6FMC6" + - "TermScores_xppEG6AJ38" + - "TitleTermScores_kw3z2g2K98" + - "TagScores_c9zE9v08wj" + - "UserTagScores_84fOPfN56G" + - "UserTermScores_pJVZ7lPMeI" + - "MediaId_i0stuPP10g" + - 829372349 + - 601942391 + - "Tag_ciC6wk19PJ" + - -1820777477 + - 883273961 + - 266011166 + - 625586443 + - -684001291 + - 902064193 + - 124534625 + - "UserId_N4VsmRmV5e" + - 3.7387905194494238 + - 125.52669722380091 + - "DeviceId_2zMD4oSYcI" + - "UserIp_HpEH1YJjRI" + - -1434651347 + - "UserClickedMediaIdsIn1Times_3HDiJhw431" + - "UserClickedMediaIdsIn3Times_dMlPGtTIhR" + - "UserClickedMediaIdsIn10Times_av0JnzlZTG" + - "UserClickedMediaIdsIn1Minutes_mOktj5LJiD" + - "UserClickedMediaIdsIn5Minutes_9rypts8eWg" + - "UserClickedMediaIdsIn30Minutes_rgvXB0uxwH" + - "UserClickedMediaIdsIn360Minutes_c5UxGaYceL" + - "UserClickedCatIdsIn1Times_LeHDkid2pj" + - "UserClickedCatIdsIn3Times_q1NIvIEMP7" + - "UserClickedCatIdsIn10Times_6u8Xg7cS9F" + - "UserClickedCatIdsIn1Minutes_oRKjk9HTtA" + - "UserClickedCatIdsIn5Minutes_GdLcy4lnLO" + - "UserClickedCatIdsIn30Minutes_hJqHlZOlXf" + - "UserClickedCatIdsIn360Minutes_6E2LKw7j2O" + - "UserClickedTagScoresIn1Times_cAkeiQEbZi" + - "UserClickedTagScoresIn3Times_tFexwMHFw4" + - "UserClickedTagScoresIn10Times_M5J9oPpbqM" + - "UserClickedTagScoresIn1Minutes_F9Ba3faBRO" + - "UserClickedTagScoresIn5Minutes_wCSaqSRatG" + - "UserClickedTagScoresIn30Minutes_BzJfoCf21a" + - 
"UserClickedTagScoresIn360Minutes_30l7jaJ4gB" + - "UserClickedTermScoresIn1Times_LqLUppsBv0" + - "UserClickedTermScoresIn3Times_Lokr3ory2y" + - "UserClickedTermScoresIn10Times_xTZVbQqHw0" + - "UserClickedTermScoresIn1Minutes_pBFLuGB0p0" + - "UserClickedTermScoresIn5Minutes_giEJ7skHMs" + - "UserClickedTermScoresIn30Minutes_C8JaxDwypo" + - "UserClickedTermScoresIn360Minutes_Rm6L1ywrhl" + - "UserClickedTitleTermScoresIn1Times_JxKKWwPfnI" + - "UserClickedTitleTermScoresIn3Times_whxcLRU2Px" + - "UserClickedTitleTermScoresIn10Times_TwsUNK2E5q" + - "UserClickedTitleTermScoresIn1Minutes_nzkQNp1WVM" + - "UserClickedTitleTermScoresIn5Minutes_1YOFOVlbvh" + - "UserClickedTitleTermScoresIn30Minutes_IfSQLmvSqa" + - "UserClickedTitleTermScoresIn360Minutes_r5sD1XpY2c" + - - "InstanceKey_pve2h4oBmM" + - 1609405780000 + - "PageId_KZoyi08pAP" + - "NewsId_TGIqBGEVHb" + - "CategoryId_Ie7ucdUYXe" + - "TermScores_1gZiIGPRQz" + - "TitleTermScores_yQyoGdHRNe" + - "TagScores_Y110SxqWpY" + - "UserTagScores_i0icat48DT" + - "UserTermScores_cL9G53KJhT" + - "MediaId_1tOAd6ZaZC" + - -1539942388 + - -645368500 + - "Tag_lstd2JNED7" + - -203531434 + - -1137889304 + - -1877229079 + - -1849242659 + - -1005223131 + - 32773880 + - -730536017 + - "UserId_pjFJNfdPYs" + - 118.13343685266054 + - -75.95372022179421 + - "DeviceId_kmtzlnZRbc" + - "UserIp_pRTNUjNjpf" + - 186372981 + - "UserClickedMediaIdsIn1Times_2EG9u6VG3z" + - "UserClickedMediaIdsIn3Times_U52gnngZpl" + - "UserClickedMediaIdsIn10Times_SZMJFndrWA" + - "UserClickedMediaIdsIn1Minutes_sUzsztqLo6" + - "UserClickedMediaIdsIn5Minutes_j8k1DEJ3K2" + - "UserClickedMediaIdsIn30Minutes_WQYr1ipJzJ" + - "UserClickedMediaIdsIn360Minutes_kNPuSmOLCh" + - "UserClickedCatIdsIn1Times_AWeuDDwzJX" + - "UserClickedCatIdsIn3Times_5oBau1ONjC" + - "UserClickedCatIdsIn10Times_nC04RrROot" + - "UserClickedCatIdsIn1Minutes_BCraczQzN8" + - "UserClickedCatIdsIn5Minutes_OYg6nwBjgB" + - "UserClickedCatIdsIn30Minutes_SR13pQy3Xn" + - "UserClickedCatIdsIn360Minutes_I8LR8qCAfD" + 
- "UserClickedTagScoresIn1Times_sLP8dEPBuF" + - "UserClickedTagScoresIn3Times_Z6wY8t1DdZ" + - "UserClickedTagScoresIn10Times_X9rXFAgUuH" + - "UserClickedTagScoresIn1Minutes_MazqtyoPcg" + - "UserClickedTagScoresIn5Minutes_16ltZzRQid" + - "UserClickedTagScoresIn30Minutes_pSlMAYSeYb" + - "UserClickedTagScoresIn360Minutes_0Zz8P4xjGH" + - "UserClickedTermScoresIn1Times_bvkzRyHAus" + - "UserClickedTermScoresIn3Times_0HMO3i4yns" + - "UserClickedTermScoresIn10Times_DT8xge6vdi" + - "UserClickedTermScoresIn1Minutes_2okBnnoBid" + - "UserClickedTermScoresIn5Minutes_lqNLfKvrh0" + - "UserClickedTermScoresIn30Minutes_ac2U74ym1H" + - "UserClickedTermScoresIn360Minutes_JSBVGmOT7m" + - "UserClickedTitleTermScoresIn1Times_xChAvlI0Hg" + - "UserClickedTitleTermScoresIn3Times_sASTrsDGA3" + - "UserClickedTitleTermScoresIn10Times_21cB10rAvK" + - "UserClickedTitleTermScoresIn1Minutes_SVXF4JVpJ5" + - "UserClickedTitleTermScoresIn5Minutes_LCLbuQVXs2" + - "UserClickedTitleTermScoresIn30Minutes_bwXZz631fl" + - "UserClickedTitleTermScoresIn360Minutes_sR95HAIcHx" + - - "InstanceKey_k4XtEfFsqT" + - 1609405780000 + - "PageId_BZWLnCZmQ9" + - "NewsId_YdHfQBoErt" + - "CategoryId_oard5Cne0T" + - "TermScores_e8dAwnunlf" + - "TitleTermScores_8eghaLsTjR" + - "TagScores_Igz3roJMYt" + - "UserTagScores_D0noZJ4FzI" + - "UserTermScores_p2ZShNACkv" + - "MediaId_7BELEeQo8t" + - -1400976088 + - -185610105 + - "Tag_qDw3zDu0Kf" + - -1424703288 + - 326020146 + - -1788522406 + - -894083919 + - -614604127 + - 836914113 + - -514315335 + - "UserId_cnDtbfUEMH" + - 77.52642088566631 + - 61.52004136781969 + - "DeviceId_88cLvltsp1" + - "UserIp_6QnBErDqMJ" + - -2147467600 + - "UserClickedMediaIdsIn1Times_dfNUH5v0a6" + - "UserClickedMediaIdsIn3Times_7C9bV4aMUz" + - "UserClickedMediaIdsIn10Times_y7bSntxLJ9" + - "UserClickedMediaIdsIn1Minutes_PLy8SqEQ84" + - "UserClickedMediaIdsIn5Minutes_5BnsVlthDt" + - "UserClickedMediaIdsIn30Minutes_GMdEG1RRGL" + - "UserClickedMediaIdsIn360Minutes_Zb85hck0aF" + - 
"UserClickedCatIdsIn1Times_1WG4dLVfOH" + - "UserClickedCatIdsIn3Times_HuZi6EaTCV" + - "UserClickedCatIdsIn10Times_QPL2TWKSN3" + - "UserClickedCatIdsIn1Minutes_rzk3a4Klss" + - "UserClickedCatIdsIn5Minutes_0X05NkhD7o" + - "UserClickedCatIdsIn30Minutes_jYleKJf8IF" + - "UserClickedCatIdsIn360Minutes_ar6mj9US4t" + - "UserClickedTagScoresIn1Times_P2MmbiyS4I" + - "UserClickedTagScoresIn3Times_8StMrSWAeI" + - "UserClickedTagScoresIn10Times_Bl7yrclqG2" + - "UserClickedTagScoresIn1Minutes_DqBqyScA9d" + - "UserClickedTagScoresIn5Minutes_K6ZgXsqw0u" + - "UserClickedTagScoresIn30Minutes_6lv8OvRI7W" + - "UserClickedTagScoresIn360Minutes_Hs54K7u27l" + - "UserClickedTermScoresIn1Times_H6SHDMGtuy" + - "UserClickedTermScoresIn3Times_DVVW13LIcd" + - "UserClickedTermScoresIn10Times_dZdjYFHvpd" + - "UserClickedTermScoresIn1Minutes_ZTBWK0VaYf" + - "UserClickedTermScoresIn5Minutes_aIfxNFWfaz" + - "UserClickedTermScoresIn30Minutes_XkLhwMM16w" + - "UserClickedTermScoresIn360Minutes_VccLPVQ0kC" + - "UserClickedTitleTermScoresIn1Times_bM308gVgrl" + - "UserClickedTitleTermScoresIn3Times_4jqy1Aeiar" + - "UserClickedTitleTermScoresIn10Times_FQ79yzLr4K" + - "UserClickedTitleTermScoresIn1Minutes_enU5HDPII1" + - "UserClickedTitleTermScoresIn5Minutes_X0YzeMlxE1" + - "UserClickedTitleTermScoresIn30Minutes_WAWIp5zsTD" + - "UserClickedTitleTermScoresIn360Minutes_SYU1A5lgJy" + - - "InstanceKey_Ik6w1GJ3ak" + - 1609405780000 + - "PageId_l8hTiHLe7c" + - "NewsId_U1l7n7Z1cz" + - "CategoryId_z93urYcLTz" + - "TermScores_05J4os5hvJ" + - "TitleTermScores_MGrW4hhUdP" + - "TagScores_1k3NEltzP4" + - "UserTagScores_1PHt2Sw8Z5" + - "UserTermScores_537uScy0i9" + - "MediaId_xc7NYROEZt" + - -1256228849 + - -110570093 + - "Tag_d4mRWCbrMO" + - 365243338 + - 873343892 + - 17923145 + - -681865200 + - -444619580 + - -1894396283 + - -1127215708 + - "UserId_fgmdPtLt87" + - -61.396138086485564 + - -87.37716465146411 + - "DeviceId_CCtZyRqhvh" + - "UserIp_CxGseOdjSM" + - -76661935 + - "UserClickedMediaIdsIn1Times_LEYaofr5Hl" + - 
"UserClickedMediaIdsIn3Times_3FSI83BEln" + - "UserClickedMediaIdsIn10Times_0uxy6hp2ql" + - "UserClickedMediaIdsIn1Minutes_iR7f3ML0Cy" + - "UserClickedMediaIdsIn5Minutes_5lifH8ACGz" + - "UserClickedMediaIdsIn30Minutes_veGUAV6ecL" + - "UserClickedMediaIdsIn360Minutes_4ZfwIYLjI0" + - "UserClickedCatIdsIn1Times_MsWvdpbriS" + - "UserClickedCatIdsIn3Times_OOQ3KsuFoC" + - "UserClickedCatIdsIn10Times_lSXIYryDz4" + - "UserClickedCatIdsIn1Minutes_lcgKRcqF1r" + - "UserClickedCatIdsIn5Minutes_APcl6yWNKU" + - "UserClickedCatIdsIn30Minutes_JA3aKMbLRU" + - "UserClickedCatIdsIn360Minutes_iRcC0hXYHY" + - "UserClickedTagScoresIn1Times_BsalAUhfaV" + - "UserClickedTagScoresIn3Times_4YgxkGeFO8" + - "UserClickedTagScoresIn10Times_JGEY6hnpRt" + - "UserClickedTagScoresIn1Minutes_qh78KhthQ9" + - "UserClickedTagScoresIn5Minutes_KwokIGT8ih" + - "UserClickedTagScoresIn30Minutes_esweRoZRlQ" + - "UserClickedTagScoresIn360Minutes_SEhVJL8Isv" + - "UserClickedTermScoresIn1Times_uiIHrsV6LB" + - "UserClickedTermScoresIn3Times_y3BznAylvB" + - "UserClickedTermScoresIn10Times_IU8v9wrb65" + - "UserClickedTermScoresIn1Minutes_YP8gIJCiEZ" + - "UserClickedTermScoresIn5Minutes_vDHmUEWZgj" + - "UserClickedTermScoresIn30Minutes_v3yee1Glcu" + - "UserClickedTermScoresIn360Minutes_7dWE2PTpRW" + - "UserClickedTitleTermScoresIn1Times_gnyIe4mq1F" + - "UserClickedTitleTermScoresIn3Times_UGzqsDJ5zr" + - "UserClickedTitleTermScoresIn10Times_498w6xB6Nc" + - "UserClickedTitleTermScoresIn1Minutes_jdo8wg4Qvj" + - "UserClickedTitleTermScoresIn5Minutes_u6pQFRC1AT" + - "UserClickedTitleTermScoresIn30Minutes_XyyNo9Vj1t" + - "UserClickedTitleTermScoresIn360Minutes_JlyEeiBHUZ" + sql: |- + select + InstanceKey as InstanceKey_1, + InstanceKey as t1_InstanceKey_0, + RequestDatetime as t1_RequestDatetime_1, + PageId as t1_PageId_2, + NewsId as t1_NewsId_3, + CategoryId as t1_CategoryId_4, + TermScores as t1_TermScores_5, + TitleTermScores as t1_TitleTermScores_6, + TagScores as t1_TagScores_7, + UserTagScores as t1_UserTagScores_8, 
+ UserTermScores as t1_UserTermScores_9, + MediaId as t1_MediaId_10, + ContentWords as t1_ContentWords_11, + TitleWords as t1_TitleWords_12, + Tag as t1_Tag_13, + TotalLikes as t1_TotalLikes_14, + TotalDislikes as t1_TotalDislikes_15, + TotalComments as t1_TotalComments_16, + TotalImpressions as t1_TotalImpressions_17, + TotalAdjustImpressions as t1_TotalAdjustImpressions_18, + TotalClicks as t1_TotalClicks_19, + TotalShares as t1_TotalShares_20, + UserId as t1_UserId_21, + RequestLatitude as t1_RequestLatitude_22, + RequestLongitude as t1_RequestLongitude_23, + DeviceId as t1_DeviceId_24, + UserIp as t1_UserIp_25, + Clicked as t1_Clicked_26, + UserClickedMediaIdsIn1Times as t1_UserClickedMediaIdsIn1Times_27, + UserClickedMediaIdsIn3Times as t1_UserClickedMediaIdsIn3Times_28, + UserClickedMediaIdsIn10Times as t1_UserClickedMediaIdsIn10Times_29, + UserClickedMediaIdsIn1Minutes as t1_UserClickedMediaIdsIn1Minutes_30, + UserClickedMediaIdsIn5Minutes as t1_UserClickedMediaIdsIn5Minutes_31, + UserClickedMediaIdsIn30Minutes as t1_UserClickedMediaIdsIn30Minutes_32, + UserClickedMediaIdsIn360Minutes as t1_UserClickedMediaIdsIn360Minutes_33, + UserClickedCatIdsIn1Times as t1_UserClickedCatIdsIn1Times_34, + UserClickedCatIdsIn3Times as t1_UserClickedCatIdsIn3Times_35, + UserClickedCatIdsIn10Times as t1_UserClickedCatIdsIn10Times_36, + UserClickedCatIdsIn1Minutes as t1_UserClickedCatIdsIn1Minutes_37, + UserClickedCatIdsIn5Minutes as t1_UserClickedCatIdsIn5Minutes_38, + UserClickedCatIdsIn30Minutes as t1_UserClickedCatIdsIn30Minutes_39, + UserClickedCatIdsIn360Minutes as t1_UserClickedCatIdsIn360Minutes_40, + UserClickedTagScoresIn1Times as t1_UserClickedTagScoresIn1Times_41, + UserClickedTagScoresIn3Times as t1_UserClickedTagScoresIn3Times_42, + UserClickedTagScoresIn10Times as t1_UserClickedTagScoresIn10Times_43, + UserClickedTagScoresIn1Minutes as t1_UserClickedTagScoresIn1Minutes_44, + UserClickedTagScoresIn5Minutes as t1_UserClickedTagScoresIn5Minutes_45, + 
UserClickedTagScoresIn30Minutes as t1_UserClickedTagScoresIn30Minutes_46, + UserClickedTagScoresIn360Minutes as t1_UserClickedTagScoresIn360Minutes_47, + UserClickedTermScoresIn1Times as t1_UserClickedTermScoresIn1Times_48, + UserClickedTermScoresIn3Times as t1_UserClickedTermScoresIn3Times_49, + UserClickedTermScoresIn10Times as t1_UserClickedTermScoresIn10Times_50, + UserClickedTermScoresIn1Minutes as t1_UserClickedTermScoresIn1Minutes_51, + UserClickedTermScoresIn5Minutes as t1_UserClickedTermScoresIn5Minutes_52, + UserClickedTermScoresIn30Minutes as t1_UserClickedTermScoresIn30Minutes_53, + UserClickedTermScoresIn360Minutes as t1_UserClickedTermScoresIn360Minutes_54, + UserClickedTitleTermScoresIn1Times as t1_UserClickedTitleTermScoresIn1Times_55, + UserClickedTitleTermScoresIn3Times as t1_UserClickedTitleTermScoresIn3Times_56, + UserClickedTitleTermScoresIn10Times as t1_UserClickedTitleTermScoresIn10Times_57, + UserClickedTitleTermScoresIn1Minutes as t1_UserClickedTitleTermScoresIn1Minutes_58, + UserClickedTitleTermScoresIn5Minutes as t1_UserClickedTitleTermScoresIn5Minutes_59, + UserClickedTitleTermScoresIn30Minutes as t1_UserClickedTitleTermScoresIn30Minutes_60, + UserClickedTitleTermScoresIn360Minutes as t1_UserClickedTitleTermScoresIn360Minutes_61, + sum(TitleWords) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_TitleWords_62, + fz_top1_ratio(NewsId) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_NewsId_63, + sum(RequestLatitude) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_RequestLatitude_64, + distinct_count(NewsId) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_NewsId_65, + sum(ContentWords) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_ContentWords_66, + case when !isnull(lag(UserClickedTagScoresIn1Times, 0)) over t1_UserTermScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn1Times) over t1_UserTermScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn1Times_67, + case when 
!isnull(lag(UserClickedTagScoresIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn1Times_68, + case when !isnull(lag(UserClickedCatIdsIn1Times, 0)) over t1_UserTermScores_RequestDatetime_0s_7200s then count(UserClickedCatIdsIn1Times) over t1_UserTermScores_RequestDatetime_0s_7200s else null end as t1_UserClickedCatIdsIn1Times_69, + case when !isnull(lag(UserClickedMediaIdsIn1Times, 0)) over t1_UserTermScores_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn1Times) over t1_UserTermScores_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn1Times_70, + case when !isnull(lag(UserClickedTagScoresIn30Minutes, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn30Minutes) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn30Minutes_71, + fz_top1_ratio(NewsId) over t1_UserTagScores_RequestDatetime_0s_7200s as t1_NewsId_72, + case when !isnull(lag(UserClickedTagScoresIn3Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn3Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn3Times_73, + case when !isnull(lag(UserClickedTagScoresIn10Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn10Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn10Times_74, + case when !isnull(lag(UserClickedCatIdsIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedCatIdsIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedCatIdsIn1Times_75, + case when !isnull(lag(UserClickedTitleTermScoresIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s 
else null end as t1_UserClickedTitleTermScoresIn1Times_76, + case when !isnull(lag(UserClickedTermScoresIn30Minutes, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn30Minutes) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn30Minutes_77, + case when !isnull(lag(UserClickedTermScoresIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn1Times_78, + case when !isnull(lag(UserClickedMediaIdsIn10Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn10Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn10Times_79, + case when !isnull(lag(UserTermScores, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserTermScores) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserTermScores_80, + case when !isnull(lag(UserClickedMediaIdsIn10Times, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn10Times) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn10Times_81, + distinct_count(InstanceKey) over t1_UserTagScores_RequestDatetime_0s_7200s as t1_InstanceKey_82, + case when !isnull(lag(UserTagScores, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserTagScores) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserTagScores_83, + case when !isnull(lag(UserTagScores, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserTagScores) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserTagScores_84, + case when !isnull(lag(UserClickedTitleTermScoresIn3Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn3Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedTitleTermScoresIn3Times_85, + case when !isnull(lag(UserClickedMediaIdsIn360Minutes, 
0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn360Minutes) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn360Minutes_86, + case when !isnull(lag(UserClickedTitleTermScoresIn10Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn10Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedTitleTermScoresIn10Times_87, + case when !isnull(lag(UserClickedTitleTermScoresIn3Times, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn3Times) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedTitleTermScoresIn3Times_88, + case when !isnull(lag(UserClickedTermScoresIn3Times, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn3Times) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn3Times_89, + case when !isnull(lag(UserClickedMediaIdsIn360Minutes, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn360Minutes) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn360Minutes_90, + case when !isnull(lag(UserClickedTermScoresIn10Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn10Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn10Times_91 + from + {0} + window t1_UserTermScores_RequestDatetime_0s_7200s as ( partition by UserTermScores order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_UserTagScores_RequestDatetime_0s_7200s as ( partition by UserTagScores order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_UserId_RequestDatetime_0s_7200s as ( partition by UserId order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW), + t1_UserIp_RequestDatetime_0s_7200s as ( partition by 
UserIp order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + expect: + success: true diff --git a/cases/integration_test/test_batch_request.yaml b/cases/integration_test/test_batch_request.yaml new file mode 100644 index 00000000000..9f3134806e1 --- /dev/null +++ b/cases/integration_test/test_batch_request.yaml @@ -0,0 +1,358 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: batch request without common column + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - [1,"a",1,30,1.0,2.0,1590738990000,"2020-05-01","a"] + - [3,"a",3,32,1.2,2.2,1590738992000,"2020-05-03","c"] + - [5,"a",5,34,1.4,2.4,1590738994000,"2020-05-05","d"] + - [6,"a",6,35,1.5,2.5,1590738995000,"2020-05-06","e"] + batch_request: + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"] + - [4,"a",4,33,1.3,2.3,1590738993000,"2020-05-04","c"] + - [7,"a",6,36,1.6,2.6,1590738996000,"2020-05-07","f"] + sql: | + SELECT id, c1, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, sum(c5) OVER w1 as m5, + sum(c6) OVER w1 as m6, max(c7) OVER w1 as m7, max(c8) OVER w1 as m8, min(c9) OVER w1 as m9 FROM {0} WINDOW + 
w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"] + rows: + - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"] + - [4,"a",8,95,3.5,6.5,1590738993000,"2020-05-04","a"] + - [7,"a",17,105,4.5,7.5,1590738996000,"2020-05-07","d"] + + - id: 1 + desc: batch request with all common columns + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + rows: + - [1,"a",1,30,1.0,2.0,1590738990000,"2020-05-01","a"] + - [3,"a",3,32,1.2,2.2,1590738992000,"2020-05-03","c"] + - [5,"a",5,34,1.4,2.4,1590738994000,"2020-05-05","d"] + - [6,"a",6,35,1.5,2.5,1590738995000,"2020-05-06","e"] + batch_request: + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + indexs: ["index1:c1:c7"] + common_column_indices: [0,1,2,3,4,5,6,7,8] + rows: + - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"] + - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"] + - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"] + sql: | + SELECT id, c1, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, sum(c5) OVER w1 as m5, + sum(c6) OVER w1 as m6, max(c7) OVER w1 as m7, max(c8) OVER w1 as m8, min(c9) OVER w1 as m9 FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + columns: ["id int","c1 string","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"] + rows: + - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"] + - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"] + - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"] + + - id: 2 + desc: batch request with non-trival common columns + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + 
indexs: ["index1:c1:c7", "index2:id:c7"] + rows: + - [1,"a",1,30,1.0,2.0,1590738990000] + - [3,"a",3,32,1.2,2.2,1590738992000] + - [5,"a",5,34,1.4,2.4,1590738994000] + - [6,"a",6,35,1.5,2.5,1590738995000] + - + columns : ["id int","timecol timestamp","c8 date","c9 string"] + indexs: ["index2:id:timecol"] + rows: + - [1,1590738990000,"2020-05-01","a"] + - [2,1590738991000,"2020-05-02","b"] + - [3,1590738992000,"2020-05-03","c"] + - [4,1590738993000,"2020-05-04","d"] + - [5,1590738994000,"2020-05-05","e"] + - [6,1590738995000,"2020-05-06","f"] + - [7,1590738996000,"2020-05-07","g"] + batch_request: + indexs: ["index1:c1:c7"] + common_column_indices: [1,3,5] + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + rows: + - [2,"a",2,31,1.1,2.1,1590738991000] + - [4,"a",3,31,1.2,2.1,1590738993000] + - [7,"a",4,31,1.3,2.1,1590738996000] + sql: | + SELECT {0}.id, c1, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, sum(c5) OVER w1 as m5, + sum(c6) OVER w1 as m6, max(c7) OVER w1 as m7, max(c8) OVER w1 as m8, min(c9) OVER w1 as m9 + FROM {0} last join {1} order by {1}.timecol on {0}.id={1}.id and {0}.c7={1}.timecol + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"] + rows: + - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"] + - [4,"a",7,93,3.4,6.3,1590738993000,"2020-05-04","a"] + - [7,"a",15,100,4.2,7.0,1590738996000,"2020-05-07","e"] + common_column_indices: [] + + - id: 3 + desc: batch request with non-trival output common columns, window is common + inputs: + - + columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"] + indexs: ["index1:c1:c6", "index2:id:c6"] + rows: + - [1,"a",1,30,1.0,2.0,1590738990000] + - [3,"a",3,32,1.2,2.2,1590738992000] + - [5,"a",5,34,1.4,2.4,1590738994000] + - 
[6,"a",6,35,1.5,2.5,1590738995000] + - + columns : ["id int","timecol timestamp","c7 date","c8 string"] + indexs: ["index2:id:timecol"] + rows: + - [1,1590738990000,"2020-05-01","a"] + - [2,1590738991000,"2020-05-02","b"] + - [3,1590738992000,"2020-05-03","c"] + - [4,1590738993000,"2020-05-04","d"] + - [5,1590738994000,"2020-05-05","e"] + - [6,1590738995000,"2020-05-06","f"] + - [7,1590738996000,"2020-05-07","g"] + batch_request: + indexs: ["index1:c1:c6"] + common_column_indices: [1,3,6] + columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"] + rows: + - [2,"a",2,31,1.1,2.1,1590738996000] + - [4,"a",3,31,1.2,2.2,1590738996000] + - [7,"a",4,31,1.3,2.3,1590738996000] + sql: | + SELECT {0}.id, c1 as m1, sum(c2) OVER w1 as m2, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, + sum(c5) OVER w1 as m5, max(c6) OVER w1 as m6, max(c7) OVER w1 as m7, min(c8) OVER w1 as m8 + FROM {0} last join {1} order by {1}.timecol on {0}.id={1}.id and {0}.c6={1}.timecol + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","m1 string","m2 int","m3 bigint","m4 float","m5 double","m6 timestamp","m7 date","m8 string"] + common_column_indices: [1,3,6] + rows: + - [2,"a",13,100,4.0,7.0,1590738996000,"2020-05-06","e"] + - [4,"a",14,100,4.1,7.1,1590738996000,"2020-05-06","e"] + - [7,"a",15,100,4.2,7.2,1590738996000,"2020-05-07","e"] + + - id: 4 + desc: batch request with non-trival output common columns, join is common and window non-common + inputs: + - + columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"] + indexs: ["index1:c1:c6", "index2:id:c6"] + rows: + - [1,"a",1,30,1.0,2.0,1590738990000] + - [3,"a",3,32,1.2,2.2,1590738992000] + - [5,"a",5,34,1.4,2.4,1590738994000] + - [6,"a",6,35,1.5,2.5,1590738995000] + - + columns : ["id int","timecol timestamp","c1 string", "c7 date","c8 string"] + indexs: ["index2:c1:timecol"] + rows: + - 
[1,1590738990000,"a","2020-05-01","a"] + - [2,1590738991000,"a","2020-05-02","b"] + - [3,1590738992000,"a","2020-05-03","c"] + - [4,1590738993000,"a","2020-05-04","d"] + - [5,1590738994000,"a","2020-05-05","e"] + - [6,1590738995000,"a","2020-05-06","f"] + - [7,1590738996000,"a","2020-05-07","g"] + batch_request: + indexs: ["index1:c1:c6"] + common_column_indices: [1,3] + columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"] + rows: + - [2,"a",2,31,1.1,2.1,1590738996000] + - [4,"a",3,31,1.2,2.2,1590738997000] + - [7,"a",4,31,1.3,2.3,1590738998000] + sql: | + SELECT {0}.id, {0}.c1 as m1, sum(c2) OVER w1 as m2, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, + sum(c5) OVER w1 as m5, max(c6) OVER w1 as m6, max(c7) OVER w1 as m7, min(c8) OVER w1 as m8 + FROM {0} last join {1} order by {1}.timecol on {0}.c1={1}.c1 + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","m1 string","m2 int","m3 bigint","m4 float","m5 double","m6 timestamp","m7 date","m8 string"] + common_column_indices: [] + rows: + - [2,"a",13,100,4.0,7.0,1590738996000,"2020-05-07","g"] + - [4,"a",14,100,4.1,7.1,1590738997000,"2020-05-07","g"] + - [7,"a",15,100,4.2,7.2,1590738998000,"2020-05-07","g"] + + - id: 5 + desc: batch request with non-trival output common columns, window and join are common + inputs: + - + columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"] + indexs: ["index1:c1:c6", "index2:id:c6"] + rows: + - [1,"a",1,30,1.0,2.0,1590738990000] + - [3,"a",3,32,1.2,2.2,1590738992000] + - [5,"a",5,34,1.4,2.4,1590738994000] + - [6,"a",6,35,1.5,2.5,1590738995000] + - + columns : ["id int","timecol timestamp","c1 string", "c7 date","c8 string"] + indexs: ["index2:c1:timecol"] + rows: + - [1,1590738990000,"a","2020-05-01","a"] + - [2,1590738991000,"a","2020-05-02","b"] + - [3,1590738992000,"a","2020-05-03","c"] + - 
[4,1590738993000,"a","2020-05-04","d"] + - [5,1590738994000,"a","2020-05-05","e"] + - [6,1590738995000,"a","2020-05-06","f"] + - [7,1590738996000,"a","2020-05-07","g"] + batch_request: + indexs: ["index1:c1:c6"] + common_column_indices: [1,3,6] + columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"] + rows: + - [2,"a",2,31,1.1,2.1,1590738996000] + - [4,"a",3,31,1.2,2.2,1590738996000] + - [7,"a",4,31,1.3,2.3,1590738996000] + sql: | + SELECT {0}.id, {0}.c1 as m1, sum(c2) OVER w1 as m2, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, + sum(c5) OVER w1 as m5, max(c6) OVER w1 as m6, max(c7) OVER w1 as m7, min(c8) OVER w1 as m8 + FROM {0} last join {1} order by {1}.timecol on {0}.c1={1}.c1 and {0}.c6={1}.timecol + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","m1 string","m2 int","m3 bigint","m4 float","m5 double","m6 timestamp","m7 date","m8 string"] + common_column_indices: [1,3,6,7,8] + rows: + - [2,"a",13,100,4.0,7.0,1590738996000,"2020-05-07","e"] + - [4,"a",14,100,4.1,7.1,1590738996000,"2020-05-07","e"] + - [7,"a",15,100,4.2,7.2,1590738996000,"2020-05-07","e"] + - id: 6 + desc: batch request with one common window and one non-common window + mode: disk-unsupport + inputs: + - + columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp", + "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"] + indexs: ["index1:k1:k3", "index2:k2:k4"] + repeat: 10 + rows: + - [1,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + - [3,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + - [5,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + - [6,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + batch_request: + common_column_indices: [1,3,5,6,7] + columns : ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp", + "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"] 
+ rows: + - [2,1,2,1590738991000,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0] + - [4,1,2,1590738991000,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0] + - [7,1,2,1590738991000,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0] + sql: | + SELECT {0}.id, sum(c1) over w1 as m1, sum(c2) over w1 as m2, sum(c3) over w1 as m3, + sum(c4) over w2 as m4, sum(c5) over w2 as m5, sum(c6) over w2 as m6 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.k1 ORDER BY {0}.k3 ROWS_RANGE BETWEEN 20s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.k2 ORDER BY {0}.k4 ROWS_RANGE BETWEEN 20s PRECEDING AND CURRENT ROW); + expect: + success: true + order: id + columns: [ "id int","m1 double","m2 double","m3 double","m4 double","m5 double","m6 double"] + common_column_indices: [1,2,3] + rows: + - [2, 41.0, 41.0, 41.0, 41.0, 41.0, 41.0] + - [4, 41.0, 41.0, 41.0, 41.0, 41.0, 41.0] + - [7, 41.0, 41.0, 41.0, 41.0, 41.0, 41.0] + + - id: 7 + desc: batch request with common window and common and non-common aggregations, window is small + mode: disk-unsupport + inputs: + - + columns: ["id int","k1 bigint","k2 timestamp", + "c1 double","c2 double","c3 double", + "c4 double","c5 double","c6 double"] + indexs: ["index1:k1:k2",] + repeat: 10 + rows: + - [1,1,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + batch_request: + common_column_indices: [1,2,3,5,7] + columns : ["id int","k1 bigint","k2 timestamp", + "c1 double","c2 double","c3 double", + "c4 double","c5 double","c6 double"] + rows: + - [2,1,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0] + - [4,1,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0] + - [7,1,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0] + sql: | + SELECT {0}.id, sum(c1) over w1 as m1, sum(c2) over w1 as m2, sum(c3) over w1 as m3, + sum(c4) over w1 as m4, sum(c5) over w1 as m5, sum(c6) over w1 as m6 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.k1 ORDER BY {0}.k2 ROWS_RANGE BETWEEN 20s PRECEDING AND CURRENT ROW); + expect: + success: true + order: id + common_column_indices: [1,3,5] + columns: [ "id int","m1 double","m2 double","m3 double","m4 
double","m5 double","m6 double"] + rows: + - [2, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0] + - [4, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0] + - [7, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0] + + - id: 8 + desc: batch request with one common window and one non-common window, current time == history time + mode: disk-unsupport + inputs: + - + columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp", + "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"] + indexs: ["index1:k1:k3", "index2:k2:k4"] + repeat: 10 + rows: + - [1,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + - [3,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + - [5,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + - [6,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + batch_request: + columns : ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp", + "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"] + rows: + - [2,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0] + sql: | + SELECT {0}.id, sum(c1) over w1 as m1, sum(c2) over w1 as m2, sum(c3) over w1 as m3, + sum(c4) over w2 as m4, sum(c5) over w2 as m5, sum(c6) over w2 as m6 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.k1 ORDER BY {0}.k3 ROWS BETWEEN 10 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.k2 ORDER BY {0}.k4 ROWS BETWEEN 20 PRECEDING AND CURRENT ROW); + expect: + success: true + order: id + columns: [ "id int","m1 double","m2 double","m3 double","m4 double","m5 double","m6 double"] + rows: + - [2, 11.0, 11.0, 11.0, 21.0, 21.0, 21.0] diff --git a/cases/integration_test/test_feature_zero_function.yaml b/cases/integration_test/test_feature_zero_function.yaml new file mode 100644 index 00000000000..24876d3ce97 --- /dev/null +++ b/cases/integration_test/test_feature_zero_function.yaml @@ -0,0 +1,176 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_fz +debugs: [] +cases: + - id: 1 + desc: feature zero split utility functions + inputs: + - columns: ["id int64", "pk int64", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "k1:v1,k2:v2"] + - [2, 0, "k3:v3"] + - [3, 0, "???,,k4:v4"] + - [4, 0, NULL] + - [5, 0, "k5:v5,k5:v3"] + sql: | + SELECT id, + identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split(c1, ",")) else null end) over w1 as table_2_kn_0, + identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_key(c1, ",", ":")) else null end) over w1 as table_2_kn_1, + identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_value(c1, ",", ":")) else null end) over w1 as table_2_kn_2, + fz_join(fz_window_split(c1, ","), " ") OVER w1 AS split_and_join, + fz_join(fz_window_split_by_key(c1, ",", ":"), " ") OVER w1 AS split_key_and_join, + fz_join(fz_window_split_by_value(c1, ",", ":"), " ") OVER w1 AS split_value_and_join, + count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS split_key_and_count, + distinct_count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS split_key_and_distinct_count + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int64", "table_2_kn_0 int64", "table_2_kn_1 int64", "table_2_kn_2 int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string", "split_key_and_count int64", "split_key_and_distinct_count int64"] + rows: + - [1, 2, 2, 2, "k1:v1 k2:v2", 
"k1 k2", "v1 v2", 2, 2] + - [2, 3, 3, 3, "k3:v3 k1:v1 k2:v2", "k3 k1 k2", "v3 v1 v2", 3, 3] + - [3, 6, 4, 4, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4] + - [4, NULL, NULL, NULL, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4] + - [5, 8, 5, 5, "k5:v5 k5:v3 ??? k4:v4 k3:v3 k1:v1 k2:v2", "k5 k5 k4 k3 k1 k2", "v5 v3 v4 v3 v1 v2", 6, 5] + + - id: 2 + desc: feature zero split utility functions on single row + inputs: + - name: main + columns: ["id int64", "pk int64", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "k1:v1,k2:v2"] + - [2, 0, "k3:v3"] + - [3, 0, "???,,k4:v4"] + - [4, 0, NULL] + - [5, 0, "k5:v5,k3:v3"] + sql: | + SELECT id, + fz_join(fz_split(c1, ","), " ") AS split_and_join, + fz_join(fz_split_by_key(c1, ",", ":"), " ") AS split_key_and_join, + fz_join(fz_split_by_value(c1, ",", ":"), " ") AS split_value_and_join + FROM main; + expect: + order: id + columns: ["id int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string"] + rows: + - [1, "k1:v1 k2:v2", "k1 k2", "v1 v2"] + - [2, "k3:v3", "k3", "v3"] + - [3, "??? 
k4:v4", "k4", "v4"] + - [4, "", "", ""] + - [5, "k5:v5 k3:v3", "k5 k3", "v5 v3"] + + - id: 3 + desc: window top1 ratio + inputs: + - + columns : ["id bigint","pk bigint","c1 smallint","c2 int","c3 bigint","c4 float", + "c5 double", "c6 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 1, 1, 1, 1, 1.1, 2.1, "1:1 1:2"] + - [2, 1, 2, 2, 1, 1.4, 2.1, "1:1" ] + - [3, 1, NULL, 3, 1, 1.3, 2.3, "1:1 1:3"] + - [4, 2, NULL, 5, 1, NULL, NULL, "1:3"] + - [5, 2, 5, 4, 1, 1.5, 2.5, "1:2 1:3"] + sql: | + SELECT id, + fz_top1_ratio(c1) OVER w1 as r1, + fz_top1_ratio(c2) OVER w1 as r2, + fz_top1_ratio(c3) OVER w1 as r3, + fz_top1_ratio(c4) OVER w1 as r4, + fz_top1_ratio(c5) OVER w1 as r5, + fz_top1_ratio(fz_window_split_by_value(c6, " ", ":")) OVER w1 as r6, + fz_join(fz_window_split_by_value(c6, " ", ":")," ") OVER w1 as r7 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint","r1 double","r2 double","r3 double","r4 double","r5 double","r6 double","r7 string"] + rows: + - [1, 1.0, 1.0, 1, 1.0, 1.0, 0.5,"1 2"] + - [2, 0.5, 0.5, 1, 0.5, 1.0, 0.66666666666666663,"1 1 2"] + - [3, 0.5, 0.33333333333333331, 1, 0.33333333333333331, 0.66666666666666663, 0.6,"1 3 1 1 2"] + - [4, 0, 1, 1, 0, 0, 1,"3"] + - [5, 1, 0.5, 1, 1.0, 1, 0.66666666666666663,"2 3 3"] + + - id: 4 + desc: Multi Top 3 Frequency + inputs: + - + columns : ["id bigint","pk bigint","c1 string","c2 int","c3 string","c4 float", + "c5 double", "c6 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 1, "1:2 4:3", 1, "1:2 1:3", 1.1, 2.1, "1:1 1:2"] + - [2, 1, "4:2 8:3", NULL, "1:7 1:3", 1.4, 2.1, "1:1" ] + - [3, 1, NULL, 2, "1:2 1:3", 1.3, 2.3, "1:1 1:3"] + - [4, 2, NULL, NULL, "1:8 1:3", NULL, NULL, "1:3"] + - [5, 2, "1:2 1:3", 5, "1:8 1:3", 1, 1.5, "1:2 1:3"] + sql: | + SELECT id, + fz_topn_frequency(fz_window_split_by_key(c1, " ", ":"), 3) OVER w1 as r1, + fz_topn_frequency(c2, 3) OVER w1 as r2, + 
fz_topn_frequency(fz_window_split(c3, ","), 3) OVER w1 as r3, + fz_topn_frequency(c4, 3) OVER w1 as r4, + fz_topn_frequency(c5, 3) OVER w1 as r5, + fz_topn_frequency(fz_window_split_by_value(c6, " ", ":"), 3) OVER w1 as r6, + fz_join(fz_window_split_by_value(c6, " ", ":")," ") OVER w1 as r7 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint","r1 string","r2 string","r3 string","r4 string","r5 string","r6 string","r7 string"] + rows: + - [1, "1,4,NULL", "1,NULL,NULL", "1:2 1:3,NULL,NULL", "1.100000,NULL,NULL", "2.100000,NULL,NULL", "1,2,NULL", "1 2"] + - [2, "4,1,8", "1,NULL,NULL", "1:2 1:3,1:7 1:3,NULL", "1.100000,1.400000,NULL", "2.100000,NULL,NULL", "1,2,NULL","1 1 2"] + - [3, "4,1,8", "1,2,NULL","1:2 1:3,1:7 1:3,NULL", "1.100000,1.300000,1.400000", "2.100000,2.300000,NULL", "1,2,3","1 3 1 1 2"] + - [4, "", "NULL,NULL,NULL", "1:8 1:3,NULL,NULL", "NULL,NULL,NULL", "NULL,NULL,NULL", "3,NULL,NULL","3"] + - [5, "1,NULL,NULL", "5,NULL,NULL", "1:8 1:3,NULL,NULL", "1.000000,NULL,NULL", "1.500000,NULL,NULL", "3,2,NULL","2 3 3"] + + - id: 5 + desc: feature zero split utility functions on empty separator + inputs: + - columns: ["id int64", "pk int64", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "a"] + - [2, 0, "b"] + - [3, 0, "c"] + - [4, 0, NULL] + - [5, 0, "e"] + sql: | + SELECT id, + fz_join(fz_split(c1, ""), "") OVER w1 AS r1, + fz_join(fz_split_by_key(c1, "", ""), "") OVER w1 AS r2, + fz_join(fz_split_by_value(c1, "", ""), "") OVER w1 AS r3, + fz_join(fz_window_split(c1, ""), " ") OVER w1 AS r4, + fz_join(fz_window_split_by_key(c1, "", ""), " ") OVER w1 AS r5, + fz_join(fz_window_split_by_value(c1, "", ""), " ") OVER w1 AS r6 + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int64", "r1 string", "r2 string", "r3 string", "r4 string", "r5 string", "r6 string"] + 
rows: + - [1, "", "", "", "", "", ""] + - [2, "", "", "", "", "", ""] + - [3, "", "", "", "", "", ""] + - [4, "", "", "", "", "", ""] + - [5, "", "", "", "", "", ""] diff --git a/cases/integration_test/test_fz_sql.yaml b/cases/integration_test/test_fz_sql.yaml new file mode 100644 index 00000000000..f79cecd1a27 --- /dev/null +++ b/cases/integration_test/test_fz_sql.yaml @@ -0,0 +1,156 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_fz +debugs: [] +cases: + - id: 0 + desc: feature zero split utility functions + inputs: + - columns: ["id int64", "pk int64", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "k1:v1,k2:v2"] + - [2, 0, "k3:v3"] + - [3, 0, "???,,k4:v4"] + - [4, 0, NULL] + - [5, 0, "k5:v5,k5:v3"] + sql: | + SELECT id, + identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split(c1, ",")) else null end) over w1 as table_2_kn_0, + identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_key(c1, ",", ":")) else null end) over w1 as table_2_kn_1, + identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_value(c1, ",", ":")) else null end) over w1 as table_2_kn_2, + fz_join(fz_window_split(c1, ","), " ") OVER w1 AS split_and_join, + fz_join(fz_window_split_by_key(c1, ",", ":"), " ") OVER w1 AS split_key_and_join, + fz_join(fz_window_split_by_value(c1, ",", ":"), " ") OVER w1 AS split_value_and_join, + count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS 
split_key_and_count, + distinct_count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS split_key_and_distinct_count + FROM {0} + WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int64", "table_2_kn_0 int64", "table_2_kn_1 int64", "table_2_kn_2 int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string", "split_key_and_count int64", "split_key_and_distinct_count int64"] + rows: + - [1, 2, 2, 2, "k1:v1 k2:v2", "k1 k2", "v1 v2", 2, 2] + - [2, 3, 3, 3, "k3:v3 k1:v1 k2:v2", "k3 k1 k2", "v3 v1 v2", 3, 3] + - [3, 6, 4, 4, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4] + - [4, NULL, NULL, NULL, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4] + - [5, 8, 5, 5, "k5:v5 k5:v3 ??? k4:v4 k3:v3 k1:v1 k2:v2", "k5 k5 k4 k3 k1 k2", "v5 v3 v4 v3 v1 v2", 6, 5] + + - id: 1 + desc: feature zero split utility functions on single row + inputs: + - name: main + columns: ["id int64", "pk int64", "c1 string"] + indexs: ["index1:pk:id"] + rows: + - [1, 0, "k1:v1,k2:v2"] + - [2, 0, "k3:v3"] + - [3, 0, "???,,k4:v4"] + - [4, 0, NULL] + - [5, 0, "k5:v5,k3:v3"] + sql: | + SELECT id, + fz_join(fz_split(c1, ","), " ") AS split_and_join, + fz_join(fz_split_by_key(c1, ",", ":"), " ") AS split_key_and_join, + fz_join(fz_split_by_value(c1, ",", ":"), " ") AS split_value_and_join + FROM main; + expect: + order: id + columns: ["id int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string"] + rows: + - [1, "k1:v1 k2:v2", "k1 k2", "v1 v2"] + - [2, "k3:v3", "k3", "v3"] + - [3, "??? 
k4:v4", "k4", "v4"] + - [4, "", "", ""] + - [5, "k5:v5 k3:v3", "k5 k3", "v5 v3"] + - id: 2 + desc: fz case 5 simple version debug + mode: rtidb-batch-unsupport + inputs: + - columns: ["id int64", "reqId string", "eventTime timestamp", "SK_ID_CURR string"] + indexs: ["index1:reqId:id"] + rows: + - [1, "col0", 1607473951299, "col3"] + - columns: [ "ingestionTime timestamp","eventTime timestamp", + "SK_ID_PREV string","SK_ID_CURR string", + "NAME_CONTRACT_TYPE string","AMT_ANNUITY double","AMT_APPLICATION double","AMT_CREDIT double","AMT_DOWN_PAYMENT double", + "AMT_GOODS_PRICE double","WEEKDAY_APPR_PROCESS_START string","HOUR_APPR_PROCESS_START int", + "FLAG_LAST_APPL_PER_CONTRACT string","NFLAG_LAST_APPL_IN_DAY int", + "RATE_DOWN_PAYMENT double","RATE_INTEREST_PRIMARY double","RATE_INTEREST_PRIVILEGED double", + "NAME_CASH_LOAN_PURPOSE string","NAME_CONTRACT_STATUS string","DAYS_DECISION int","NAME_PAYMENT_TYPE string", + "CODE_REJECT_REASON string","NAME_TYPE_SUITE string","NAME_CLIENT_TYPE string","NAME_GOODS_CATEGORY string", + "NAME_PORTFOLIO string","NAME_PRODUCT_TYPE string","CHANNEL_TYPE string","SELLERPLACE_AREA int", + "NAME_SELLER_INDUSTRY string","CNT_PAYMENT double","NAME_YIELD_GROUP string","PRODUCT_COMBINATION string", + "DAYS_FIRST_DRAWING double","DAYS_FIRST_DUE double","DAYS_LAST_DUE_1ST_VERSION double", + "DAYS_LAST_DUE double","DAYS_TERMINATION double", + "NFLAG_INSURED_ON_APPROVAL double"] + indexs: ["index1:SK_ID_CURR:ingestionTime"] + rows: + - [1607473951298, 1607473951298, + 'col2', 'col3', 'col4', 1.4, 1.4, 1.4, 1.4, 1.4, 'col10', 11, 'col12', 13, 1.4, 1.4, 1.4, + 'col17', 'col18', 19, 'col20', 'col21', 'col22', 'col23', 'col24', 'col25', 'col26', + 'col27', 28, 'col29', 1.4, 'col31', 'col32', 1.4, 1.4, 1.4, 1.4, 1.4, 1.4] + sql: | + select reqId_1, reqId_243 from ( select reqId as reqId_1 from {0} ) as out0 last join + ( select + reqId as reqId_243, + case when !isnull(lag(NAME_CLIENT_TYPE, 1)) over 
previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_CLIENT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as f1, + 1 as f2, + fz_topn_frequency(NAME_CONTRACT_STATUS, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f3, + distinct_count(NAME_CONTRACT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f4, + fz_topn_frequency(NAME_CONTRACT_TYPE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f5, + fz_topn_frequency(NAME_GOODS_CATEGORY, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f6, + distinct_count(NAME_GOODS_CATEGORY) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f7, + fz_topn_frequency(NAME_PAYMENT_TYPE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f8, + case when !isnull(lag(NAME_PAYMENT_TYPE, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_PAYMENT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as f9, + distinct_count(NAME_PORTFOLIO) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f10, + fz_topn_frequency(NAME_PORTFOLIO, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_PORTFOLIO_multi_top3frequency_299, + distinct_count(NAME_PRODUCT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_PRODUCT_TYPE_multi_unique_count_300, + fz_topn_frequency(NAME_PRODUCT_TYPE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_PRODUCT_TYPE_multi_top3frequency_301, + fz_topn_frequency(NAME_SELLER_INDUSTRY, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_SELLER_INDUSTRY_multi_top3frequency_302, + case when !isnull(lag(NAME_SELLER_INDUSTRY, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_SELLER_INDUSTRY) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end 
as previous_application_NAME_SELLER_INDUSTRY_multi_count_303, + fz_topn_frequency(NAME_TYPE_SUITE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_TYPE_SUITE_multi_top3frequency_304, + case when !isnull(lag(NAME_TYPE_SUITE, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_TYPE_SUITE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_NAME_TYPE_SUITE_multi_count_305, + fz_topn_frequency(NAME_YIELD_GROUP, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_YIELD_GROUP_multi_top3frequency_306, + case when !isnull(lag(NAME_YIELD_GROUP, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_YIELD_GROUP) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_NAME_YIELD_GROUP_multi_count_307, + fz_topn_frequency(PRODUCT_COMBINATION, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_PRODUCT_COMBINATION_multi_top3frequency_308, + case when !isnull(lag(PRODUCT_COMBINATION, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(PRODUCT_COMBINATION) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_PRODUCT_COMBINATION_multi_count_309, + fz_topn_frequency(SK_ID_PREV, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_SK_ID_PREV_multi_top3frequency_310, + distinct_count(SK_ID_PREV) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_SK_ID_PREV_multi_unique_count_311, + fz_topn_frequency(WEEKDAY_APPR_PROCESS_START, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_WEEKDAY_APPR_PROCESS_START_multi_top3frequency_312, + + case when !isnull(lag(WEEKDAY_APPR_PROCESS_START, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(WEEKDAY_APPR_PROCESS_START) over 
previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_WEEKDAY_APPR_PROCESS_START_multi_count_313 + from + (select eventTime as ingestionTime, timestamp('2019-07-18 09:20:20') as eventTime, '' as SK_ID_PREV, + SK_ID_CURR as SK_ID_CURR, '' as NAME_CONTRACT_TYPE, double(0) as AMT_ANNUITY, double(0) as AMT_APPLICATION, + double(0) as AMT_CREDIT, double(0) as AMT_DOWN_PAYMENT, double(0) as AMT_GOODS_PRICE, '' as WEEKDAY_APPR_PROCESS_START, + int(0) as HOUR_APPR_PROCESS_START, '' as FLAG_LAST_APPL_PER_CONTRACT, int(0) as NFLAG_LAST_APPL_IN_DAY, double(0) as RATE_DOWN_PAYMENT, + double(0) as RATE_INTEREST_PRIMARY, double(0) as RATE_INTEREST_PRIVILEGED, '' as NAME_CASH_LOAN_PURPOSE, '' as NAME_CONTRACT_STATUS, int(0) as DAYS_DECISION, + '' as NAME_PAYMENT_TYPE, '' as CODE_REJECT_REASON, '' as NAME_TYPE_SUITE, '' as NAME_CLIENT_TYPE, '' as NAME_GOODS_CATEGORY, '' as NAME_PORTFOLIO, '' as NAME_PRODUCT_TYPE, + '' as CHANNEL_TYPE, int(0) as SELLERPLACE_AREA, '' as NAME_SELLER_INDUSTRY, double(0) as CNT_PAYMENT, '' as NAME_YIELD_GROUP, '' as PRODUCT_COMBINATION, + double(0) as DAYS_FIRST_DRAWING, double(0) as DAYS_FIRST_DUE, double(0) as DAYS_LAST_DUE_1ST_VERSION, double(0) as DAYS_LAST_DUE, double(0) as DAYS_TERMINATION, + double(0) as NFLAG_INSURED_ON_APPROVAL, reqId from {0}) + window previous_application_SK_ID_CURR_ingestionTime_0s_32d as ( UNION (select ingestionTime, + eventTime, SK_ID_PREV, SK_ID_CURR, NAME_CONTRACT_TYPE, AMT_ANNUITY, AMT_APPLICATION, AMT_CREDIT, AMT_DOWN_PAYMENT, AMT_GOODS_PRICE, WEEKDAY_APPR_PROCESS_START, HOUR_APPR_PROCESS_START, + FLAG_LAST_APPL_PER_CONTRACT, NFLAG_LAST_APPL_IN_DAY, RATE_DOWN_PAYMENT, RATE_INTEREST_PRIMARY, RATE_INTEREST_PRIVILEGED, NAME_CASH_LOAN_PURPOSE, NAME_CONTRACT_STATUS, + DAYS_DECISION, NAME_PAYMENT_TYPE, CODE_REJECT_REASON, NAME_TYPE_SUITE, NAME_CLIENT_TYPE, NAME_GOODS_CATEGORY, NAME_PORTFOLIO, NAME_PRODUCT_TYPE, CHANNEL_TYPE, SELLERPLACE_AREA, + NAME_SELLER_INDUSTRY, CNT_PAYMENT, 
NAME_YIELD_GROUP, PRODUCT_COMBINATION, DAYS_FIRST_DRAWING, DAYS_FIRST_DUE, DAYS_LAST_DUE_1ST_VERSION, DAYS_LAST_DUE, DAYS_TERMINATION, NFLAG_INSURED_ON_APPROVAL, + '' as reqId from {1}) + partition by SK_ID_CURR order by ingestionTime rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW)) as out7 on out0.reqId_1 = out7.reqId_243 ; + expect: + success: true + columns: ["reqId_1 string", "reqId_243 string"] + rows: + - ["col0", "col0"] diff --git a/cases/integration_test/test_index_optimized.yaml b/cases/integration_test/test_index_optimized.yaml new file mode 100644 index 00000000000..78e05a96131 --- /dev/null +++ b/cases/integration_test/test_index_optimized.yaml @@ -0,0 +1,184 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: window optimized one key one ts + inputs: + - columns: [ "c1 string","c3 int","c6 timestamp","c7 timestamp" ] + indexs: ["index1:c1:c6" ] + rows: + - [ "aa",1, 1590738990000, 1590738990000 ] + - [ "aa",2, 1590738991000, 1590738991000 ] + - [ "aa",3, 1590738992000, 1590738992000 ] + - [ "aa",4, 1590738993000, 1590738993000 ] + - [ "aa",5, 1590739001000, 1590738994000 ] + - [ "aa",6, 1590739002000, 1590738995000 ] + sql: | + SELECT c1, c3, c6, c7, + count(c1) OVER w1 as w1_cnt + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW); + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c6, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c6 timestamp", "c7 timestamp", "w1_cnt bigint"] + rows: + - [ "aa", 1, 1590738990000, 1590738990000, 1] + - [ "aa", 2, 1590738991000, 1590738991000, 2] + - [ "aa", 3, 1590738992000, 1590738992000, 3] + - [ "aa", 4, 1590738993000, 1590738993000, 4] + - [ "aa", 5, 1590739001000, 1590738994000, 1] + - [ "aa", 6, 1590739002000, 1590738995000, 2] + - id: 1 + desc: window optimized different key same ts + inputs: + - columns: [ "c1 string","c3 int","c6 timestamp","c7 timestamp" ] + indexs: ["index0:c3:c6", "index1:c1:c6" ] + rows: + - [ "aa",1, 1590738990000, 1590738990000 ] + - [ "aa",2, 1590738991000, 1590738991000 ] + - [ "aa",3, 1590738992000, 1590738992000 ] + - [ "aa",4, 1590738993000, 1590738993000 ] + - [ "aa",5, 1590739001000, 1590738994000 ] + - [ "aa",6, 1590739002000, 1590738995000 ] + sql: | + SELECT c1, c3, c6, c7, + count(c1) OVER w1 as w1_cnt + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW); + request_plan: | + 
PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c6, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c6 timestamp", "c7 timestamp", "w1_cnt bigint"] + rows: + - [ "aa", 1, 1590738990000, 1590738990000, 1] + - [ "aa", 2, 1590738991000, 1590738991000, 2] + - [ "aa", 3, 1590738992000, 1590738992000, 3] + - [ "aa", 4, 1590738993000, 1590738993000, 4] + - [ "aa", 5, 1590739001000, 1590738994000, 1] + - [ "aa", 6, 1590739002000, 1590738995000, 2] + - id: 2 + desc: window optimized same key different ts + inputs: + - columns: [ "c1 string","c3 int","c6 timestamp","c7 timestamp" ] + indexs: [ "index0:c3:c7", "index1:c3:c6", "index2:c1:c7", "index3:c1:c6" ] + rows: + - [ "aa",1, 1590738990000, 1590738990000 ] + - [ "aa",2, 1590738991000, 1590738991000 ] + - [ "aa",3, 1590738992000, 1590738992000 ] + - [ "aa",4, 1590738993000, 1590738993000 ] + - [ "aa",5, 1590739001000, 1590738994000 ] + - [ "aa",6, 1590739002000, 1590738995000 ] + sql: | + SELECT c1, c3, c6, c7, + count(c1) OVER w1 as w1_cnt, + count(c1) OVER w2 as w2_cnt + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW); + request_plan: | + SIMPLE_PROJECT(sources=(c1, c3, c6, c7, w1_cnt, w2_cnt)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c6, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index3) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + 
DATA_PROVIDER(type=Partition, table=auto_t0, index=index2) + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c6 timestamp", "c7 timestamp", "w1_cnt bigint", "w2_cnt bigint" ] + rows: + - [ "aa", 1, 1590738990000, 1590738990000, 1, 1 ] + - [ "aa", 2, 1590738991000, 1590738991000, 2, 2 ] + - [ "aa", 3, 1590738992000, 1590738992000, 3, 3 ] + - [ "aa", 4, 1590738993000, 1590738993000, 4, 4 ] + - [ "aa", 5, 1590739001000, 1590738994000, 1, 4 ] + - [ "aa", 6, 1590739002000, 1590738995000, 2, 4 ] + - id: 3 + desc: LastJoin optimized one key one ts + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: [ "index1:c1:c3", "index2:c1:c4" ] + rows: + - [ "aa",2,13,1590738990000 ] + - [ "aa",21,131,1590738989000 ] + - [ "bb",41,151,1590738988000 ] + sql: | + select {0}.c1,{0}.c2, t1.c3 as t1_c3, t1.c4 as t1_c4 from {0} + last join {1} as t1 ORDER BY t1.c3 on {0}.c1 = t1.c1; + request_plan: | + SIMPLE_PROJECT(sources=(auto_t0.c1, auto_t0.c2, t1.c3 -> t1_c3, t1.c4 -> t1_c4)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=, left_keys=(), right_keys=(), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index1) + expect: + order: c1 + columns: [ "c1 string","c2 int","t1_c3 bigint","t1_c4 timestamp" ] + rows: + - [ "aa",2, 131, 1590738989000] + - [ "bb",21,151, 1590738988000] + - id: 4 + desc: LastJoin optimized one key two ts + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"] + indexs: [ "index0:c2:c3", "index1:c1:c3", "index2:c1:c4" ] + rows: + - [ "aa",2,13,1590738990000 ] + - [ "aa",21,131,1590738989000 ] + 
- [ "bb",41,151,1590738988000 ] + sql: | + select {0}.c1,{0}.c2, t1.c3 as t1_c3, t1.c4 as t1_c4, t2.c3 as t2_c3, t2.c4 as t2_c4 from {0} + last join {1} as t1 ORDER BY t1.c3 on {0}.c1 = t1.c1 + last join {1} as t2 ORDER BY t2.c4 on {0}.c1 = t2.c1; + request_plan: | + SIMPLE_PROJECT(sources=(auto_t0.c1, auto_t0.c2, t1.c3 -> t1_c3, t1.c4 -> t1_c4, t2.c3 -> t2_c3, t2.c4 -> t2_c4)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=, left_keys=(), right_keys=(), index_keys=(auto_t0.c1)) + REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=, left_keys=(), right_keys=(), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index1) + RENAME(name=t2) + DATA_PROVIDER(type=Partition, table=auto_t1, index=index2) + expect: + order: c1 + columns: [ "c1 string","c2 int","t1_c3 bigint","t1_c4 timestamp", "t2_c3 bigint","t2_c4 timestamp" ] + rows: + - [ "aa",2, 131, 1590738989000, 13, 1590738990000 ] + - [ "bb",21,151, 1590738988000, 151,1590738988000 ] diff --git a/cases/integration_test/test_performance_insensitive/test_performance_insensitive.yaml b/cases/integration_test/test_performance_insensitive/test_performance_insensitive.yaml new file mode 100644 index 00000000000..f03b0d0235a --- /dev/null +++ b/cases/integration_test/test_performance_insensitive/test_performance_insensitive.yaml @@ -0,0 +1,401 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +cases: + - id: 0 + desc: where不命中素索引= + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c2=20; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 20, 30, 1590738991000 ] + - id: 1 + desc: where不命中素索引== + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c2==20; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 20, 30, 1590738991000 ] + - id: 2 + desc: where不命中索引不等值查询 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c2>20; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "bb", 21, 31, 1590738990000 ] + - [ "dd", 41, 51, 1590738990000 ] + - id: 3 + desc: where两个条件第一个命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c1='aa' and c2>2; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 20, 30, 1590738991000 ] + - id: 4 + desc: where命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ 
"aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c1='bb'; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "bb", 21, 31, 1590738990000 ] + - id: 5 + desc: where两个条件第二个命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c1='aa' and c2>2; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 20, 30, 1590738991000 ] + - id: 6 + desc: where两个条件都命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4","index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c1='aa' and c2>2; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 20, 30, 1590738991000 ] + - id: 7 + desc: where两个条件都不命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",20,30,1590738991000 ] + - [ "bb",21,31,1590738990000 ] + - [ "dd",41,51,1590738990000 ] + sql: select * from {0} where c1='aa' and c2>2; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 20, 30, 1590738991000 ] + - id: 8 + desc: lastjoin-拼表条件没有命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ 
"cc",41,121,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} order by {1}.c4 on {0}.c1={1}.c1; + expect: + columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [ "aa", 2, 13, 1590738989000 ] + - [ "bb", 21, 131, 1590738990000 ] + - id: 9 + desc: Last Join 无order by, 拼表条件命中部分的组合索引(前缀索引) + mode: offline-unsupport + inputs: + - columns: ["id int", "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ 1,"aa",2,3,1590738989000 ] + - [ 2,"aa",20,30,1590738991000 ] + - [ 3,"bb",21,31,1590738990000 ] + - [ 4,"dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1|c2:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",3,14,1590738990000 ] + - [ "aa",4,15,1590738991000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.id,{0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4; + expect: + order: id + columns: [ "id int","c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [1, "aa", 2, 13, 1590738989000 ] + - [2, "aa", 20, 15, 1590738991000 ] + - [3, "bb", 21, 131, 1590738990000 ] + - [4, "dd", 41, null, null ] + - id: 10 + desc: Last Join 无order by, 拼表条件命中部分的组合索引(后缀索引) + mode: offline-unsupport + inputs: + - columns: [ "id int","c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ 1,"aa",2,3,1590738989000 ] + - [ 2,"aa",20,30,1590738991000 ] + - [ 3,"bb",21,31,1590738990000 ] + - [ 4,"dd",41,51,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2|c1:c4" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "aa",3,14,1590738990000 ] + - [ "aa",4,15,1590738991000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,121,1590738991000 ] + sql: select {0}.id,{0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4; + expect: + order: id + 
columns: [ "id int","c1 string", "c2 int", "c3 bigint", "c4 timestamp" ] + rows: + - [1, "aa", 2, 13, 1590738989000 ] + - [2, "aa", 20, 15, 1590738991000 ] + - [3, "bb", 21, 131, 1590738990000 ] + - [4, "dd", 41, null, null ] + - id: 11 + desc: 不等值拼接-未命中索引 + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c2:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "bb",21,31,1590738990000 ] + - [ "bb",21,32,1590738993000 ] + - [ "bb",21,31,1590738992000 ] + - [ "bb",21,31,1590738991000 ] + sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3<{1}.c3; + expect: + columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + order: c1 + rows: + - [ "aa",2,32,1590738993000 ] + - [ "bb",21,32,1590738993000 ] + - id: 12 + desc: 两个子查询lastjoin-子查询带窗口特征-没有使用索引-不带orderby + mode: offline-unsupport + tags: ["offline-unsupport, @chendihao", "离线结果不对"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-02"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-02"] + sql: | + select id,t2.c1,t2.c3,t1.c4, t2.w2_c3_sum, t1.w3_c4_sum from + (select id,c1,c3,c4,c7,c8,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)) as t2 + last join (select c1,c4,c7,c8,sum({0}.c4) OVER w3 as w3_c4_sum from {0} WINDOW w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1 + on t2.c8=t1.c8 + ; + expect: + order: id + columns: ["id 
int","c1 string","c3 int","c4 bigint", "w2_c3_sum int", "w3_c4_sum bigint"] + rows: + - [1,"aa",20,30, 20, 30] + - [2,"aa",21,31, 41, 30] + - [3,"aa",22,32, 63, 33] + - [4,"bb",23,33, 23, 33] + - [5,"bb",24,34, 47, 33] + - + id: 14 + desc: rows-float为partition by-未命中索引 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 15 + desc: rows-double为partition by-未命中索引 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 16 + desc: rows-int为partition by-未命中索引 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"] + - 
[5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"bb",20,61] + - [3,"cc",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 17 + desc: rows_range-float为partition by-未命中索引 + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 18 + desc: rows_range-double为partition by-未命中索引 + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 19 + desc: rows_range-int为partition by-未命中索引 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"bb",20,61] + - [3,"cc",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 20 + desc: 样本表使用索引,UNION表未命中索引 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] \ No newline at end of file diff --git a/cases/integration_test/tmp/test_current_time.yaml b/cases/integration_test/tmp/test_current_time.yaml new file mode 100644 index 00000000000..528113cf3e5 --- /dev/null +++ b/cases/integration_test/tmp/test_current_time.yaml @@ -0,0 +1,106 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - id: 0 + desc: ts列的值为0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,0,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,30 ] + - [ "aa",21,31 ] + - [ "aa",22,32 ] + - [ "aa",23,33 ] + - [ "bb",24,34 ] + - id: 1 + desc: ts列的值为0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - 
[ "bb",24,null ] + - id: 2 + desc: ts列的值为-1 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,-1,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,30 ] + - [ "aa",21,31 ] + - [ "aa",22,32 ] + - [ "aa",23,33 ] + - [ "bb",24,34 ] +# - id: 2 +# desc: ts列的值为1 +# inputs: +# - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] +# indexs: [ "index1:c1:c7" ] +# rows: +# - [ "aa",20,30,1.1,2.1,1,"2020-05-01" ] +# - [ "aa",21,31,1.2,2.2,1,"2020-05-02" ] +# - [ "aa",22,32,1.3,2.3,1,"2020-05-03" ] +# - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] +# - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] +# sql: | +# SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); +# expect: +# order: c3 +# columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] +# rows: +# - [ "aa",20,30 ] +# - [ "aa",21,31 ] +# - [ "aa",22,32 ] +# - [ "aa",23,33 ] +# - [ "bb",24,34 ] diff --git a/cases/integration_test/ut_case/test_unique_expect.yaml b/cases/integration_test/ut_case/test_unique_expect.yaml new file mode 100644 index 00000000000..61865e1a2f0 --- /dev/null +++ b/cases/integration_test/ut_case/test_unique_expect.yaml @@ -0,0 +1,56 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: ts乱序 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3) + ; + unequalExpect: + batch_expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + request_expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",32] + - [4,"aa",33] + - [5,"aa",99] diff --git a/cases/integration_test/v040/test_execute_mode.yaml b/cases/integration_test/v040/test_execute_mode.yaml new file mode 100644 index 00000000000..dabae313d0d --- /dev/null +++ b/cases/integration_test/v040/test_execute_mode.yaml @@ -0,0 +1,81 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: 测试EXECUTE_MODE=online + sqls: + - set @@SESSION.execute_mode="online"; + - show variables; + expect: + columns: ["Variable_name string","Value string"] + rows: + - ["execute_mode","online"] + - + id: 1 + desc: EXECUTE_MODE=offline + sqls: + - set @@SESSION.execute_mode="offline"; + - show variables; + expect: + columns: ["Variable_name","Value"] + rows: + - ["execute_mode","offline"] + - + id: 2 + desc: EXECUTE_MODE为其他字符 + sqls: + - set @@SESSION.execute_olol = "offline"; + - show variables; + expect: + success: false + - + id: 3 + desc: EXECUTE_MODE为小写 + sqls: + - set @@SESSION.execute_mode = "online"; + - show variables; + expect: + success: false + - + id: 4 + desc: EXECUTE_MODE=online,创建表,insert数据,并查询 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + sqls: + - set @@SESSION.execute_mode = "online"; + - insert into {0} value ("aa",1,2,1590738989000); + - select * from {0}; + expect: + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + - + id: 5 + desc: EXECUTE_MODE=offline,创建表,insert数据,并查询 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + sqls: + - set @@SESSION.execute_mode = "offline"; + - insert into {0} values ("bb",2,3,1590738989000); + - select * from {0}; + expect: + colunms: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["bb",2,3,1590738989000] \ No newline at end of file diff --git a/cases/integration_test/v040/test_groupby.yaml 
b/cases/integration_test/v040/test_groupby.yaml new file mode 100644 index 00000000000..7150588bedd --- /dev/null +++ b/cases/integration_test/v040/test_groupby.yaml @@ -0,0 +1,560 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +sqlDialect: ["HybridSQL"] +cases: + - id: 0 + desc: "group by一个索引列" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 string","v1 bigint"] + rows: + - ["aa",2] + - ["bb",2] + - ["a%",1] + - id: 1 + desc: "group by一个非索引列" + mode: request-unsupport + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 string","v1 bigint"] + rows: + - ["aa",2] + - ["bb",2] + - ["a%",1] + - id: 2 + desc: "group by 两个列,组合索引" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1,c2:c7"] + rows: + - [1,"aa",11,1590738990000] + - [2,"bb",11,1590738991000] + - [3,"aa",12,1590738992000] + - 
[4,"a%",11,1590738993000] + - [5,"bb",11,1590738994000] + - [6,"aa",11,1590738995000] + sql: select c1,c2,count(*) as v1 from {0} group by c1,c2; + expect: + order: c1 + columns: ["c1 string","c2 int","v1 bigint"] + rows: + - ["aa",11,2] + - ["bb",11,2] + - ["a%",11,1] + - ["aa",12,1] + - id: 3 + desc: "group by int类型" + inputs: + - + columns : ["id bigint","c1 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,11,1590738990000] + - [2,22,1590738991000] + - [3,11,1590738992000] + - [4,33,1590738993000] + - [5,22,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 int","v1 bigint"] + rows: + - [11,2] + - [22,2] + - [33,1] + - id: 4 + desc: "group by bigint类型" + inputs: + - + columns : ["id bigint","c1 bigint","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,11,1590738990000] + - [2,22,1590738991000] + - [3,11,1590738992000] + - [4,33,1590738993000] + - [5,22,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 bigint","v1 bigint"] + rows: + - [11,2] + - [22,2] + - [33,1] + - id: 5 + desc: "group by smallint类型" + inputs: + - + columns : ["id bigint","c1 smallint","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,11,1590738990000] + - [2,22,1590738991000] + - [3,11,1590738992000] + - [4,33,1590738993000] + - [5,22,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 smallint","v1 bigint"] + rows: + - [11,2] + - [22,2] + - [33,1] + - id: 6 + desc: "group by float类型" + mode: request-unsupport + inputs: + - + columns: ["id bigint","c1 float","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,11.1,1590738990000] + - [2,22.1,1590738991000] + - [3,11.1,1590738992000] + - [4,33.1,1590738993000] + - [5,22.1,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + success: false + - id: 7 + desc: "group by double类型" + mode: request-unsupport + inputs: + - + 
columns : ["id bigint","c1 double","c7 timestamp"] + indexs: ["index1:id:c7"] + rows: + - [1,11.1,1590738990000] + - [2,22.1,1590738991000] + - [3,11.1,1590738992000] + - [4,33.1,1590738993000] + - [5,22.1,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + success: false + - id: 8 + desc: "group by date类型" + inputs: + - + columns : ["id bigint","c1 date","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"2020-05-01",1590738990000] + - [2,"2020-05-02",1590738991000] + - [3,"2020-05-01",1590738992000] + - [4,"2020-05-03",1590738993000] + - [5,"2020-05-02",1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 date","v1 bigint"] + rows: + - ["2020-05-01",2] + - ["2020-05-02",2] + - ["2020-05-03",1] + - id: 9 + desc: "group by timestamp类型" + inputs: + - + columns : ["id bigint","c1 timestamp","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,11,1590738990000] + - [2,22,1590738991000] + - [3,11,1590738992000] + - [4,33,1590738993000] + - [5,22,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 timestamp","v1 bigint"] + rows: + - [11,2] + - [22,2] + - [33,1] + - id: 10 + desc: "group by bool类型" + inputs: + - + columns : ["id bigint","c1 bool","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,true,1590738990000] + - [2,false,1590738991000] + - [3,false,1590738992000] + - [4,true,1590738993000] + - [5,true,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + order: c1 + columns: ["c1 bool","v1 bigint"] + rows: + - [true,3] + - [false,2] + - id: 11 + desc: "列有空串和null" + mode: cli-unsupport + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"",1590738990000] + - [2,null,1590738991000] + - [3,"",1590738992000] + - [4,"a%",1590738993000] + - [5,null,1590738994000] + sql: select c1,count(*) as v1 from {0} group by c1; + expect: + 
order: c1 + columns: ["c1 string","v1 bigint"] + rows: + - ["",2] + - [null,2] + - ["a%",1] + - id: 12 + desc: "group by 两个列,其中一个列有索引" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1,c2:c7"] + rows: + - [1,"aa",11,1590738990000] + - [2,"bb",11,1590738991000] + - [3,"aa",12,1590738992000] + - [4,"a%",11,1590738993000] + - [5,"bb",11,1590738994000] + - [6,"aa",11,1590738995000] + sql: select c1,c2,count(*) as v1 from {0} group by c1,c2; + expect: + order: c1 + columns: ["c1 string","c2 int","v1 bigint"] + rows: + - ["aa",11,2] + - ["bb",11,2] + - ["a%",11,1] + - ["aa",12,1] + - id: 13 + desc: "group by 两个列,两个索引" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7","index2:c2:c7"] + rows: + - [1,"aa",11,1590738990000] + - [2,"bb",11,1590738991000] + - [3,"aa",12,1590738992000] + - [4,"a%",11,1590738993000] + - [5,"bb",11,1590738994000] + - [6,"aa",11,1590738995000] + sql: select c1,c2,count(*) as v1 from {0} group by c1,c2; + expect: + columns: ["c1 string","c2 int","v1 bigint"] + rows: + - ["aa",12,1] + - ["bb",11,2] + - ["aa",11,2] + - ["a%",11,1] + + - id: 14 + desc: "select的列不在group by后面" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select id,c1,count(*) as v1 from {0} group by c1; + expect: + success: false + - id: 15 + desc: "group by结合count/sum/max/min/avg" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",3,1590738992000] + - [4,"cc",4,1590738993000] + - [5,"bb",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select c1,count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0} group by c1; + expect: + order: c1 + 
columns: ["c1 string","v1 bigint","v2 int","v3 int","v4 double","v5 int"] + rows: + - ["aa",3,6,1,3.333333,10] + - ["bb",2,5,2,3.5,7] + - ["cc",1,4,4,4,4] + - id: 16 + desc: "select的列不在group by后面" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select c2,count(*) as v1 from {0} group by c2; + expect: + success: false + - id: 17 + desc: "group by结合having" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",3,1590738992000] + - [4,"cc",4,1590738993000] + - [5,"bb",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select c1,count(c2) as v1 from {0} group by c1 having count(c2)>1; + expect: + order: c1 + columns: ["c1 string","v1 bigint"] + rows: + - ["aa",3] + - ["bb",2] + - id: 18 + desc: "group by结合having,使用别名" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",3,1590738992000] + - [4,"cc",4,1590738993000] + - [5,"bb",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select c1,count(c2) as v1 from {0} group by c1 having v1>1; + expect: + success: false + - id: 19 + desc: "group by使用where根据粗函数筛选" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",3,1590738992000] + - [4,"cc",4,1590738993000] + - [5,"bb",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select c1,count(c2) as v1 from {0} group by c1 where count(c2)>1; + expect: + success: false + - id: 20 + desc: "group by结合where" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + 
rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",3,1590738992000] + - [4,"cc",4,1590738993000] + - [5,"bb",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select c1,count(c2) as v1 from {0} group by c1 where c1='aa'; + expect: + success: false + - id: 21 + desc: lastjoin后group by + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "bb",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 group by {0}.c1; + expect: + order: c1 + columns: [ "c1 string","v1 bigint"] + rows: + - [ "aa",26 ] + - [ "cc",151 ] + - id: 22 + desc: group by在lastjoin + mode: request-unsupport + inputs: + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c4" ] + rows: + - [ "aa",2,3,1590738989000 ] + - [ "aa",21,31,1590738990000 ] + - [ "cc",41,51,1590738991000 ] + - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ] + indexs: [ "index1:c1:c3" ] + rows: + - [ "aa",2,13,1590738989000 ] + - [ "cc",21,131,1590738990000 ] + - [ "cc",41,151,1590738992000 ] + sql: select t1.c1,t1.v1,t2.v1 from (select c1,sum(c2) as v1 from {0} group by c1) as t1 last join (select c1,sum(c2) as v1 from {1} group by c1) as t2 on t1.c1=t2.c1; + expect: + order: c1 + columns: [ "c1 string","v1 int","v1 int"] + rows: + - [ "aa",23,2 ] + - [ "cc",41,62 ] + - + id: 23 + desc: winhow后group by + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - 
["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, max(sum(c4) OVER w1) as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) group by c1; + expect: + success: false + - id: 24 + desc: "子查询后group by" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select c1,count(*) as v1 from (select * from {0}) as t group by c1; + expect: + order: c1 + columns: ["c1 string","v1 bigint"] + rows: + - ["aa",2] + - ["bb",2] + - ["a%",1] + - id: 25 + desc: "group by后在子查询" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select * from (select c1,count(*) as v1 from {0} group by c1); + expect: + order: c1 + columns: ["c1 string","v1 bigint"] + rows: + - ["aa",2] + - ["bb",2] + - ["a%",1] + - id: 26 + desc: "group by where后面使用组函数别名" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",3,1590738992000] + - [4,"cc",4,1590738993000] + - [5,"bb",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select c1,count(c2) as v1 from {0} group by c1 where v1>1; + expect: + success: false + - id: 27 + desc: "group by后在子查询,使用where" + mode: request-unsupport + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select * from 
(select c1,count(*) as v1 from {0} group by c1) where v1=2; + expect: + order: c1 + columns: ["c1 string","v1 bigint"] + rows: + - ["aa",2] + - ["bb",2] + + + + + diff --git a/cases/integration_test/v040/test_job.yaml b/cases/integration_test/v040/test_job.yaml new file mode 100644 index 00000000000..74b6a0fd4a4 --- /dev/null +++ b/cases/integration_test/v040/test_job.yaml @@ -0,0 +1,176 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: show jobs + sqls: + - use __INTERNAL_DB; + - set @@SESSION.execute_mode = "offline"; + - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111',''); + - show jobs; + expects: + columns: ["JOBID string","JOB_TYPE string","STATUS string"] + rows: + - ["JOB-11220021","OFFLINE LOAD","RUNNING"] + - + id: 1 + desc: showjobs + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - use test_zw; + - set @@SESSION.execute_mode = "offline"; + - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111',''); + - showjobs; + expects: + success: false + - + id: 2 + desc: 切换不同的db,show jobs + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - use test_zw; + - set @@SESSION.execute_mode = "offline"; + - insert into JOB_INFO values 
(1,'SparkBatchSql','Running',11111,22222,'','local','application_1111',''); + - use other_db; + - show jobs; + expects: + columns: ["JOBID string","JOB_TYPE string","STATUS string"] + rows: + - ["JOB-11220021","OFFLINE LOAD","RUNNING"] + - + id: 3 + desc: show job jobID + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111',''); + - show job 1; + expects: + columns: ["JOBID string","JOB_TYPE string","URL string","CONTENT string"] + rows: + - ["JOB-11220021","OFFLINE LOAD","xxxx","LOAD DATA INFILE"] + - + id: 4 + desc: jobID不存在 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111',''); + - show job 1111; + expects: + - + id: 5 + desc: 语法错误 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111',''); + - show jobe 1; + expects: + success: false + - + id: 6 + desc: delete job jobID + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - delete job JOB-11220021; + expects: + - + id: 7 + desc: jobID不存在 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - delete job JOB-xxxxxx; + expects: + - + id: 8 + desc: 语法错误 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - delete jobe JOB-11220021; + expects: + - + id: 9 + desc: stop job jobID + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - 
set @@SESSION.execute_mode="offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table test_smoke options(deep_copy=true,mode='append'); + - stop job JOB-11220021; + expects: + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + rows: + - [1,"ImportOfflineData","STOPPED","","","load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table test_smoke options(deep_copy=true,mode='append');", + "local","local-1640683224470",""] + - + id: 10 + desc: jobID不存在 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - stop job JOB-xxxxxx; + expects: + success: false + - + id: 11 + desc: 语法错误 + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - stop jobe JOB-11220021; + expects: + success: false \ No newline at end of file diff --git a/cases/integration_test/v040/test_load_data.yaml b/cases/integration_test/v040/test_load_data.yaml new file mode 100644 index 00000000000..41a446a8e76 --- /dev/null +++ b/cases/integration_test/v040/test_load_data.yaml @@ -0,0 +1,467 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: Load data 集群版,EXECUTE_MODE=online,load parquet文件 + inputs: + - + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp timestamp, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "online"; + - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0}; + - SHOW JOBS; + expect: + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + + - + id: 1 + desc: 集群版,EXECUTE_MODE=offline,load parquet文件 + inputs: + - + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp timestamp, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='append'); + - SHOW JOBS; + expect: + + + - + id: 2 + desc: 集群版,EXECUTE_MODE=online,load csv文件,mode默认不写 + inputs: + - + columns: ["id int","c1_smallint 
smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "online"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0}; + - SHOW JOBS; + expect: + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + rows: + - [1,"ImportOfflineData","FINISHED","","","load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table test_smoke options(deep_copy=true,mode='append');", + "local","local-1640683224470",""] + - + id: 3 + desc: 集群版,execute_mode=online, load csv文件,mode=append + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "online"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(mode='append'); + - SHOW JOBS; + expect: + + - + id: 4 + desc: 集群版,execute_mode=online, load csv文件,mode=overwrite + inputs: + - + 
columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "online"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(mode='overwrite'); + - SHOW JOBS; + expect: + - + id: 5 + desc: 集群版,集群版 execute_mode=offline, load csv文件,deep_copy=true, mode默认不写,如果文件不存在 + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true); + - SHOW JOBS; + expect: + + - + id: 6 + desc: 集群版 execute_mode=offline, load csv文件,deep_copy=true, mode默认不写,如果文件存在 + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + 
c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true); + - SHOW JOBS; + expect: + + - + id: 7 + desc: 集群版 execute_mode=offline, load csv文件, deep_copy=true, mode=append + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true,mode='append'); + - SHOW JOBS; + expect: + - + id: 8 + desc: 集群版 execute_mode=offline, load csv文件, deep_copy=true, mode=overwrite + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 
'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true,mode='overwrite'); + - SHOW JOBS; + expect: + - + id: 9 + desc: 集群版 execute_mode=offline, load csv文件, deep_copy=false, mode=append + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false,mode='append'); + - SHOW JOBS; + expect: + - + id: 10 + desc: 集群版 execute_mode=offline, load csv文件, deep_copy=false, mode=overwrite + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false,mode='overwrite'); + - desc {0}; + expect: + - + id: 11 + desc: 集群版 execute_mode=offline, load csv文件, deep_copy=false,mode默认不写,没有load + inputs: + - + columns: ["id 
int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false); + - desc {0}; + expect: + - + id: 12 + desc: 集群版 execute_mode=offline, load csv文件, deep_copy=false,mode默认不写,已经load过 + inputs: + - + columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp bigint, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false); + - desc {0}; + expect: + + + + + + + + + + + + + + + + + + + + + + + - + id: 4 + desc: 集群版,EXECUTE_MODE=offline,load parquet文件,method=duplicate + inputs: + - + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + 
c6_string string, + c7_timestamp timestamp, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "offline"; + - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='duplicate'); + - SHOW JOBS; + expect: + - + id: 5 + desc: 集群版,EXECUTE_MODE=online,load parquet文件,method=duplicate + inputs: + - + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp timestamp, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set @@SESSION.execute_mode = "online"; + - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='duplicate'); + - SHOW JOBS; + expect: + - + id: 6 + desc: 集群版,EXECUTE_MODE=offline,load parquet文件,method=symbolic_link + inputs: + - + columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"] + create: | + create table {0}( + id int, + c1_smallint smallint, + c2_int int, + c3_bigint bigint, + c4_float float, + c5_double double, + c6_string string, + c7_timestamp timestamp, + c8_date date, + c9_bool bool, + index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute) + )options(partitionnum = 1,replicanum = 1); + sqls: + - set 
@@SESSION.execute_mode = "offline"; + - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',deep_copy=false); + - SHOW JOBS; + expect: + - + id: 7 + desc: 集群版,EXECUTE_MODE=online,load parquet文件,method=symbolic_link + inputs: + - + columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"] + rows: + - ["aa",1,2,1590738989000] + sqls: + - set @@SESSION.execute_mode = "online"; + - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='symbolic_link'); + - SHOW JOBS; + expect: + + + diff --git a/cases/integration_test/v040/test_out_in_offline.yaml b/cases/integration_test/v040/test_out_in_offline.yaml new file mode 100644 index 00000000000..9edd35ebf00 --- /dev/null +++ b/cases/integration_test/v040/test_out_in_offline.yaml @@ -0,0 +1,894 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: ["数据里有null、空串、特殊字符"] +cases: + - + id: 0 + desc: 数据里有null、空串、特殊字符 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] +# - +# columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] +# indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; +# - load data infile '{0}.csv' into table {1}; +# - select * from {1}; + expect: + count: 6 + - + id: 1 + desc: 全部数据类型测试 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 2 + desc: 复杂sql结果导出 + 
inputs: + - + columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"] + indexs: ["index1:card_no:trx_time"] + rows: + - [1, "aaaaaaaaaa",1, 1590738989000, 1.1] + - [2, "aaaaaaaaaa",1, 1590738990000, 2.2] + - [3, "bb",10, 1590738990000, 3.3] + - + columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"] + indexs: ["index2:crd_nbr:crd_lst_isu_dte"] + rows: + - [1590738988000, "aaaaaaaaaa"] + - [1590738990000, "aaaaaaaaaa"] + - [1590738989000, "cc"] + - [1590738992000, "cc"] + - + columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"] + sqls: + - select * from + (select + id, + card_no, + trx_time, + substr(card_no, 1, 6) as card_no_prefix, + sum(trx_amt) over w30d as sum_trx_amt, + count(merchant_id) over w10d as count_merchant_id + from {0} + window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW), + w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe + last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte + into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {2}; + - select * from {2}; + expect: + columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"] + order: id + rows: + - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 1, 1590738988000, "aaaaaaaaaa"] + - [2, "aaaaaaaaaa", 1590738990000, "aaaaaa", 3.3, 2, 1590738990000, "aaaaaaaaaa"] + - [3, "bb", 1590738990000, "bb", 3.3, 1, null, null] + - + id: 3 + desc: 全部数据类型测试 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + 
rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 4 + desc: 执行其他库查询 + inputs: + - + db: db1 + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from db1.{0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 5 + desc: 导出insert结果 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + sqls: + - insert into {0} values 
(1,"aa",1590738989000) outfile '{0}.csv'; + expect: + success: false + - + id: 6 + desc: sql执行错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from db1.{0} into outfile '{0}.csv'; + expect: + success: false + - + id: 7 + desc: mode默认值,文件已经存在 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {0} into outfile '{0}.csv'; + expect: + success: false + - + id: 8 + desc: mode=overwrite,先到处大数据量,再到处小数据量 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {1} into outfile '{0}.csv' options(mode='overwrite'); + - load data infile '{0}.csv' into table {2}; + - select * from {2}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - + id: 9 + desc: mode=append,相同的表到处两次 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: 
["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {0} into outfile '{0}.csv' options(mode='append',header=false); + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 10 + desc: mode=append,不同的表导出,第二次header=false + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {1} into outfile '{0}.csv' options(mode='append',header=false); + - load data infile 
'{0}.csv' into table {2}; + - select * from {2}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 11 + desc: mode=append,不同的表导出,第二次header=true + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - select * from {1} into outfile '{0}.csv' options(mode='append',header=true); + expect: + cat: + path: "{0}.csv" + lines: + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true + - 2,bb,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 3,cc,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - + id: 12 + desc: option key错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv' options(head=true); + expect: + success: false + - + id: 13 + desc: option header 值错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + 
sqls: + - select * from {0} into outfile '{0}.csv' options(header='true'); + expect: + success: false + - + id: 14 + desc: format 其他格式 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv' options(format='txt'); + expect: + success: false + - + id: 15 + desc: delimiter为一些特殊字符 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(delimiter='@'); + - load data infile '{0}.csv' into table {1} options(delimiter='@'); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 16 + desc: null_value为特殊字符 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - 
select * from {0} into outfile '{0}.csv' options(null_value='~!@#$%^&*()_+'); + - load data infile '{0}.csv' into table {1} options(null_value='~!@#$%^&*()_+'); + - select * from {1}; + expect: + count: 3 + - + id: 17 + desc: String 有null 空串 ”null“ null_value为”“ + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value=''); + expect: + cat: + path: "{0}.csv" + lines: + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 5,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03, + - 4,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true + - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03, + - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false + - + id: 18 + desc: String 有null 空串 ”null“ null_value为”null“ + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id 
int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value='null'); + expect: + cat: + path: "{0}.csv" + lines: + - id,c1,c2,c3,c4,c5,c6,c7,c8,c9 + - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 5,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null + - 4,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true + - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true + - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null + - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false + - + id: 19 + desc: header=false导出数据 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=false); + - load data infile '{0}.csv' into table {1} options(header=false); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 20 + desc: format=csv,导出数据 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(format='csv'); + - load data infile '{0}.csv' into table {1} options(format='csv'); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 21 + desc: 路径文件夹不存在 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '/{0}/{0}.csv'; + expect: + success: false + - + id: 22 + desc: 数据类型不匹配 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 int","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + expect: + success: false + - + id: 23 + desc: header=true导出数据 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=true); + - load data infile '{0}.csv' into table {1} options(header=true); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 24 + desc: header=true,csv没有header + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=false); + - load data infile '{0}.csv' into table {1} options(header=true); + expect: + success: false + - + id: 25 + desc: header=false,csv有header + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : 
["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=true); + - load data infile '{0}.csv' into table {1} options(header=false); + expect: + success: false + - + id: 26 + desc: 表不存在 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(header=true); + - load data infile '{0}.csv' into table {1}11 options(header=true); + expect: + success: false + - + id: 27 + desc: format=csv,csv格式的文件,文件名不是csv结尾 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.txt' ; + - load data infile '{0}.txt' into table {1} options(format='csv'); + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - 
[2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 28 + desc: format=其他值 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1} options(format='txt'); + expect: + success: false + - + id: 29 + desc: 路径错误 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}1.csv' into table {1}; + expect: + success: false + - + id: 30 + desc: 导入其他库的表 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + db: db1 + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 
timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table db1.{1}; + - select * from db1.{1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 31 + desc: 导出后导入 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {0}; + - select * from {0}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 32 + desc: 创建表的列和csv对不上 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","cc 
smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}1.csv' into table {1}; + expect: + success: false + - + id: 33 + desc: 表中已经有数据,然后导入 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + id: 34 + desc: delimiter为,数据中有, + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"b,b",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - 
[3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1} options(delimiter=','); + expect: + success: false + - + id: 35 + desc: 导入-null_value=null + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value='null'); + - load data infile '{0}.csv' into table {1} options(null_value='null'); + - select * from {1}; + expect: + count: 3 + - + id: 36 + desc: 导入-null_value=空串 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false] + - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null] + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv' options(null_value=''); + - load data infile '{0}.csv' into table {1} options(null_value=''); + - select * from {1}; + expect: + count: 3 + - + id: 37 + desc: 表删除后再次导入 +# tags: ["TODO","下个版本修复,@huangwei"] + inputs: + - + columns : ["id int","c1 string","c7 
timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + - [2,"bb",1590738990000] + - [3,"cc",1590738991000] + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + sqls: + - select * from {0} into outfile '{0}.csv'; + - load data infile '{0}.csv' into table {1}; + - drop table {1}; + - create table {1}( + id int, + c1 string, + c7 timestamp, + index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1); + - load data infile '{0}.csv' into table {1}; + - select * from {1}; + expect: + columns : ["id int","c1 string","c7 timestamp"] + order: id + rows: + - [1,"aa",1590738989000] + - [2,"bb",1590738990000] + - [3,"cc",1590738991000] + - + id: 38 + desc: mode 值错误 + inputs: + - + columns : ["id int","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738989000] + sqls: + - select * from {0} into outfile '{0}.csv' options(mode='true'); + expect: + success: false + + + diff --git a/cases/integration_test/v040/test_udaf.yaml b/cases/integration_test/v040/test_udaf.yaml new file mode 100644 index 00000000000..fee7f58b800 --- /dev/null +++ b/cases/integration_test/v040/test_udaf.yaml @@ -0,0 +1,108 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +sqlDialect: ["HybridSQL"] +cases: + - id: 0 + desc: "count(*)" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select count(*) as v1 from {0}; + expect: + columns: ["v1 bigint"] + rows: + - [5] + - id: 1 + desc: "count(1)" + inputs: + - + columns : ["id bigint","c1 string","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1590738990000] + - [2,"bb",1590738991000] + - [3,"aa",1590738992000] + - [4,"a%",1590738993000] + - [5,"bb",1590738994000] + sql: select count(1) as v1 from {0}; + expect: + success: false + - id: 2 + desc: "count/sum/max/min/avg一个列" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",3,1590738992000] + - [4,"cc",4,1590738993000] + - [5,"bb",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0}; + expect: + order: c1 + columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"] + rows: + - [6,6,1,3.5,21] + - id: 3 + desc: "表是空的" + tags: ["TODO","@chengjing,bug,"] + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0}; + expect: + order: c1 + columns: ["v1 int","v2 int","v3 int","v4 double","v5 int"] + rows: + - [0,0,0,0,0] + - id: 4 + desc: "列有null和空串" + inputs: + - + columns : ["id bigint","c1 string","c2 int","c7 timestamp"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",1,1590738990000] + - [2,"bb",2,1590738991000] + - [3,"aa",null,1590738992000] + - [4,null,4,1590738993000] + - [5,"",5,1590738994000] + - [6,"aa",6,1590738995000] + sql: select count(c1) as 
v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0}; + expect: + order: c1 + columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"] + rows: + - [5,6,1,3.6,18] + + + + + + + diff --git a/cases/integration_test/window/error_window.yaml b/cases/integration_test/window/error_window.yaml new file mode 100644 index 00000000000..9e9419bc74f --- /dev/null +++ b/cases/integration_test/window/error_window.yaml @@ -0,0 +1,303 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: no order by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c8:c4" ] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 1 + desc: no partition by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c8:c4" ] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 2 + desc: float为partition by - 未命中索引 - rtidb下不支持 + mode: offline-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 3 + desc: double为partition by - 未命中索引 - rtidb下不支持 + mode: offline-unsupport + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"] + - 
[4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 4 + desc: string为order by + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c1 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 5 + desc: float为order by + mode: offline-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c8:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.3,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c5, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c5 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 6 + desc: double为order by + mode: offline-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-01"] + - 
[4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c6, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 7 + desc: date为order by + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-05"] + sql: | + SELECT id, c1, c8, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c8 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + + - + id: 8 + desc: BETWEEN加单位 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 9 + desc: window名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - 
[5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w2 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 10 + desc: window使用的表名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}1.c3 ORDER BY {0}1.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 11 + desc: window使用的列名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c33 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 12 + desc: window1 expression + window2 expression + tags: ["目前属于功能边界外, @chenjing计划支持依赖同类窗口的表达式"] + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - 
["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, c4, + (sum(c4) over w1 + sum(c3) over w2) as sum_c3_c4_w1 FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 10 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 13 + desc: ROWS Window 不支持MAXSIZE + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 10); + expect: + success: false + - + id: 14 + desc: window名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w2 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 15 + desc: window使用的表名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - 
[3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}1.c3 ORDER BY {0}1.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 16 + desc: window使用的列名不存在 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c33 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false diff --git a/cases/integration_test/window/test_current_row.yaml b/cases/integration_test/window/test_current_row.yaml new file mode 100644 index 00000000000..a70e63b570c --- /dev/null +++ b/cases/integration_test/window/test_current_row.yaml @@ -0,0 +1,1507 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.6.0 +cases: + - id: 0 + desc: rows-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 1 + desc: rows_range-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 2 + desc: rows-current_row-有和当前行ts一致的数据 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ 
"aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 3 + desc: rows_range-current_row-有和当前行ts一致的数据 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 4 + desc: rows-纯历史窗口-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW); + expect: + 
order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 5 + desc: rows_range-纯历史窗口-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 6 + desc: rows-current_row-ts=0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 7 + desc: rows_range-current_row-ts=0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 
"aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 8 + desc: rows-current_row-ts=-1 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 9 + desc: rows_range-current_row-ts=-1 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION 
BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 10 + desc: rows-current_row-ts=负数和0 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 11 + desc: rows_range-current_row-ts=负数和0 + tags: ["TODO","ts为负数有问题,带支持后再验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 12 + desc: 
rows-open-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 13 + desc: rows_range-open-current_row + tags: ["TODO","bug,修复后验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "aa",24,34,1.5,2.5,1590738993000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,null ] + - [ "aa",23,32 ] + - [ "aa",24,32 ] + - id: 14 + desc: rows_range-current_row-maxsize小于窗口 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 
"aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 15 + desc: rows_range-current_row-maxsize大于窗口 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW MAXSIZE 3 EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 16 + desc: rows-current_row-current_time + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME EXCLUDE CURRENT_ROW); + expect: 
+ order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 17 + desc: rows_range-current_row-current_time + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,61 ] + - [ "aa",23,32 ] + - [ "bb",24,null ] + - id: 18 + desc: window union rows-current_row-instance_not_in_window + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - 
[4,"dd",20,63] + - [5,"ee",21,null] + - id: 19 + desc: window union rows_range-current_row-instance_not_in_window + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,32] + - [5,"ee",21,null] + - id: 20 + desc: window union rows-current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,62] + - 
[5,"ee",21,null] + - id: 21 + desc: window union rows_range-current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,62] + - [5,"ee",21,null] + - id: 22 + desc: rows窗口包含open/maxsize/instance_not_in_window/current_time/current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 
int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,63] + - [4,"dd",20,67] + - [5,"ee",21,null] + - id: 23 + desc: rows_range窗口包含open/maxsize/instance_not_in_window/current_time/current_row + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING MAXSIZE 1 EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,32] + - [4,"dd",20,35] + - [5,"ee",21,null] + - id: 24 + desc: rows-lag-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ 
"aa",23,31 ] + - [ "bb",24,null ] + - id: 25 + desc: rows_range-lag-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ "aa",23,31 ] + - [ "bb",24,null ] + - id: 26 + desc: rows-at-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ "aa",23,31 ] + - [ "bb",24,null ] + - id: 27 + desc: rows_range-at-current_row + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 
"aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,30 ] + - [ "aa",23,31 ] + - [ "bb",24,null ] + - id: 28 + desc: 两个窗口,一个rows,一个rows_range,current_row + tags: ["TODO","bug,修复后验证"] + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c5) OVER w2 as w2_c5_count FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 rows_range BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint","w2_c5_count bigint" ] + rows: + - [ "aa",20,null,0 ] + - [ "aa",21,30,1 ] + - [ "aa",22,61,2 ] + - [ "aa",23,63,2 ] + - [ "bb",24,null,0 ] + - id: 29 + desc: current_row小写 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 
"bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW exclude current_row); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 30 + desc: maxsize位置错误 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW MAXSIZE 2); + expect: + success: false + - id: 31 + desc: rows-纯历史窗口-current_row-ts=0 + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,30 ] + - [ "aa",22,61 ] + - [ "aa",23,63 ] + - [ "bb",24,null ] + - id: 32 + desc: rows_range-纯历史窗口-current_row-ts=0 + tags: ["TODO","bug,修复后验证"] + inputs: + - columns: [ "c1 string","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,2000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,null ] + - [ "aa",21,null ] + - [ "aa",22,61 ] + - [ "aa",23,93 ] + - [ "bb",24,null ] + + ################################################### + # tests for window attribute 'EXCLUDE CURRENT_ROW' + # - id: 20 - 23: exclude current_row window + lag window + # - id: 24 - 30: exclude current_row window + (maxsize, exclude current_time, instance_not_in_window) + ################################################### + - id: 20 + desc: | + rows_range window union with exclude current_row. 
batch not support see 1807 + mode: batch-unsupport + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 2, 233, 200, 200 + 2, 3, 233, 21, 21 + - id: 21 + desc: | + rows_range window union with exclude current_row and exclude current_time + mode: batch-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 40 + - name: t2 + columns: + - id int + - ts 
timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 1, 233, 233, 233 + 2, 1, 233, 233, 233 + 3, 4, 233, 5, 5 + - id: 22 + desc: | + rows_range window union with exclude current_row and instance_not_in_window + mode: batch-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, 
l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + PROJECT(type=Aggregation) + REQUEST_UNION(INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 2, 233, 200, 200 + 2, 2, 233, 200, 200 + - id: 23 + desc: | + rows_range window union with exclude current_row, instance_not_in_window and exclude_current_time + mode: batch-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 40 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts 
ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 1, 233, 233, 233 + 2, 1, 233, 233, 233 + 3, 2, 233, 200, 200 + 4, 3, 233, 17, 17 + + # rows_range union window with exclude current_row, single window + - id: 24 + desc: | + rows_range union window with exclude_current_row + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, -1 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW) + +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + 
DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding and 0s preceding + EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 2, 233, 200 + 2, 3, 233, 21 + 3, 5, 233, 5 + 4, 6, 233, 0 + - id: 25 + desc: | + rows_range union window with exclude_current_row and exclude_current_time + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW) + +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + 
RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 4, 233, 5 + 4, 6, 233, 0 + - id: 26 + desc: | + rows_range union window with exclude_current_row and instance_not_in_window + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + # instance_not_in_window not optimize main table + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` 
order by `ts` + rows_range between 3s preceding AND CURRENT ROW + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 2, 233, 200 + 2, 2, 233, 200 + 3, 3, 233, 17 + 4, 3, 233, 17 + - id: 27 + desc: | + rows_range union window with exclude_current_row, exclude current_time and instance_not_in_window + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 2, 233, 200 + 4, 3, 233, 
17 + - id: 28 + desc: | + rows_range union window with exclude_current_row, exclude current_time, instance_not_in_window and maxsize + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + MAXSIZE 2 + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 2, 233, 200 + 4, 2, 200, 17 + - id: 29 + desc: | + rows_range union window with exclude_current_row, instance_not_in_window and maxsize + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + 
indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(table=t1) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(table=t1) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + MAXSIZE 2 + EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 2, 233, 200 + 2, 2, 233, 200 + 3, 2, 200, 17 + 4, 2, 200, 17 + - id: 30 + desc: | + rows_range union window with exclude_current_row, exclude_current_time and maxsize + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 5 + 3, 101, 111, 0 + 4, 102, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + batch_plan: | + 
PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW) + +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g)) + RENAME(name=t1) + DATA_PROVIDER(type=Partition, table=t2, index=idx) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 3s preceding AND CURRENT ROW + MAXSIZE 2 + EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + order: id + data: | + 1, 1, 233, 233 + 2, 1, 233, 233 + 3, 2, 21, 5 + 4, 2, 17, 0 \ No newline at end of file diff --git a/cases/integration_test/window/test_maxsize.yaml b/cases/integration_test/window/test_maxsize.yaml new file mode 100644 index 00000000000..28af076d27a --- /dev/null +++ b/cases/integration_test/window/test_maxsize.yaml @@ -0,0 +1,789 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: maxsize小于窗口的大小 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + - + id: 1 + desc: maxsize大于窗口的大小 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 5); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - 
[2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + - + id: 2 + desc: maxsize等于窗口的大小 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 3); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + - + id: 3 + desc: maxsize=0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 0); + expect: + success: false + - + id: 4 + desc: maxsize=1 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 1); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: 
+ - [1,"aa",30] + - [2,"aa",31] + - [3,"aa",32] + - [4,"aa",33] + - [5,"aa",34] + - + id: 5 + desc: maxsize=-1 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE -1); + expect: + success: false + - + id: 6 + desc: 纯历史窗口-maxsize + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND 1 PRECEDING MAXSIZE 3); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa", NULL] + - [2,"aa",30] + - [3,"aa",61] + - [4,"aa",93] + - [5,"aa",96] + - + id: 7 + desc: 没有数据进入maxsize的窗口 + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND 3 PRECEDING MAXSIZE 3); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa", NULL] + - [2,"aa", NULL] + - 
[3,"aa", NULL] + - + id: 8 + desc: 两个pk,都大于maxsize + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - [6,"bb",24,35,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND 0 PRECEDING MAXSIZE 2); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",63] + - [4,"bb",33] + - [5,"bb",67] + - [6,"bb",69] + - + id: 9 + desc: 两个pk,一个大于maxsize,一个小于maxsize + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND 0 PRECEDING MAXSIZE 2); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",63] + - [4,"bb",33] + - [5,"bb",67] + - + id: 10 + desc: 两个窗口的maxsize一致 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - 
[3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,96] + - [5,"aa",99,99] + - + id: 11 + desc: 两个窗口的maxsize不一致 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 2) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,63] + - [4,"aa",96,65] + - [5,"aa",99,67] + - + id: 12 + desc: 两个窗口不同的key的maxsize一致 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"] + - 
[3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",21,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",21,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,33] + - [5,"aa",99,67] + - + id: 13 + desc: 两个窗口不同的ts的maxsize一致 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c1:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,96] + - [5,"aa",99,99] + - + id: 14 + desc: 两个窗口一个带有maxsize,一个没有 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - 
[3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,126] + - [5,"aa",99,160] + - + id: 15 + desc: 两个窗口不同的key的maxsize不一致 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",21,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",21,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 2) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,63] + - [4,"aa",96,33] + - [5,"aa",99,67] + - + id: 16 + desc: 两个窗口的不同的ts的maxsize不一致 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c1:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - 
[3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 4) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,126] + - [5,"aa",99,130] + - + id: 17 + desc: 两个窗口相同的key的一个maxsize大于窗口一个小于窗口 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 5), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 3) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",126,96] + - [5,"aa",130,99] + - + id: 18 + desc: 两个窗口不同的key的一个maxsize大于窗口一个小于窗口 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"] 
+ - [3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",20,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",21,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 5), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 3) + ; + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",126,96] + - [5,"aa",130,34] + - + id: 19 + desc: union结合maxsize + mode: cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [5,"aa",67] + - + id: 20 + desc: union结合maxsize-两个窗口 + mode: cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [5,"aa",95,67] + - + id: 21 + desc: union+maxsize+INSTANCE_NOT_IN_WINDOW + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2 INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",31] + - [5,"aa",67] + - + id: 22 + desc: union子查询结合maxsize + mode: cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: 
["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION (select * from {1}) PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [5,"aa",67] + - + id: 23-1 + desc: lastjoin结合maxsize + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c4, + sum({1}.c4) OVER w1 as w1_c4_sum + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"] 
+ rows: + - [1,"aa",20,32,32] + - [2,"aa",21,32,64] + - [3,"aa",22,32,64] + - [4,"bb",23,34,34] + - [5,"bb",24,34,68] + - + id: 23-2 + desc: lastjoin结合maxsize, last join副表有ts列为null + tags: ["TODO"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,null,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"] + sql: | + select {0}.id,{0}.c1,{0}.c3,{1}.c4, + sum({1}.c4) OVER w1 as w1_c4_sum + from {0} + last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1 + WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2) + ; + expect: + order: id + columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,31,31] + - [2,"aa",21,31,62] + - [3,"aa",22,31,62] + - [4,"bb",23,34,34] + - [5,"bb",24,34,68] + - + id: 24 + desc: union多表结合maxsize + mode: cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 
timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [5,"aa",67] + - + id: 25 + desc: maxsize-rows + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3); + expect: + success: false + - + id: 26 + desc: 两个union,不同的maxsize + mode: cluster-unsupport + tags: ["TODO", "@chenjing support online batch", "@chendihao support offline batch", "cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, 
sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND CURRENT ROW MAXSIZE 4), + w2 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND CURRENT ROW MAXSIZE 2); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [5,"aa",127,67] diff --git a/cases/integration_test/window/test_window.yaml b/cases/integration_test/window/test_window.yaml new file mode 100644 index 00000000000..80731888843 --- /dev/null +++ b/cases/integration_test/window/test_window.yaml @@ -0,0 +1,1223 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: 一个pk窗口的大小大于所有数据 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",126] + - [5,"aa",160] + - + id: 1 + desc: 一个pk窗口的大小等于所有数据 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 4 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",126] + - [5,"aa",160] + - + id: 2 + desc: 一个pk窗口的大小小于所有数据 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - 
[3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + - + id: 3 + desc: 一个pk所有数据都不在窗口内 + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 5 PRECEDING AND 3 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",null] + - + id: 4 + desc: 窗口只要当前行 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 0 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",31] + - [3,"aa",32] + - + id: 5 + desc: 窗口只要当前行 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 
float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 0 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",31] + - [3,"aa",32] + - + id: 6 + desc: 最后一行进入窗口 + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 3 PRECEDING AND 2 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",30] + - + id: 7 + desc: 纯历史窗口-滑动 + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING); + expect: + order: id + columns: ["id int","c1 
string","w1_c4_sum bigint"] + rows: + - [1,"aa",null] + - [2,"aa",30] + - [3,"aa",61] + - [4,"aa",63] + - [5,"aa",65] + - + id: 8 + desc: 两个pk,一个没有进入窗口,一个滑动 + version: 0.6.0 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",null] + - [2,"aa",30] + - [3,"aa",61] + - [4,"aa",63] + - [5,"bb",null] + - + id: 9 + desc: 两个pk,一个全部进入窗口,一个滑动 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 0 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"bb",34] + - + id: 10 + desc: 两个pk都滑动 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + - [6,"bb",24,35,1.5,2.5,1590738990005,"2020-05-05"] + - [7,"bb",24,36,1.5,2.5,1590738990006,"2020-05-05"] + - [8,"bb",24,37,1.5,2.5,1590738990007,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND 0 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",63] + - [4,"aa",65] + - [5,"bb",34] + - [6,"bb",69] + - [7,"bb",71] + - [8,"bb",73] + - + id: 11 + desc: ts列乱序 + mode: request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + - + id: 12 + desc: ts列乱序 + mode: batch-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",62] + - [4,"aa",33] + - [5,"aa",99] + - + id: 13 + desc: ts列相同 + mode: disk-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",23,33,1.4,2.4,1590738990000,"2020-05-04"] + - [2,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",21,31,1.2,2.2,1590738990005,"2020-05-02"] + - [5,"aa",24,34,1.5,2.5,1590738990005,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + expectProvider: + 0: + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + 1: + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",1] + - [5,"aa",2] + - + id: 14 + desc: 每次上一条都划出 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",23,30,1.4,2.4,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990003,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990006,"2020-05-03"] + - [4,"aa",21,33,1.2,2.2,1590738990009,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990012,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, count(c4) OVER 
w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + expectProvider: + 0: + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + 1: + rows: + - [1,"aa",1] + - [2,"aa",1] + - [3,"aa",1] + - [4,"aa",1] + - [5,"aa",1] + - + id: 15 + desc: pk包含null + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,null,21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,null,22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,null,23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + expectProvider: + 0: + rows: + - [1,"aa",30] + - [2,null,31] + - [3,null,63] + - [4,null,65] + - [5,"aa",64] + 1: + rows: + - [1,"aa",30] + - [2,null,31] + - [3,null,63] + - [4,null,65] + - [5,"aa",34] + - + id: 16 + desc: pk包含空串 + mode: cli-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW); + expect: + 
order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + expectProvider: + 0: + rows: + - [1,"aa",30] + - [2,"",31] + - [3,"",63] + - [4,"",65] + - [5,"aa",64] + 1: + rows: + - [1,"aa",30] + - [2,"",31] + - [3,"",63] + - [4,"",65] + - [5,"aa",34] + - + id: 17 + desc: pk包含空串和null + mode: cli-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,null,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,null,24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint"] + expectProvider: + 0: + rows: + - [1,null,30] + - [2,"",31] + - [3,"",63] + - [4,"",65] + - [5,null,64] + 1: + rows: + - [1,null,30] + - [2,"",31] + - [3,"",63] + - [4,"",65] + - [5,null,34] + - + id: 18 + desc: 两个窗口相同的pk,相同的ts,相同的聚合函数 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: 
id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,96] + - [5,"aa",99,99] + - + id: 19 + desc: 两个窗口相同的pk,相同的ts,不同的列 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c3) OVER w2 as w2_c3_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c3_sum int"] + rows: + - [1,"aa",30,20] + - [2,"aa",61,41] + - [3,"aa",93,63] + - [4,"aa",96,66] + - [5,"aa",99,69] + - + id: 20 + desc: 两个窗口相同的pk,相同的ts,不同的函数 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 
string","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"aa",61,2] + - [3,"aa",93,3] + - [4,"aa",96,3] + - [5,"aa",99,3] + - + id: 21 + desc: sum超过int的范围 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",2147483647,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c3_sum int"] + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",66] + - [5,"aa",-2147483604] + - + id: 22 + desc: 两个窗口相同的pk,不同的ts,相同的聚合函数 + tags: ["TODO","@chenjing by@zhaowei","sql执行失败","http://jira.4paradigm.com/browse/FEX-924"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c1:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - 
[2,"aa",61,61] + - [3,"aa",93,63] + - [4,"aa",96,65] + - [5,"aa",99,67] + - + id: 23 + desc: 两个窗口不同的pk,相同的ts,相同的聚合函数 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c8:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-01"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,96] + - [5,"aa",99,99] + - + id: 24 + desc: 两个窗口不同的pk,相同的ts,相同的聚合函数,一个窗口两个pk + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c8:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - 
[1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,96] + - [5,"aa",99,34] + - + id: 25 + desc: 两个窗口不同的pk,不同的ts,相同的聚合函数 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c8:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c8 ORDER BY {0}.c4 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [3,"aa",93,93] + - [4,"aa",96,96] + - [5,"aa",99,34] + - + id: 26 + desc: 两个窗口不同的ts,一个都在窗口内,一个都不进入窗口 + tags: ["TODO","@chenjing by@zhaowei","sql执行失败,应该和 22 case是一个问题"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 5 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 d[0] BETWEEN 6 PRECEDING AND 5 PRECEDING); + expect: + order: id + columns: ["id int","c1 
string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,0] + - [2,"aa",61,0] + - [3,"aa",93,0] + - [4,"aa",126,0] + - [5,"aa",160,0] + - + id: 27 + desc: 两个窗口,一个union,一个不union + mode: rtidb-batch-unsupport,cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + expectProvider: + 0: + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [5,"aa",95,99] + 1: + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [5,"aa",34,99] + - + id: 28 + desc: 两个窗口,一个union一个表,一个union两个表 + mode: rtidb-batch-unsupport,cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
[3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + expectProvider: + 0: + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [5,"aa",97,99] + 1: + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [5,"aa",66,99] + - + id: 29 + desc: 两个窗口,一个union,一个INSTANCE_NOT_IN_WINDOW + mode: rtidb-batch-unsupport,cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + 
rows: + - [1,"aa",30,30] + - [2,"aa",61,31] + - [4,"aa",96,65] + - [5,"aa",99,66] + - + id: 30 + desc: 两个窗口,一个union一个表,一个union使用子查询 + mode: rtidb-batch-unsupport,cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + dataProvider: + - ["ROWS","ROWS_RANGE"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (UNION (select * from {1}) PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,30] + - [2,"aa",61,61] + - [5,"aa",99,99] + - + id: 31 + desc: 多个窗口-rows + mode: rtidb-batch-unsupport,cluster-unsupport + tags: ["cluster-执行失败", "@chenjing batch online fix for multi window with union", "@tobe batch offline fix"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 
float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW + w1 AS (UNION {1},{2},{3} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND CURRENT ROW), + w2 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND 1 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"] + rows: + - [1,"aa",30,0] + - [5,"aa",160,93] + - + id: 32 + desc: 多个窗口包含不同的单位 + mode: cluster-unsupport + tags: ["cluster-执行失败"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"aa",20,32,1.1,2.1,1590738992000,"2020-05-01"] + - [4,"aa",20,33,1.1,2.1,1590739110000,"2020-05-01"] + - [5,"aa",20,34,1.1,2.1,1590746190000,"2020-05-01"] + - [6,"aa",20,35,1.1,2.1,1590911790000,"2020-05-01"] + - + columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738993000,"2020-05-02"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590739050000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590739170000,"2020-05-01"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590742590000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590749790000,"2020-05-01"] + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590825390000,"2020-05-01"] + - [2,"aa",20,31,1.1,2.1,1590998190000,"2020-05-01"] + sql: | + SELECT id, c1, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum, + sum(c4) OVER w3 as w3_c4_sum, + sum(c4) OVER w4 as w4_c4_sum, + sum(c4) OVER w5 as w5_c4_sum + FROM {0} WINDOW + w1 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w3 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND CURRENT ROW), + w4 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2h PRECEDING AND CURRENT ROW), + w5 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2d PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint","w3_c4_sum bigint","w4_c4_sum bigint","w5_c4_sum bigint"] + rows: + - [1,"aa",30,30,30,30,30] + - [2,"aa",61,61,61,61,61] + - [3,"aa",32,123,123,123,123] + - [4,"aa",33,33,217,217,217] + - [5,"aa",34,34,34,312,312] + - [6,"aa",35,35,35,35,408] + + - id: 33 + desc: | + first_value results in two rows_range window, refer https://github.com/4paradigm/OpenMLDB/issues/1587 + inputs: + - columns: [ "id int","ts timestamp","group1 string","val1 int" ] + indexs: [ "index1:group1:ts" ] + name: t1 + data: | + 1, 1612130400000, g1, 1 + 2, 1612130401000, g1, 2 + 3, 1612130402000, g1, 3 + 4, 1612130403000, g1, 4 + 5, 1612130404000, g1, 5 + 6, 1612130404000, g2, 4 + 7, 1612130405000, g2, 3 + 8, 1612130406000, g2, 2 + sql: | + select + `id`, + `val1`, + 
first_value(val1) over w1 as agg1, + first_value(val1) over w2 as agg2, + from `t1` WINDOW + w1 as (partition by `group1` order by `ts` rows_range between 5s preceding and 0s preceding), + w2 as (partition by `group1` order by `ts` rows_range between 5s preceding and 1s preceding); + expect: + columns: ["id int", "val1 int", "agg1 int", "agg2 int"] + order: id + rows: + - [1, 1, 1, NULL] + - [2, 2, 2, 1] + - [3, 3, 3, 2] + - [4, 4, 4, 3] + - [5, 5, 5, 4] + - [6, 4, 4, NULL] + - [7, 3, 3, 4] + - [8, 2, 2, 3] + + - id: 34 + desc: | + first_value results in two rows windows + inputs: + - columns: [ "id int","ts timestamp","group1 string","val1 int" ] + indexs: [ "index1:group1:ts" ] + name: t1 + rows: + - [1, 1612130400000, g1, 1] + - [2, 1612130401000, g1, 2] + - [3, 1612130402000, g1, 3] + - [4, 1612130403000, g1, 4] + - [5, 1612130404000, g1, 5] + - [6, 1612130404000, g2, 4] + - [7, 1612130405000, g2, 3] + - [8, 1612130406000, g2, 2] + sql: | + select + `id`, + `val1`, + first_value(val1) over w1 as agg1, + first_value(val1) over w2 as agg2, + from `t1` WINDOW + w1 as (partition by `group1` order by `ts` rows between 5 preceding and 0 preceding), + w2 as (partition by `group1` order by `ts` rows between 5 preceding and 1 preceding); + expect: + columns: ["id int", "val1 int", "agg1 int", "agg2 int"] + order: id + rows: + - [1, 1, 1, NULL] + - [2, 2, 2, 1] + - [3, 3, 3, 2] + - [4, 4, 4, 3] + - [5, 5, 5, 4] + - [6, 4, 4, NULL] + - [7, 3, 3, 4] + - [8, 2, 2, 3] + + - id: 35 + desc: | + first_value results in rows/rows_range windows + inputs: + - columns: [ "id int","ts timestamp","group1 string","val1 int" ] + indexs: [ "index1:group1:ts" ] + name: t1 + data: | + 1, 1612130400000, g1, 1 + 2, 1612130401000, g1, 2 + 3, 1612130402000, g1, 3 + 4, 1612130403000, g1, 4 + 5, 1612130404000, g1, 5 + 6, 1612130404000, g2, 4 + 7, 1612130405000, g2, 3 + 8, 1612130406000, g2, 2 + sql: | + select + `id`, + `val1`, + first_value(val1) over w1 as agg1, + first_value(val1) over w2 as 
agg2, + from `t1` WINDOW + w1 as (partition by `group1` order by `ts` rows_range between 5s preceding and 0s preceding), + w2 as (partition by `group1` order by `ts` rows between 5 preceding and 1 preceding); + expect: + columns: ["id int", "val1 int", "agg1 int", "agg2 int"] + order: id + rows: + - [1, 1, 1, NULL] + - [2, 2, 2, 1] + - [3, 3, 3, 2] + - [4, 4, 4, 3] + - [5, 5, 5, 4] + - [6, 4, 4, NULL] + - [7, 3, 3, 4] + - [8, 2, 2, 3] + + - id: 36 + version: 0.6.0 + desc: | + correctness for window functions over window whose border is open + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 21 + 2, 100, 111, 22 + 3, 101, 111, 23 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + partition by `g` order by `ts` + ROWS between 3 OPEN preceding and 0 OPEN PRECEDING); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 0, NULL, NULL, NULL + 2, 1, 21, 21, 21 + 3, 2, 22, 21, 22 + + - id: 37 + version: 0.6.0 + desc: | + correctness for rows_range window functions over window whose border is open + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + partition by `g` order by `ts` + ROWS_RANGE between 2s OPEN PRECEDING and 0s OPEN preceding); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 0, NULL, NULL, NULL + 2, 1, 21, 21, 21 + 3, 1, 22, 22, 22 diff --git a/cases/integration_test/window/test_window_exclude_current_time.yaml b/cases/integration_test/window/test_window_exclude_current_time.yaml new file mode 
100644 index 00000000000..2f00fff56e1 --- /dev/null +++ b/cases/integration_test/window/test_window_exclude_current_time.yaml @@ -0,0 +1,761 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +version: 0.5.0 +cases: + - id: 0 + mode: disk-unsupport + desc: ROWS_RANGE Window OPEN PRECEDING EXCLUDE CURRENT_TIME + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",-2, 1.0, 0 ] + - [ "aa",-1, 1.0, 0 ] + - [ "aa",0, 1.0, 0 ] + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa", -2, 0, 1.0 ] + - [ "aa", -1, 0, 1.0 ] + - [ "aa", 0, 0, 1.0 ] + - [ "aa", 1, 1590738990000, 1.0 ] + - [ "aa", 2, 1590738990000, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0 ] + - [ "aa", 6, 1590738994000, 3.0 ] + - [ "aa", 7, 1590738999000, 1.0 ] + - [ "aa", 
8, 1590739001000, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0 ] + - id: 1 + desc: ROWS_RANGE Window with MaxSize 2 OPEN PRECEDING EXCLUDE CURRENT_TIME + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0 ] + - [ "aa", 2, 1590738990000, 1.0 ] + - [ "aa", 3, 1590738992000, 2.0 ] + - [ "aa", 4, 1590738993000, 2.0 ] + - [ "aa", 5, 1590738994000, 2.0 ] + - [ "aa", 6, 1590738994000, 2.0 ] + - [ "aa", 7, 1590738999000, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0 ] + - [ "aa", 9, 1590739002000, 2.0 ] + - id: 2 + desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING EXCLUDE CURRENT_TIME + mode: disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",-1, 1.0, 0 ] + - [ "aa", 0, 1.0, 0 ] + - [ "aa", 1, 1.0, 1590738990000 ] + - [ "aa", 2, 1.0, 1590738990000 ] + - [ "aa", 3, 1.0, 1590738992000 ] + - [ "aa", 4, 1.0, 1590738993000 ] + - [ "aa", 5, 1.0, 1590738994000 ] + - [ "aa", 6, 1.0, 1590738994000 ] + - [ "aa", 7, 1.0, 1590738999000 ] + - [ "aa", 8, 1.0, 1590739001000 ] + - [ "aa", 9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 10 
EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa",-1, 0, 1.0 ] + - [ "aa", 0, 0, 1.0 ] + - [ "aa", 1, 1590738990000, 1.0 ] + - [ "aa", 2, 1590738990000, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0 ] + - [ "aa", 6, 1590738994000, 3.0 ] + - [ "aa", 7, 1590738999000, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0 ] + - id: 3 + desc: ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME + mode: disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",-1, 1.0, 0] + - [ "aa", 0, 1.0, 0] + - [ "aa", 1, 1.0, 1590738990000 ] + - [ "aa", 2, 1.0, 1590738990000 ] + - [ "aa", 3, 1.0, 1590738992000 ] + - [ "aa", 4, 1.0, 1590738993000 ] + - [ "aa", 5, 1.0, 1590738994000 ] + - [ "aa", 6, 1.0, 1590738994000 ] + - [ "aa", 7, 1.0, 1590738999000 ] + - [ "aa", 8, 1.0, 1590739001000 ] + - [ "aa", 9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa",-1, 0, 1.0 ] + - [ "aa", 0, 0, 1.0 ] + - [ "aa", 1, 1590738990000, 3.0 ] + - [ "aa", 2, 1590738990000, 3.0 ] + - [ "aa", 3, 1590738992000, 5.0 ] + - [ "aa", 4, 1590738993000, 6.0 ] + - [ "aa", 5, 1590738994000, 7.0 ] + - [ "aa", 6, 1590738994000, 7.0 ] + - [ "aa", 7, 1590738999000, 7.0 ] + - [ "aa", 8, 1590739001000, 7.0 ] + - [ "aa", 9, 1590739002000, 7.0 ] + - id: 4 + desc: ROWS and ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME + mode: disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 
1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 1.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 5.0 ] + - [ "aa", 6, 1590738994000, 3.0, 5.0 ] + - [ "aa", 7, 1590738999000, 1.0, 7.0 ] + - [ "aa", 8, 1590739001000, 2.0, 7.0 ] + - [ "aa", 9, 1590739002000, 3.0, 7.0 ] + + - id: 5 + mode: offline-unsupport,disk-unsupport + desc: ROWS_RANGE Window and EXCLUDE CURRENT_TIME Window + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ 
"c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 3.0 ] + - [ "aa", 6, 1590738994000, 4.0, 3.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0, 3.0 ] + - id: 6 + desc: ROWS_RANGE Window with MaxSize 2 and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 2.0, 2.0 ] + - [ "aa", 4, 1590738993000, 2.0, 2.0 ] + - [ "aa", 5, 1590738994000, 2.0, 2.0 ] + - [ "aa", 6, 1590738994000, 2.0, 2.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 2.0, 2.0 ] + - id: 7 + desc: ROWS_RANGE Window with MaxSize 10 and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 
string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 10), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 10 EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 3.0 ] + - [ "aa", 6, 1590738994000, 4.0, 3.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0, 3.0 ] + - id: 8 + desc: ROWS Window and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER 
BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 5.0, 5.0 ] + - [ "aa", 6, 1590738994000, 6.0, 5.0 ] + - [ "aa", 7, 1590738999000, 7.0, 7.0 ] + - [ "aa", 8, 1590739001000, 7.0, 7.0 ] + - [ "aa", 9, 1590739002000, 7.0, 7.0 ] + - id: 9 + desc: ROWS and ROWS Window and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum, + sum(c4) OVER w3 as w3_c4_sum, + sum(c4) OVER w4 as w4_c4_sum, + sum(c4) OVER w5 as w5_c4_sum, + sum(c4) OVER w6 as w6_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME), + w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2), + w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME), + w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW), + w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + 
order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double", + "w3_c4_sum double", "w4_c4_sum double", + "w5_c4_sum double", "w6_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0 ] + - [ "aa", 6, 1590738994000, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0 ] + - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ] + + - id: 10 + desc: ROWS_RANGE Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 3.0 ] + - [ "aa", 6, 1590738994000, 4.0, 3.0 ] 
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0, 3.0 ] + - id: 11 + desc: ROWS_RANGE Window with MaxSize 2 OPEN PRECEDING amd EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 2.0, 2.0 ] + - [ "aa", 4, 1590738993000, 2.0, 2.0 ] + - [ "aa", 5, 1590738994000, 2.0, 2.0 ] + - [ "aa", 6, 1590738994000, 2.0, 2.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 2.0, 2.0 ] + - id: 12 + desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + 
- [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 10), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 10 EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 3.0 ] + - [ "aa", 6, 1590738994000, 4.0, 3.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0, 3.0 ] + - id: 13 + desc: ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 
1590738990000, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 5.0, 5.0 ] + - [ "aa", 6, 1590738994000, 6.0, 5.0 ] + - [ "aa", 7, 1590738999000, 7.0, 7.0 ] + - [ "aa", 8, 1590739001000, 7.0, 7.0 ] + - [ "aa", 9, 1590739002000, 7.0, 7.0 ] + - id: 14 + desc: ROWS and ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum, + sum(c4) OVER w3 as w3_c4_sum, + sum(c4) OVER w4 as w4_c4_sum, + sum(c4) OVER w5 as w5_c4_sum, + sum(c4) OVER w6 as w6_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME), + w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2), + w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME), + w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW), + w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double", + "w3_c4_sum double", "w4_c4_sum double", + "w5_c4_sum double", "w6_c4_sum double" ] + rows: + - [ "aa", 1, 
1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0 ] + - [ "aa", 6, 1590738994000, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0 ] + - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ] + - id: 16 + desc: ROWS and ROWS Window 各类窗口混合 + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738994000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum, + sum(c4) OVER w3 as w3_c4_sum, + sum(c4) OVER w4 as w4_c4_sum, + sum(c4) OVER w5 as w5_c4_sum, + sum(c4) OVER w6 as w6_c4_sum, + sum(c4) OVER w7 as w7_c4_sum, + sum(c4) OVER w8 as w8_c4_sum, + sum(c4) OVER w9 as w9_c4_sum, + sum(c4) OVER w10 as w10_c4_sum, + sum(c4) OVER w11 as w11_c4_sum, + sum(c4) OVER w12 as w12_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME), + w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2), + w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME), + w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING 
AND CURRENT ROW), + w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME), + w7 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW), + w8 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME), + w9 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2), + w10 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME), + w11 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW), + w12 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", + "w1_c4_sum double", "w2_c4_sum double", + "w3_c4_sum double", "w4_c4_sum double", + "w5_c4_sum double", "w6_c4_sum double", + "w7_c4_sum double", "w8_c4_sum double", + "w9_c4_sum double", "w10_c4_sum double", + "w11_c4_sum double", "w12_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ] + - [ "aa", 2, 1590738990000, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0 ] + - [ "aa", 6, 1590738994000, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0 ] + - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ] + - id: 17 + desc: ROWS Window with same 
timestamp + mode: offline-unsupport,disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738990000 ] + - [ "aa",4, 1.0, 1590738990000 ] + - [ "aa",5, 1.0, 1590738990000 ] + - [ "aa",6, 1.0, 1590738990000 ] + - [ "aa",7, 1.0, 1590738991000 ] + - [ "aa",8, 1.0, 1590738992000 ] + - [ "aa",9, 1.0, 1590738993000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double"] + rows: + - [ "aa", 1, 1590738990000, 1.0] + - [ "aa", 2, 1590738990000, 2.0] + - [ "aa", 3, 1590738990000, 3.0] + - [ "aa", 4, 1590738990000, 4.0] + - [ "aa", 5, 1590738990000, 4.0] + - [ "aa", 6, 1590738990000, 4.0] + - [ "aa", 7, 1590738991000, 4.0] + - [ "aa", 8, 1590738992000, 4.0] + - [ "aa", 9, 1590738993000, 4.0] + - id: 18 + desc: ROWS Window with same timestamp Exclude CurrentTime + mode: disk-unsupport + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738990000 ] + - [ "aa",4, 1.0, 1590738990000 ] + - [ "aa",5, 1.0, 1590738990000 ] + - [ "aa",6, 1.0, 1590738990000 ] + - [ "aa",7, 1.0, 1590738991000 ] + - [ "aa",8, 1.0, 1590738992000 ] + - [ "aa",9, 1.0, 1590738993000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double"] + rows: + - [ "aa", 1, 1590738990000, 1.0] + - [ "aa", 2, 1590738990000, 1.0] + - [ "aa", 3, 1590738990000, 1.0] + - [ "aa", 4, 
1590738990000, 1.0] + - [ "aa", 5, 1590738990000, 1.0] + - [ "aa", 6, 1590738990000, 1.0] + - [ "aa", 7, 1590738991000, 4.0] + - [ "aa", 8, 1590738992000, 4.0] + - [ "aa", 9, 1590738993000, 4.0] + - id: 19 + desc: ROWS, ROWS_RANGE Window, Normal Window, OPEN Window, EXCLUDE CURRENT TIME Window + mode: batch-unsupport,disk-unsupport + tags: ["@chendihao, @baoxinqi, 测试的时候spark需要保证输入数据滑入顺序"] + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738990000 ] + - [ "aa",3, 1.0, 1590738990000 ] + - [ "aa",4, 1.0, 1590738990000 ] + - [ "aa",5, 1.0, 1590738990000 ] + - [ "aa",6, 1.0, 1590738990000 ] + - [ "aa",7, 1.0, 1590738991000 ] + - [ "aa",8, 1.0, 1590738992000 ] + - [ "aa",9, 1.0, 1590738993000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum, + sum(c4) OVER w3 as w3_c4_sum, + sum(c4) OVER w4 as w4_c4_sum, + sum(c4) OVER w5 as w5_c4_sum, + sum(c4) OVER w6 as w6_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 OPEN PRECEDING AND CURRENT ROW), + w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME), + w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW), + w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s OPEN PRECEDING AND CURRENT ROW), + w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME); + + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double", "w3_c4_sum double", + "w4_c4_sum double", "w5_c4_sum double", "w6_c4_sum double"] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + - [ "aa", 2, 1590738990000, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0] + - 
[ "aa", 3, 1590738990000, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0] + - [ "aa", 4, 1590738990000, 4.0, 3.0, 1.0, 4.0, 4.0, 1.0] + - [ "aa", 5, 1590738990000, 4.0, 3.0, 1.0, 5.0, 5.0, 1.0] + - [ "aa", 6, 1590738990000, 4.0, 3.0, 1.0, 6.0, 6.0, 1.0] + - [ "aa", 7, 1590738991000, 4.0, 3.0, 3.0, 7.0, 7.0, 7.0] + - [ "aa", 8, 1590738992000, 4.0, 3.0, 3.0, 8.0, 8.0, 8.0] + - [ "aa", 9, 1590738993000, 4.0, 3.0, 3.0, 9.0, 3.0, 3.0] diff --git a/cases/integration_test/window/test_window_row.yaml b/cases/integration_test/window/test_window_row.yaml new file mode 100644 index 00000000000..c4b0814f8ba --- /dev/null +++ b/cases/integration_test/window/test_window_row.yaml @@ -0,0 +1,920 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - + id: 0 + desc: string为partition by + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] + - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] + - + id: 1 + desc: int为partition by + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"bb",20,61] + - [3,"cc",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - + id: 2 + desc: float为partition by - 未命中索引 + mode: rtidb-unsupport + tags: ["TODO", "@chenjing", "0.3.0", "window partition by float under non-performance-sensitive mode"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c5 float","w1_c4_sum bigint"] + rows: + - [1,"aa",1.1,30] + - [2,"bb",1.1,61] + - [3,"cc",1.1,93] + - [4,"dd",1.1,96] + - [5,"ee",1.2,34] + - + id: 3 + desc: double为partition by - 未命中索引 + mode: rtidb-unsupport + tags: ["TODO", "@chenjing", "0.3.0", "window partition by float under non-performance-sensitive mode"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c6 double","w1_c4_sum bigint"] + rows: + - [1,"aa",2.1,30] + - [2,"bb",2.1,61] + - [3,"cc",2.1,93] + - [4,"dd",2.1,96] + - [5,"ee",2.2,34] + - + id: 4 + desc: date为partition by + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c8:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-01"] + - 
[5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-02"] + sql: | + SELECT id, c1, c8, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c8 date","w1_c4_sum bigint"] + rows: + - [1,"aa","2020-05-01",30] + - [2,"bb","2020-05-01",61] + - [3,"cc","2020-05-01",93] + - [4,"dd","2020-05-01",96] + - [5,"ee","2020-05-02",34] + - + id: 5 + desc: timestamp为partition by + inputs: + - + columns : ["id bigint","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c7:id"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c7, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id bigint","c1 string","c7 timestamp","w1_c4_sum bigint"] + rows: + - [1,"aa",1590738990000,30] + - [2,"bb",1590738990000,61] + - [3,"cc",1590738990000,93] + - [4,"dd",1590738990000,96] + - [5,"ee",1590738991000,34] + - + id: 6 + desc: bigint为partition by + inputs: + - + columns : ["id bigint","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c4:id"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c4 ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id 
bigint","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",30,2] + - [3,"cc",30,3] + - [4,"dd",30,3] + - [5,"ee",31,1] + - + id: 7 + desc: bigint为order by + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c8:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,3] + - [5,"ee",34,1] + - + id: 8 + desc: 多个pk + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - [6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"aa",20,61] + - [3,"aa",20,93] + - [4,"aa",20,96] + - [5,"aa",24,34] + - [6,"bb",24,35] + - + id: 9 + desc: 两个pk都使用了索引 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: 
["index1:c1|c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - [6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"aa",20,61] + - [3,"aa",20,93] + - [4,"aa",20,96] + - [5,"aa",24,34] + - [6,"bb",24,35] + - + id: 13-2 + desc: 两个pk都使用了索引 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05"] + - [6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"aa",20,61] + - [3,"aa",20,93] + - [4,"aa",20,96] + - [5,"aa",24,34] + - [6,"bb",24,35] + - + id: 10 + desc: 多个window指定相同的pk + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - 
[4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"bb",20,61,2] + - [3,"cc",20,93,3] + - [4,"dd",20,96,3] + - [5,"ee",21,34,1] + - + id: 11 + desc: 多个window指定相不同的pk + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7", "index2:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"aa",20,61,2] + - [3,"cc",20,93,1] + - [4,"cc",20,96,2] + - [5,"ee",21,34,1] + - + id: 12 + desc: 多个windowpk是table.column模式 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7", "index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - 
[4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"aa",20,61,2] + - [3,"cc",20,93,1] + - [4,"cc",20,96,2] + - [5,"ee",21,34,1] + - + id: 13-1 + desc: 多个window指定不同的ts, 数据时间乱序插入,batch模式预期 + mode: request-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7", "index2:c3:c4"] + rows: + - [1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,33,3] + - [2,"bb",20,64,2] + - [3,"cc",20,94,1] + - [4,"dd",20,93,3] + - [5,"ee",21,34,1] + - + id: 13-2 + desc: 多个window指定不同的ts, 数据时间乱序插入,request模式预期 + mode: batch-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7", "index2:c3:c4"] + rows: + - [1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - 
[3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,33,1] + - [2,"bb",20,64,1] + - [3,"cc",20,94,1] + - [4,"dd",20,93,3] + - [5,"ee",21,34,1] + - + id: 13-3 + desc: 多个window指定不同的ts, 数据按时间顺序插入 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7", "index2:c3:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"bb",20,61,2] + - [3,"cc",20,93,3] + - [4,"dd",20,96,3] + - [5,"ee",21,34,1] + - + id: 14 + desc: 两个window其中两个pk为索引列 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - 
[3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"aa",20,61,2] + - [3,"cc",20,93,1] + - [4,"cc",20,96,2] + - [5,"ee",21,34,1] + - + id: 15 + desc: 两个window其中一个pk和两个pk + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7","index2:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"cc",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"aa",20,61,2] + - [3,"cc",20,93,1] + - [4,"cc",20,96,2] + - [5,"cc",21,34,1] + - + id: 16 + desc: 全部window + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - 
[4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, count(c3) OVER w1 as w1_c3_count, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + columns: ["id int", "w1_c3_count bigint","w1_c4_sum bigint"] + order: id + rows: + - [1, 1,30] + - [2, 2,61] + - [3, 3,93] + - [4, 3,96] + - [5, 1,34] + - + id: 17 + desc: 结合limit + tags: ["TODO", "LIMIT批模式没有确定性输出"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) limit 2; + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [5,"ee",21,34] + - + id: 18 + desc: window的计算结果不使用别名 + mode: cli-unsupport + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","sum(c4)over w1 bigint"] + rows: + - [1,"aa",20,30] + - 
[2,"bb",20,61] + - [3,"cc",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - + id: 19 + desc: case when window expression then window expression else null end + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c4, + case when lag(c1, 0) OVER w1 == "aa" then sum(c4) over w1 + else null end + as sum_c1_w1 FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: c4 + columns: ["c1 string","c4 bigint","sum_c1_w1 bigint"] + rows: + - ["aa",30,30] + - ["aa",31,61] + - ["aa",32,93] + - ["bb",33,NULL] + - ["bb",34,NULL] + - + id: 20 + desc: case when window expr then window expr else window expr + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c4, + case when lag(c1, 0) OVER w1 == "aa" then sum(c4) over w1 + else min(c4) over w1 end + as sum_c1_w1 FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: c4 + columns: ["c1 string","c4 bigint","sum_c1_w1 bigint"] + rows: + - ["aa",30,30] + - ["aa",31,61] + - ["aa",32,93] + - ["bb",33,33] + - ["bb",34,33] + - + id: 21 + desc: case when simple expression then window expression else null end + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 
double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c4, + case when c1 == "aa" then sum(c4) over w1 + else null end + as sum_c1_w1 FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: c4 + columns: ["c1 string","c4 bigint","sum_c1_w1 bigint"] + rows: + - ["aa",30,30] + - ["aa",31,61] + - ["aa",32,93] + - ["bb",33,NULL] + - ["bb",34,NULL] + - + id: 22 + desc: window expression + window expression + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, c4, + (sum(c4) over w1 + sum(c3) over w1) as sum_c3_c4_w1 FROM {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: c4 + columns: ["c1 string","c3 int", "c4 bigint","sum_c3_c4_w1 bigint"] + rows: + - ["aa",20, 30, 50] + - ["aa",21, 31, 102] + - ["aa",22, 32, 156] + - ["bb",23, 33, 56] + - ["bb",24, 34, 114] + + - + id: 28 + desc: 匿名窗口 + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - 
["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) as w1_c4_sum FROM {0}; + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] + - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] + - + id: 29 + desc: 匿名窗口-没有小括号 + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW as w1_c4_sum FROM {0}; + expect: + success: false + - + id: 30 + desc: smallint为partition by + inputs: + - + columns : ["id int","c1 string","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 smallint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"bb",20,61] + - [3,"cc",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - + id: 31 + desc: bool为partition by + tags: ["TODO", "bug"] + inputs: + - + columns : ["id int","c1 string","c2 bool","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: 
["index1:c2:c7"] + rows: + - [1,"aa",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",true,20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",true,20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",true,20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",false,21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c2, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c2 bool","w1_c4_sum bigint"] + rows: + - [1,"aa",true,30] + - [2,"bb",true,61] + - [3,"cc",true,93] + - [4,"dd",true,96] + - [5,"ee",false,34] + - + id: 33 + desc: int为order by + tags: ["TODO"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c3 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 34 + desc: smallint为order by + tags: ["TODO"] + inputs: + - + columns : ["id int","c1 string","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c3 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + 
expect: + success: false + - + id: 35 + desc: bool为order by + inputs: + - + columns : ["id int","c1 string","c2 bool","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",false,21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c2 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 37 + desc: no frame + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7); + expect: + success: false + + - + id: 38 + desc: rows 1-2 + version: 0.6.0 + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - 
["aa",20,NULL] + - ["aa",21,30] + - ["aa",22,61] + - ["aa",23,63] + - ["bb",24,NULL] + - + id: 39 + desc: rows 0-2 + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] + - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] + - + id: 40 + desc: rows -1-2 + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND -1 PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] + - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] diff --git a/cases/integration_test/window/test_window_row_range.yaml b/cases/integration_test/window/test_window_row_range.yaml new file mode 100644 index 00000000000..71681b7d41e --- /dev/null +++ b/cases/integration_test/window/test_window_row_range.yaml @@ -0,0 +1,1497 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: string为partition by + inputs: + - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: c3 + columns: [ "c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ "aa",20,30 ] + - [ "aa",21,61 ] + - [ "aa",22,93 ] + - [ "aa",23,96 ] + - [ "bb",24,34 ] + - id: 1 + desc: int为partition by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 
string","c3 int","w1_c4_sum bigint" ] + rows: + - [ 1,"aa",20,30 ] + - [ 2,"bb",20,61 ] + - [ 3,"cc",20,93 ] + - [ 4,"dd",20,96 ] + - [ 5,"ee",21,34 ] + - id: 2 + desc: float为partition by - 未命中索引 + mode: rtidb-unsupport + tags: ["TODO", "@chenjing", "0.3.0", "window partition by float under non-performance-sensitive mode"] + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c5 float","w1_c4_sum bigint" ] + rows: + - [ 1,"aa",1.1,30 ] + - [ 2,"bb",1.1,61 ] + - [ 3,"cc",1.1,93 ] + - [ 4,"dd",1.1,96 ] + - [ 5,"ee",1.2,34 ] + - id: 3 + desc: double为partition by - 未命中索引 + mode: rtidb-unsupport + tags: ["TODO", "@chenjing", "0.3.0", "window partition by float under non-performance-sensitive mode"] + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c6 double","w1_c4_sum bigint" ] 
+ rows: + - [ 1,"aa",2.1,30 ] + - [ 2,"bb",2.1,61 ] + - [ 3,"cc",2.1,93 ] + - [ 4,"dd",2.1,96 ] + - [ 5,"ee",2.2,34 ] + - id: 4 + desc: date为partition by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c8:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-01" ] + - [ 3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-01" ] + - [ 4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-01" ] + - [ 5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-02" ] + sql: | + SELECT id, c1, c8, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c8 date","w1_c4_sum bigint" ] + rows: + - [ 1,"aa","2020-05-01",30 ] + - [ 2,"bb","2020-05-01",61 ] + - [ 3,"cc","2020-05-01",93 ] + - [ 4,"dd","2020-05-01",96 ] + - [ 5,"ee","2020-05-02",34 ] + - id: 5 + desc: timestamp为partition by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 timestamp" ] + indexs: [ "index1:c7:c9" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01",1590738990000 ] + - [ 2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01",1590738991000 ] + - [ 3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01",1590738992000 ] + - [ 4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01",1590738993000 ] + - [ 5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02",1590738994000 ] + sql: | + SELECT id, c1, c7, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c9 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c7 timestamp","w1_c4_sum bigint" ] + rows: + - [ 1,"aa",1590738990000,30 ] + - [ 2,"bb",1590738990000,61 ] + - [ 3,"cc",1590738990000,93 ] + - [ 4,"dd",1590738990000,96 ] + - [ 5,"ee",1590738991000,34 ] + - 
id: 6 + desc: bigint为partition by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c4:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,30,1.1,2.1,1590738991000,"2020-05-01" ] + - [ 3,"cc",20,30,1.1,2.1,1590738992000,"2020-05-01" ] + - [ 4,"dd",20,30,1.1,2.1,1590738993000,"2020-05-01" ] + - [ 5,"ee",21,31,1.2,2.2,1590738994000,"2020-05-02" ] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ] + rows: + - [ 1,"aa",30,1 ] + - [ 2,"bb",30,2 ] + - [ 3,"cc",30,3 ] + - [ 4,"dd",30,3 ] + - [ 5,"ee",31,1 ] + - id: 7 + desc: string为order by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,30,1.1,2.1,1590738991000,"2020-05-01" ] + - [ 3,"cc",20,30,1.1,2.1,1590738992000,"2020-05-01" ] + - [ 4,"dd",20,30,1.1,2.1,1590738993000,"2020-05-01" ] + - [ 5,"ee",21,31,1.2,2.2,1590738994000,"2020-05-02" ] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c1 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + + - + id: 8 + desc: bigint为order by-不加单位-bigint + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02" ] + sql: | 
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ] + rows: + - [ 1,"aa",30,1 ] + - [ 2,"bb",31,2 ] + - [ 3,"cc",32,3 ] + - [ 4,"dd",33,3 ] + - [ 5,"ee",34,1 ] + - id: 8-2 + desc: int为order by-未命中TS + mode: rtidb-unsupport,cli-unsupport + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c8:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",21,31,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 3,"cc",22,32,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 4,"dd",23,33,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 5,"ee",24,34,1.2,2.2,1590738991000,"2020-05-02" ] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c3 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ] + rows: + - [ 1,"aa",30,1 ] + - [ 2,"bb",31,2 ] + - [ 3,"cc",32,3 ] + - [ 4,"dd",33,3 ] + - [ 5,"ee",34,1 ] + - id: 8-3 + desc: bigint为order by-加单位 + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c4" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02" ] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ] + rows: + - [ 
1,"aa",30,1 ] + - [ 2,"bb",31,2 ] + - [ 3,"cc",32,3 ] + - [ 4,"dd",33,4 ] + - [ 5,"ee",34,1 ] + - id: 8-4 + desc: int为order by-加单位-未命中索引 + mode: rtidb-unsupport,cli-unsupport + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",21,31,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 3,"cc",22,32,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 4,"dd",23,33,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 5,"ee",24,34,1.2,2.2,1590738991000,"2020-05-02" ] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c3 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ] + rows: + - [ 1,"aa",30,1 ] + - [ 2,"bb",31,2 ] + - [ 3,"cc",32,3 ] + - [ 4,"dd",33,4 ] + - [ 5,"ee",34,1 ] + - id: 9 + desc: float为order by + mode: rtidb-unsupport + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c8:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.1,1590738990000,"2020-05-01" ] + - [ 3,"cc",20,32,1.3,2.1,1590738990000,"2020-05-01" ] + - [ 4,"dd",20,33,1.4,2.1,1590738990000,"2020-05-01" ] + - [ 5,"ee",21,34,1.5,2.2,1590738991000,"2020-05-02" ] + sql: | + SELECT id, c1, c5, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c5 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 10 + desc: double为order by + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-01" ] + - [ 
3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-01" ] + - [ 4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-01" ] + - [ 5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-02" ] + sql: | + SELECT id, c1, c6, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 11 + desc: date为order by-未命中索引 + mode: offline-unsupport + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-03" ] + - [ 4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-05" ] + sql: | + SELECT id, c1, c8, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c8 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 12 + desc: 多个pk + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1|c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + - [ 6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ 1,"aa",20,30 ] + - [ 2,"aa",20,61 ] + - [ 3,"aa",20,93 ] + - [ 4,"aa",20,96 ] + - [ 5,"aa",24,34 ] + - [ 6,"bb",24,35 ] + - id: 13 + desc: 
两个pk都使用了索引 + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1|c3:c7","index2:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05" ] + - [ 6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ 1,"aa",20,30 ] + - [ 2,"aa",20,61 ] + - [ 3,"aa",20,93 ] + - [ 4,"aa",20,96 ] + - [ 5,"aa",24,34 ] + - [ 6,"bb",24,35 ] + - id: 14 + desc: 多个window指定相同的pk + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ] + rows: + - [ 1,"aa",20,30,1 ] + - [ 2,"bb",20,61,2 ] + - [ 3,"cc",20,93,3 ] + - [ 4,"dd",20,96,3 ] + - [ 5,"ee",21,34,1 ] + - id: 15 + desc: 多个window指定相不同的pk + inputs: + - columns: [ "id int","c1 string","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7", "index2:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ] + rows: + - [ 1,"aa",20,30,1 ] + - [ 2,"aa",20,61,2 ] + - [ 3,"cc",20,93,1 ] + - [ 4,"cc",20,96,2 ] + - [ 5,"ee",21,34,1 ] + - id: 16-1 + desc: 多个window指定不同的ts, 数据时间乱序插入,batch模式预期 + mode: request-unsupport + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c7", "index2:c3:c4" ] + rows: + - [ 1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ] + rows: + - [ 1,"aa",20,33,3 ] + - [ 2,"bb",20,64,2 ] + - [ 3,"cc",20,94,1 ] + - [ 4,"dd",20,93,3 ] + - [ 5,"ee",21,34,1 ] + + - id: 
16-2 + desc: 多个window指定不同的ts, 数据时间乱序插入,request模式预期 + mode: batch-unsupport + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c7", "index2:c3:c4" ] + rows: + - [ 1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ] + rows: + - [ 1,"aa",20,33,1 ] + - [ 2,"bb",20,64,1 ] + - [ 3,"cc",20,94,1 ] + - [ 4,"dd",20,93,3 ] + - [ 5,"ee",21,34,1 ] + - id: 16-3 + desc: 多个window指定不同的ts, 数据时间按序插入 + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c7", "index2:c3:c4" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ] + rows: + - [ 1,"aa",20,30,1 ] + - [ 
2,"bb",20,61,2 ] + - [ 3,"cc",20,93,3 ] + - [ 4,"dd",20,96,3 ] + - [ 5,"ee",21,34,1 ] + - id: 17 + desc: 两个window其中两个pk为索引列 + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7","index2:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ] + rows: + - [ 1,"aa",20,30,1 ] + - [ 2,"aa",20,61,2 ] + - [ 3,"cc",20,93,1 ] + - [ 4,"cc",20,96,2 ] + - [ 5,"ee",21,34,1 ] + - id: 18 + desc: 两个window其中一个pk和两个pk + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7","index2:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"cc",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count 
bigint" ] + rows: + - [ 1,"aa",20,30,1 ] + - [ 2,"aa",20,61,2 ] + - [ 3,"cc",20,93,1 ] + - [ 4,"cc",20,96,2 ] + - [ 5,"cc",21,34,1 ] + - id: 19 + desc: 全部window + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, count(c3) OVER w1 as w1_c3_count, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + columns: [ "id int", "w1_c3_count bigint","w1_c4_sum bigint" ] + order: id + rows: + - [ 1, 1,30 ] + - [ 2, 2,61 ] + - [ 3, 3,93 ] + - [ 4, 3,96 ] + - [ 5, 1,34 ] + - id: 20 + tags: [ "TODO", "@zhaowei暂时不要引入LIMIT的case,LIMIT的case需要spark,rtidb分别预期结果" ] + desc: 结合limit + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW) limit 2; + expect: + order: id + columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint" ] + rows: + - [ 1,"aa",20,30 ] + - [ 5,"ee",21,34 ] + - id: 22 + desc: window的计算结果不使用别名 + mode: cli-unsupport + inputs: + - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] + 
indexs: [ "index1:c3:c7" ] + rows: + - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ] + - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ] + - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ] + - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ] + - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: [ "id int","c1 string","c3 int","sum(c4)over w1 bigint" ] + rows: + - [ 1,"aa",20,30 ] + - [ 2,"bb",20,61 ] + - [ 3,"cc",20,93 ] + - [ 4,"dd",20,96 ] + - [ 5,"ee",21,34 ] + - id: 23-1 + desc: ROWS_RANGE Window with MaxSize + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0 ] + - [ "aa", 2, 1590738991000, 2.0 ] + - [ "aa", 3, 1590738992000, 2.0 ] + - [ "aa", 4, 1590738993000, 2.0 ] + - [ "aa", 5, 1590738994000, 2.0 ] + - [ "aa", 6, 1590738995000, 2.0 ] + - [ "aa", 7, 1590738999000, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0 ] + - [ "aa", 9, 1590739002000, 2.0 ] + - id: 23-2 + desc: ROWS_RANGE Current History Window with MaxSize 2 + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 
1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 10); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0 ] + - [ "aa", 2, 1590738991000, 2.0 ] + - [ "aa", 3, 1590738992000, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0 ] + - [ "aa", 5, 1590738994000, 4.0 ] + - [ "aa", 6, 1590738995000, 4.0 ] + - [ "aa", 7, 1590738999000, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0 ] + - id: 24-1 + desc: ROWS_RANGE Pure History Window + version: 0.6.0 + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND 1s PRECEDING); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, NULL ] + - [ "aa", 2, 1590738991000, 1.0 ] + - [ "aa", 3, 1590738992000, 2.0 ] + - [ "aa", 4, 1590738993000, 3.0 ] + - [ "aa", 5, 1590738994000, 3.0 ] + - [ "aa", 6, 1590738995000, 3.0 ] + - [ "aa", 7, 1590738999000, NULL ] + - [ "aa", 8, 1590739001000, 1.0 ] + - [ "aa", 9, 1590739002000, 2.0 ] + - id: 24-2 + desc: 
ROWS_RANGE Pure History Window With MaxSize + version: 0.6.0 + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 1000 PRECEDING, maxsize=2), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + + batch_plan: | + PROJECT(type=WindowAggregation) + +-WINDOW(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 1000 PRECEDING, maxsize=2)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND 1s PRECEDING MAXSIZE 2); + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, NULL ] + - [ "aa", 2, 1590738991000, 1.0 ] + - [ "aa", 3, 1590738992000, 2.0 ] + - [ "aa", 4, 1590738993000, 2.0 ] + - [ "aa", 5, 1590738994000, 2.0 ] + - [ "aa", 6, 1590738995000, 2.0 ] + - [ "aa", 7, 1590738999000, NULL ] + - [ "aa", 8, 1590739001000, 1.0 ] + - [ "aa", 9, 1590739002000, 2.0 ] + - id: 25 + desc: ROWS_RANGE Current History Window with MaxSize Merge + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ 
"aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 4), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW MAXSIZE 4); + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT, maxsize=4), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + batch_plan: | + PROJECT(type=WindowAggregation) + +-WINDOW(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT, maxsize=4)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738991000, 2.0, 2.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 3.0 ] + - [ "aa", 5, 1590738994000, 4.0, 3.0 ] + - [ "aa", 6, 1590738995000, 4.0, 3.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0, 2.0 ] + - id: 26 + desc: ROWS_RANGE Window with MaxSize Not Merge + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 
AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2); + request_plan: | + SIMPLE_PROJECT(sources=(c1, c3, c7, w1_c4_sum, w2_c4_sum)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738991000, 2.0, 2.0 ] + - [ "aa", 3, 1590738992000, 3.0, 2.0 ] + - [ "aa", 4, 1590738993000, 4.0, 2.0 ] + - [ "aa", 5, 1590738994000, 4.0, 2.0 ] + - [ "aa", 6, 1590738995000, 4.0, 2.0 ] + - [ "aa", 7, 1590738999000, 1.0, 1.0 ] + - [ "aa", 8, 1590739001000, 2.0, 2.0 ] + - [ "aa", 9, 1590739002000, 3.0, 2.0 ] + + - id: 27-1 + desc: ROWS and ROWS_RANGE Current History Window with MaxSize Merge + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND 
CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6s PRECEDING AND CURRENT ROW MAXSIZE 5); + request_plan: | + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 0 CURRENT, maxsize=5), rows=(auto_t0.c7, 3 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + batch_plan: | + PROJECT(type=WindowAggregation) + +-WINDOW(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 0 CURRENT, maxsize=5), rows=(auto_t0.c7, 3 PRECEDING, 0 CURRENT)) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738991000, 2.0, 2.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 4.0, 5.0 ] + - [ "aa", 6, 1590738995000, 4.0, 5.0 ] + - [ "aa", 7, 1590738999000, 4.0, 4.0 ] + - [ "aa", 8, 1590739001000, 4.0, 3.0 ] + - [ "aa", 9, 1590739002000, 4.0, 3.0 ] + - id: 27-2 + desc: ROWS and ROWS_RANGE Current History Window with MaxSize, MaxSize < ROWS Preceding, Can't Merge Frame + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6s 
PRECEDING AND CURRENT ROW MAXSIZE 5); + request_plan: | + SIMPLE_PROJECT(sources=(c1, c3, c7, w1_c4_sum, w2_c4_sum)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(auto_t0.c7, 7 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 0 CURRENT, maxsize=5), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, 1.0 ] + - [ "aa", 2, 1590738991000, 2.0, 2.0 ] + - [ "aa", 3, 1590738992000, 3.0, 3.0 ] + - [ "aa", 4, 1590738993000, 4.0, 4.0 ] + - [ "aa", 5, 1590738994000, 5.0, 5.0 ] + - [ "aa", 6, 1590738995000, 6.0, 5.0 ] + - [ "aa", 7, 1590738999000, 7.0, 4.0 ] + - [ "aa", 8, 1590739001000, 8.0, 3.0 ] + - [ "aa", 9, 1590739002000, 8.0, 3.0 ] + + - id: 27-3 + desc: ROWS and ROWS_RANGE Pure History Window Cant' Be Merge + version: 0.6.0 + inputs: + - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ] + indexs: [ "index1:c1:c7" ] + rows: + - [ "aa",1, 1.0, 1590738990000 ] + - [ "aa",2, 1.0, 1590738991000 ] + - [ "aa",3, 1.0, 1590738992000 ] + - [ "aa",4, 1.0, 1590738993000 ] + - [ "aa",5, 1.0, 1590738994000 ] + - [ "aa",6, 1.0, 1590738995000 ] + - [ "aa",7, 1.0, 1590738999000 ] + - [ "aa",8, 1.0, 1590739001000 ] + - [ "aa",9, 1.0, 1590739002000 ] + sql: | + SELECT c1, c3, c7, + sum(c4) OVER w1 as w1_c4_sum, + sum(c4) OVER w2 as w2_c4_sum + FROM {0} WINDOW + w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6s PRECEDING AND 2s PRECEDING); + request_plan: | + 
SIMPLE_PROJECT(sources=(c1, c3, c7, w1_c4_sum, w2_c4_sum)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(auto_t0.c7, 3 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 2000 PRECEDING), index_keys=(auto_t0.c1)) + DATA_PROVIDER(request=auto_t0) + DATA_PROVIDER(type=Partition, table=auto_t0, index=index1) + + expect: + order: c3 + columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ] + rows: + - [ "aa", 1, 1590738990000, 1.0, NULL ] + - [ "aa", 2, 1590738991000, 2.0, NULL ] + - [ "aa", 3, 1590738992000, 3.0, 1.0 ] + - [ "aa", 4, 1590738993000, 4.0, 2.0 ] + - [ "aa", 5, 1590738994000, 4.0, 3.0 ] + - [ "aa", 6, 1590738995000, 4.0, 4.0 ] + - [ "aa", 7, 1590738999000, 4.0, 3.0 ] + - [ "aa", 8, 1590739001000, 4.0, 2.0 ] + - [ "aa", 9, 1590739002000, 4.0, 1.0 ] + - + id: 28 + desc: 匿名窗口 + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) as w1_c4_sum FROM {0}; + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] + - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] + - + id: 29 + desc: 匿名窗口-没有小括号 + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW as w1_c4_sum FROM {0}; + expect: + success: false + - + id: 30 + desc: smallint为partition by + inputs: + - + columns : ["id int","c1 string","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 smallint","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [2,"bb",20,61] + - [3,"cc",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - + id: 31 + desc: bool为partition by + tags: ["TODO", "bug"] + inputs: + - + columns : ["id int","c1 string","c2 bool","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c2:c7"] + rows: + - [1,"aa",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",true,20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",true,20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",true,20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",false,21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c2, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c2 
bool","w1_c4_sum bigint"] + rows: + - [1,"aa",true,30] + - [2,"bb",true,61] + - [3,"cc",true,93] + - [4,"dd",true,96] + - [5,"ee",false,34] + - + id: 32 + desc: no partition by + tags: ["TODO"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 33 + desc: int为order by + tags: ["TODO"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c3 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 34 + desc: smallint为order by + tags: ["TODO"] + inputs: + - + columns : ["id int","c1 string","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count 
FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c3 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 35 + desc: bool为order by + inputs: + - + columns : ["id int","c1 string","c2 bool","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",true,20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",false,21,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c2 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + + - + id: 36 + desc: no order by + tags: ["TODO","bug待修复"] + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c8:c4"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - + id: 37 + desc: no frame + inputs: + - + columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7); + expect: + success: false + - + id: 38 + desc: bigint为order by-加单位 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c4"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,4] + - [5,"ee",34,1] + - + id: 39 + desc: timestamp为order by-不加单位 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738990001,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738990002,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1590738990003,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,3] + - [5,"ee",34,1] + - + id: 40 + desc: timestamp为order by-加单位-m + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1606755720000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1606755780000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1606755840000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1606755660000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW 
w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,3] + - [5,"ee",34,1] + - + id: 41 + desc: timestamp为order by-加单位-h + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1606759200000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1606762800000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1606766400000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1606766400000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2h PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,3] + - [5,"ee",34,1] + - + id: 42 + desc: timestamp为order by-加单位-d + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1606752000000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1606838400000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1606924800000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1607011200000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1606752000000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2d PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,3] + - [5,"ee",34,1] + - + id: 43 + desc: bigint为order by-前后都不加单位,1-2 + inputs: + - 
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,0] + - [2,"bb",31,1] + - [3,"cc",32,2] + - [4,"dd",33,2] + - [5,"ee",34,0] + - + id: 44 + desc: bigint为order by-前后都不加单位,0-2 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND 0 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,3] + - [5,"ee",34,1] + - + id: 45 + desc: bigint为order by-前后都不加单位,-1-2 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c4"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"] + - 
[4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"] + - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND -1 PRECEDING); + expect: + order: id + columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"] + rows: + - [1,"aa",30,1] + - [2,"bb",31,2] + - [3,"cc",32,3] + - [4,"dd",33,3] + - [5,"ee",34,1] + - + id: 46 + desc: timestamp为order by-2s-1s + version: 0.6.0 + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,NULL] + - ["aa",21,30] + - ["aa",22,61] + - ["aa",23,63] + - ["bb",24,NULL] + - + id: 47 + desc: timestamp为order by-2s-0s + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] 
+ - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] + - + id: 48 + desc: timestamp为order by-2s-0 + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0 PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,30] + - ["aa",21,61] + - ["aa",22,93] + - ["aa",23,96] + - ["bb",24,34] + - + id: 49 + desc: timestamp为order by-2s-1 + version: 0.6.0 + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - ["aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1 PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20, NULL] + - ["aa",21,30] + - ["aa",22,61] + - ["aa",23,93] + - ["bb",24, NULL] + - + id: 50 + desc: timestamp为order by-前后单位不一样 + version: 0.6.0 + inputs: + - + columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"] + - 
["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"] + - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"] + - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"] + sql: | + SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND 1s PRECEDING); + expect: + order: c3 + columns: ["c1 string","c3 int","w1_c4_sum bigint"] + rows: + - ["aa",20,NULL] + - ["aa",21,30] + - ["aa",22,61] + - ["aa",23,93] + - ["bb",24,NULL] diff --git a/cases/integration_test/window/test_window_union.yaml b/cases/integration_test/window/test_window_union.yaml new file mode 100644 index 00000000000..b11957c25e6 --- /dev/null +++ b/cases/integration_test/window/test_window_union.yaml @@ -0,0 +1,1152 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: 正常union + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 1 + desc: union的表列个数不一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000] + - [3,"cc",20,32,1.3,2.3,1590738992000] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 2 + desc: 列类型不一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - 
[5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 string"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 3 + desc: 列名不一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + - id: 4 + desc: 使用列别名后schema一致 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION (select id, c1,c3,c4,c5,c6,c7,c9 as 
c8 from {1}) + PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 5 + desc: 样本表使用索引,UNION表未命中索引 + mode: rtidb-unsupport + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 6 + desc: union表使用索引,样本表未命中索引 + mode: rtidb-unsupport + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id 
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 7 + desc: 样本表union表都使用索引 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 8 + desc: union多表 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1},{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW); + 
expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,126] + - [5,"dd",20,129] + - [6,"ee",21,34] + - id: 9 + desc: 结合limit + tags: ["TODO", "@zhaowei remove limit case here"] + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) limit 2; + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [5,"ee",21,34] + - id: 10 + desc: 使用两个pk + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - [6,"ee",21,33,1.4,2.4,1590738995000,"2020-05-04"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + rows: + - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 
int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"aa",20,96] + - [5,"ee",21,34] + - [6,"ee",21,67] + - id: 11 + desc: 样本表和union表都使用子查询 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM (select * from {0}) WINDOW w1 AS (UNION (select * from {1}) PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 12 + desc: union多表,其中一个子查询 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION (select * from {1}),{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS 
BETWEEN 3 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,126] + - [5,"dd",20,129] + - [6,"ee",21,34] + - id: 13 + desc: 样本表不进入window + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW + w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + + - id: 14-1 + desc: WINDOW UNION 子查询, column cast 和 const cast子查询, string cast as date + mode: offline-unsupport + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4str string","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2, "bb", 20, "31", 1.2, 2.2, 1590738991000] + - [3, "cc", 20, "32", 1.3, 2.3, 1590738992000] + sql: | + SELECT id, c1, c3, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM {0} WINDOW + w1 AS (UNION (select id, c1, c3, bigint(c4str) as c4, c5, c6, 
c7, date("2020-10-01") as c8 from {1}) + PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, "2020-05-01", 2, 93] + - [4, "dd", 20, "2020-05-04", 2, 96] + - [5, "ee", 21, "2020-05-05", 1, 34] + - id: 14-2 + desc: WINDOW UNION 子查询, column cast 和 const cast子查询. cast column as partition key + mode: offline-unsupport + inputs: + - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20.0, 30,1.1,2.1,1590738993000,"2020-05-01"] + - [4,"dd",20.1, 33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21.2, 34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000] + - [3,"cc",20,32,1.3,2.3,1590738992000] + sql: | + SELECT id, c1, c3, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM (select id, c1, int(c3f) as c3, c4, c5, c6, c7, c8 from {0}) WINDOW + w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1}) + PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, "2020-05-01", 2, 93] + - [4, "dd", 20, "2020-05-04", 2, 96] + - [5, "ee", 21, "2020-05-05", 1, 34] + - id: 14-3 + desc: WINDOW UNION 子查询, timestamp(string) as window ts + mode: offline-unsupport + inputs: + - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"] + - [4,"dd",20.1, 
33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"] + - [5,"ee",21.2, 34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2, 1590738991000] + - [3,"cc",20,32,1.3,2.3, 1590738992000] + sql: | + SELECT id, c1, c3, c7, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM (select id, c1, int(c3f) as c3, c4, c5, c6, timestamp(c7str) as c7, c8 from {0}) WINDOW + w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1}) + PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93] + - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96] + - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34] + - id: 14-4 + desc: WINDOW UNION 子查询, cast另一种写法 cast(column as timestamp) as window ts + mode: offline-unsupport + inputs: + - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"] + indexs: ["index1:c1:c4"] + rows: + - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"] + - [4,"dd",20.1, 33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"] + - [5,"ee",21.2, 34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2, 1590738991000] + - [3,"cc",20,32,1.3,2.3, 1590738992000] + sql: | + SELECT id, c1, c3, c7, c8, + distinct_count(c8) OVER w1 as w1_c8_dis_cnt, + sum(c4) OVER w1 as w1_c4_sum + FROM (select id, c1, cast(c3f as int) as c3, c4, c5, c6, cast(c7str as timestamp) as c7, c8 from {0}) WINDOW + w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, cast("2020-10-01" 
as date) as c8 from {1}) + PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW); + expect: + order: id + columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"] + rows: + - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93] + - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96] + - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34] + - id: 16 + desc: 主表window 添加 INSTANCE_NOT_IN_WINDOW 没有明显错误日志 + mode: offline-unsupport + db: db_wzx + sql: | + select + c1, + min(c1) over table_1_s2_t1 as table_1_c1_9, + min(c2) over table_1_s2_t1 as table_1_c2_10, + identity(case when lag(d1, 1) != null then distinct_count(d1) else null end) over table_1_s2_t1 as table_1_d1_11, + identity(case when lag(d2, 1) != null then distinct_count(d2) else null end) over table_1_s2_t1 as table_1_d2_12, + identity(case when lag(s1, 1) != null then distinct_count(s1) else null end) over table_1_s2_t1 as table_1_s1_13 + from + {0} as main + window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + inputs: + - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] + rows: + - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 
1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + expect: + order: c1 + columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] + rows: + - [1, 1, 2, NULL, NULL, NULL] + + - id: 16-2 + desc: 主表window 添加 INSTANCE_NOT_IN_WINDOW 没有明显错误日志 case when写法优化 + mode: offline-unsupport + db: db_wzx + sql: | + select + c1, + min(c1) over table_1_s2_t1 as table_1_c1_9, + min(c2) over table_1_s2_t1 as table_1_c2_10, + case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11, + case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12, + case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13 + from + {0} as main + window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + inputs: + - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] + rows: + - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn 
string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + expect: + order: c1 + columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] + rows: + - [1, 1, 2, NULL, NULL, NULL] + - id: 17 + desc: 两个索引不一致的表union + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7","index2:c1:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + + # test correctness for window union when there are rows in union rows and original rows whose ts/key is the same + # refer https://github.com/4paradigm/OpenMLDB/issues/1776#issuecomment-1121258571 for the specification + # - 18-1 & 18-2 test simple case for UNION ROWS_RANGE and UNION ROWS + # - 18-3 test test UNION ROWS_RANGE with MAXSIZE + # - 18-4 & 18-5 test EXCLUDE CURRENT_TIME for UNION ROWS_RANGE/ROWS + - id: 18-1 + desc: | + when UNION 
ROWS_RANGE has the same key with original rows, original rows first then union rows + mode: disk-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 400 + 3, 200, 112, 999 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + 3, 199, 112, 44 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + union t2 + partition by `g` order by `ts` + rows_range between 1s preceding and 0s preceding); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 3, 233, 21, 200 + 2, 4, 400, 21, 21 + 3, 2, 999, 44, 44 + - id: 18-2 + desc: | + when UNION ROWS has the same key with original rows, original rows first then union rows, + union rows filtered out first for max window size limitation + mode: disk-unsupport + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 400 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 88, 111, 999 + 1, 100, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from (select * from t1) window w as( + union (select * from t2) + partition by `g` order by `ts` + ROWS BETWEEN 2 preceding and 0 preceding); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 3, 233, 21, 200 + 2, 3, 400, 21, 21 + - id: 18-3 + mode: disk-unsupport + desc: | + when UNION ROWS_RANGE MAXSIZE has the same key with original rows, original rows first then union rows + union rows filtered out for 
max window size first + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 0 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from (select * from t1) window w as( + union (select * from t2) + partition by `g` order by `ts` + rows_range between 1s preceding and 0s preceding MAXSIZE 2); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 2, 200, 21, 200 + 2, 2, 21, 0, 21 + - id: 18-4 + mode: disk-unsupport + desc: | + when UNION ROWS_RANGE EXCLUDE CURRENT_TIME has the same key with original rows, original rows first then union rows + other rows except current row filtered out by EXCLUDE CURRENT_TIME + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 0, 0, 111, 19 + 1, 0, 111, 18 + 2, 100, 111, 21 + 3, 100, 111, 5 + 4, 101, 111, 100 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + # raw union window (before filter) + # 0, 0, 111, 19 + # 1, 0, 111, 18 + # 1, 99, 111, 233 (t2) + # 1, 100, 111, 200 (t2) + # 2, 100, 111, 21 + # 3, 100, 111, 5 + # 1, 101, 111, 17 (t2) + # 4, 101, 111, 100 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from (select * from t1) window w as( + union (select * from t2) + partition by `g` order by `ts` + rows_range between 1s preceding and 0s preceding EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [0, 1, 19, 19, NULL] + - 
[1, 1, 18, 18, NULL] + - [2, 4, 233, 18, 233] + - [3, 4, 233, 5, 233] + - [4, 7, 233, 5, 5] + + - id: 18-5 + mode: disk-unsupport + desc: | + UNION ROWS current time rows filtered out + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 10000 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 87, 111, 300 + 1, 88, 111, 999 + 1, 99, 111, 233 + 1, 100, 111, 200 + 1, 101, 111, 17 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from (select * from t1) window w as( + union (select * from t2) + partition by `g` order by `ts` + ROWS BETWEEN 2 preceding and 0 preceding EXCLUDE CURRENT_TIME); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 3, 999, 21, 233 + 2, 3, 10000, 233, 233 + + # for the case that window unions multiple tables + # the order for rows between those multiple union tables that has same ts key, + # is undefined by specification. + # However, SQL engine explicitly use the order as master table -> first union table in SQL -> second union table in SQL -> .... 
+ # + # 19-* series test case tests for this for SQL engine only, you should never reply on this behavior anyway + - id: 19-1 + mode: disk-unsupport + desc: | + window unions multiple tables, the order for rows in union tables with same ts is explicitly as the order in SQL + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 10000 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 88, 111, 999 + 1, 100, 111, 233 + 1, 100, 111, 200 + - name: t3 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 0 + 1, 100, 111, 33 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1, + lag(val, 2) over w as l2 + from t1 window w as( + union t2,t3 + partition by `g` order by `ts` + ROWS_RANGE BETWEEN 2s preceding and 0s preceding); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + - l2 int + order: id + data: | + 1, 6, 999, 0, 200, 233 + 2, 7, 10000, 0, 21, 200 + - id: 19-2 + mode: disk-unsupport + desc: | + rows order for pure history window union + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 21 + 2, 100, 111, 10000 + - name: t2 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 88, 111, 999 + 1, 100, 111, 233 + 1, 100, 111, 200 + - name: t3 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 100, 111, 0 + 1, 100, 111, 33 + sql: | + select + id, count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1, + lag(val, 2) over w as l2, + lag(val, 3) over w as l3 + from t1 window w as( + union t2,t3 + partition by `g` order by `ts` + ROWS BETWEEN 3 
preceding and 1 preceding INSTANCE_NOT_IN_WINDOW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + - l2 int + - l3 int + order: id + data: | + 1, 3, 233, 33, 200, 233, 33 + 2, 3, 233, 33, 200, 233, 33 + - id: 18 + desc: 主表ts都大于副表的 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738995000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,93] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 19 + desc: 主表ts都小于副表的 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738991000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738992000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738993000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738994000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + 
- [1,"aa",20,30] + - [4,"dd",20,63] + - [5,"ee",21,34] + - id: 20 + desc: 主表副表ts有交集 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 21 + desc: 主表和副表分片在同一节点上 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + replicaNum: 3 + partitionNum: 1 + distribution: + - leader: "{tb_endpoint_1}" + followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + replicaNum: 3 + partitionNum: 1 + distribution: + - leader: "{tb_endpoint_1}" + followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + 
expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 21 + desc: 主表和副表分片在不同的节点上 + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,30] + - [4,"dd",20,96] + - [5,"ee",21,34] + - id: 22 + desc: 两张副表,一张和主表在同一节点,另一张不在 + db: db_wzx + sql: | + select + c1, + min(c1) over table_1_s2_t1 as table_1_c1_9, + min(c2) over table_1_s2_t1 as table_1_c2_10, + case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11, + case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12, + case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13 + from + {0} as main + window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW); + inputs: + - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai 
string", "kn string", "ks string"] + indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s2:t1"] + distribution: + - leader: "{tb_endpoint_1}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint", + "ai string", "kn string", "ks string"] + indexs: ["index1:s1:t1"] + distribution: + - leader: "{tb_endpoint_0}" + rows: + - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"] + expect: + order: c1 + columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"] + rows: + - [1, 1, 2, NULL, NULL, NULL] diff --git a/cases/integration_test/window/test_window_union_cluster_thousand.yaml b/cases/integration_test/window/test_window_union_cluster_thousand.yaml new file mode 100644 index 00000000000..aa12f1b549f --- /dev/null +++ b/cases/integration_test/window/test_window_union_cluster_thousand.yaml @@ -0,0 +1,1044 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + +db: test_zw +debugs: [] +version: 0.5.0 +cases: + - id: 0 + desc: 正常union + mode: disk-unsupport + inputs: + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"] + - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"] + - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c3:c7"] + rows: + - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + 
- [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - 
[2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"] + sql: | + SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"] + rows: + - [1,"aa",20,90] + - [4,"dd",20,96] + - [5,"ee",21,34] \ No newline at end of file diff --git a/cases/integration_test/window/window_attributes.yaml b/cases/integration_test/window/window_attributes.yaml new file mode 100644 index 00000000000..53ebc8fcde7 --- /dev/null +++ b/cases/integration_test/window/window_attributes.yaml @@ -0,0 +1,535 @@ +# window query test with OpenMLDB specific window attributes: +# - EXCLUDE CURRENT_TIME +# - EXCLUDE CURRENT_ROW +# - INSTANCE_NOT_IN_WINDOW +# - MAXSIZE + +debugs: [] +version: 0.6.0 +db: test_java +cases: + - id: 0 + desc: ROWS_RANGE window with 
exclude_current_row + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 0, 0, 111, 0 + 1, 0, 111, 0 + 2, 99000, 111, 21 + 3, 100000, 111, 22 + 4, 101000, 111, 23 + 5, 100000, 114, 56 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + partition by `g` order by `ts` + ROWS_RANGE between 2s PRECEDING and 0s preceding EXCLUDE CURRENT_ROW); + batch_plan: | + PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 2000 PRECEDING, 0 PRECEDING)) + PROJECT(type=WindowAggregation, NEED_APPEND_INPUT) + +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT)) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 2000 PRECEDING, 0 PRECEDING), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [0, 0, NULL, NULL, NULL] + - [1, 1, 0, 0, 0] + - [2, 0, NULL, NULL, 0] + - [3, 1, 21, 21, 21] + - [4, 2, 22, 21, 22] + - [5, 0, NULL, NULL, NULL] + - id: 1 + desc: | + ROWS window with exclude_current_row, '0 PRECEDING EXCLUDE CURRENT_ROW' actually is the same as '0 OPEN PRECEDING' + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 100000, 114, 56 + sql: | + 
select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + from t1 window w as( + partition by `g` order by `ts` + ROWS between 2 PRECEDING and 0 preceding EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 0, NULL, NULL, NULL] + - id: 2 + desc: | + ROWS_RANGE pure-history window with exclude_current_row + whether EXCLUDE CURRENT_ROW is set do not matter + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 100000, 114, 56 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW); + batch_plan: | + PROJECT(type=WindowAggregation) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 2000 PRECEDING, 1000 PRECEDING)) + PROJECT(type=WindowAggregation, NEED_APPEND_INPUT) + +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT)) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(ts, 2000 PRECEDING, 1000 PRECEDING), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [1, 0, 
NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 0, NULL, NULL, NULL] + - id: 3 + desc: | + ROWS pure-history window with exclude_current_row + whether EXCLUDE CURRENT_ROW is set do not matter + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 100000, 114, 56 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW); + batch_plan: | + PROJECT(type=WindowAggregation) + +-WINDOW(partition_keys=(g), orders=(ts ASC), rows=(ts, 2 PRECEDING, 0 OPEN PRECEDING)) + PROJECT(type=WindowAggregation, NEED_APPEND_INPUT) + +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT)) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 2 PRECEDING, 0 OPEN PRECEDING), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 0, NULL, NULL, NULL] + + - id: 4 + desc: | + rows_range current history window, exclude current_row with maxsize + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 
102000, 111, 44 + 5, 100000, 114, 56 + 6, 102000, 114, 52 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS_RANGE BETWEEN 3s PRECEDING AND 0s PRECEDING MAXSIZE 2 EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 2, 23, 22, 23] + - [5, 0, NULL, NULL, NULL] + - [6, 1, 56, 56, 56] + + - id: 5 + desc: | + ROWS_RANGE window with end frame OPEN, exclude current_row do not matter + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 102000, 111, 44 + 5, 100000, 114, 56 + 6, 102000, 114, 52 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS_RANGE BETWEEN 3s PRECEDING AND 0s OPEN PRECEDING MAXSIZE 2 EXCLUDE CURRENT_ROW); + batch_plan: | + PROJECT(type=WindowAggregation) + +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 OPEN PRECEDING, maxsize=2)) + PROJECT(type=WindowAggregation, NEED_APPEND_INPUT) + +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT)) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 OPEN PRECEDING, maxsize=2), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + 
DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 2, 23, 22, 23] + - [5, 0, NULL, NULL, NULL] + - [6, 1, 56, 56, 56] + + - id: 6 + desc: | + ROWS window with end frame OPEN, exclude current_row do not matter + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 102000, 111, 44 + 5, 100000, 114, 56 + 6, 102000, 114, 52 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS BETWEEN 3 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW); + batch_plan: | + PROJECT(type=WindowAggregation) + +-WINDOW(partition_keys=(g), orders=(ts ASC), rows=(ts, 3 PRECEDING, 0 OPEN PRECEDING)) + PROJECT(type=WindowAggregation, NEED_APPEND_INPUT) + +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT)) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + request_plan: | + SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1)) + REQUEST_JOIN(type=kJoinTypeConcat) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 3 PRECEDING, 0 OPEN PRECEDING), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + PROJECT(type=Aggregation) + REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g)) + DATA_PROVIDER(request=t1) + DATA_PROVIDER(type=Partition, table=t1, index=idx) + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 3, 23, 21, 23] + - [5, 0, NULL, NULL, NULL] + - 
[6, 1, 56, 56, 56] + + - id: 7 + desc: | + ROWS_RANGE window with end frame 'CURRENT_ROW', exclude current_row + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 102000, 111, 44 + 5, 100000, 114, 56 + 6, 102000, 114, 52 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + rows: + - [1, 0, NULL, NULL, NULL] + - [2, 1, 21, 21, 21] + - [3, 2, 22, 21, 22] + - [4, 2, 23, 22, 23] + - [5, 0, NULL, NULL, NULL] + - [6, 1, 56, 56, 56] + + - id: 8 + desc: | + ROWS window with end frame 'CURRENT_ROW', exclude current_row + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 102000, 111, 44 + 5, 100000, 114, 56 + 6, 102000, 114, 52 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS BETWEEN 3 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 0, NULL, NULL, NULL + 2, 1, 21, 21, 21 + 3, 2, 22, 21, 22 + 4, 3, 23, 21, 23 + 5, 0, NULL, NULL, NULL + 6, 1, 56, 56, 56 + - id: 9 + desc: | + ROWS Window with exclude current_time and exclude current_row + inputs: + - name: t1 + columns: + - id int + - ts timestamp + - g int + - val int + indexs: + - idx:g:ts + data: | + 1, 99000, 111, 21 + 2, 100000, 111, 22 + 3, 101000, 111, 23 + 4, 102000, 111, 44 + 5, 0, 114, 0 + 6, 0, 114, 99 + 7, 
100000, 114, 56 + 8, 102000, 114, 52 + 9, 104000, 114, 33 + sql: | + select + id, + count(val) over w as cnt, + max(val) over w as mv, + min(val) over w as mi, + lag(val, 1) over w as l1 + FROM t1 WINDOW w as( + PARTITION by `g` ORDER by `ts` + ROWS BETWEEN 3 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME EXCLUDE CURRENT_ROW); + expect: + columns: + - id int + - cnt int64 + - mv int + - mi int + - l1 int + order: id + data: | + 1, 0, NULL, NULL, NULL + 2, 1, 21, 21, 21 + 3, 2, 22, 21, 22 + 4, 3, 23, 21, 23 + 5, 0, NULL, NULL, NULL + 6, 0, NULL, NULL, NULL + 7, 2, 99, 0, 99 + 8, 3, 99, 0, 56 + 9, 3, 99, 52, 52 diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java new file mode 100644 index 00000000000..df515a897ab --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.openmldb.ecosystem.common; + + +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com.google.common.collect.Lists; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; + +import java.sql.Statement; + +/** + * @author zhaowei + * @date 2020/6/11 2:02 PM + */ +@Slf4j +public class KafkaTest extends BaseTest { + protected static SqlExecutor executor; + + @BeforeTest() + @Parameters({"env","version","openMLDBPath"}) + public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String openMLDBPath) throws Exception { + OpenMLDBGlobalVar.env = env; + if(env.equalsIgnoreCase("cluster")){ + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(true); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + }else if(env.equalsIgnoreCase("standalone")){ + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(false); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + }else{ + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") + .zk_root_path("/openmldb") + .nsNum(2).tabletNum(3) + 
.nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + + } + String caseEnv = System.getProperty("caseEnv"); + if (!StringUtils.isEmpty(caseEnv)) { + OpenMLDBGlobalVar.env = caseEnv; + } + log.info("fedb global var env: {}", env); + OpenMLDBClient fesqlClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = fesqlClient.getExecutor(); + log.info("executor:{}",executor); + Statement statement = executor.getStatement(); + statement.execute("SET @@execute_mode='online';"); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/resources/kafka.properties b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/resources/kafka.properties index e69de29bb2d..4416bdb5dcf 100644 --- a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/resources/kafka.properties +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/resources/kafka.properties @@ -0,0 +1,4 @@ +bootstrap.servers=172.24.4.55:39092 +topic=test_kafka +table.create=create table test_kafka(c1 string,c2 smallint,c3 int,c4 bigint,c5 float,c6 double,c7 timestamp,c8 date,c9 bool,index(key=(c1),ts=c7)); + diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java index f3381b5debf..6adaf44660e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/java/com/_4paradigm/openmldb/ecosystem/tmp/TestKafka.java @@ -36,7 +36,7 @@ public void test(){ KafkaProducer producer = new KafkaProducer<>(properties); // String message = "{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"int16\",\"optional\":true,\"field\":\"c1_int16\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c2_int32\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c3_int64\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c4_float\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c5_double\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c6_boolean\"},{\"type\":\"string\",\"optional\":true,\"field\":\"c7_string\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c8_date\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c9_timestamp\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1_int16\":1,\"c2_int32\":2,\"c3_int64\":3,\"c4_float\":4.4,\"c5_double\":5.555,\"c6_boolean\":true,\"c7_string\":\"c77777\",\"c8_date\":19109,\"c9_timestamp\":1651051906000}}"; // String message = "{\"data\":[{\"ID\":20,\"UUID\":\"11\",\"PID\":11,\"GID\":11,\"CID\":11}],\"database\":\"d1\",\"table\":\"test_kafka\",\"type\":\"insert\"}"; - String message = "{\"data\":[{\"c1\":\"cc\",\"c2\":1.1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}],\"type\":\"insert\"}"; + String message = "{\"data\":[{\"c1\":\"dd\",\"c2\":1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}],\"type\":\"insert\"}"; // String message = 
"{\"schema\":{\"type\":\"struct\",\"fields\":[{\"type\":\"string\",\"optional\":true,\"field\":\"c1\"},{\"type\":\"int16\",\"optional\":true,\"field\":\"c2\"},{\"type\":\"int32\",\"optional\":true,\"field\":\"c3\"},{\"type\":\"int64\",\"optional\":true,\"field\":\"c4\"},{\"type\":\"float\",\"optional\":true,\"field\":\"c5\"},{\"type\":\"double\",\"optional\":true,\"field\":\"c6\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Date\",\"optional\":true,\"field\":\"c7\"},{\"type\":\"int64\",\"name\":\"org.apache.kafka.connect.data.Timestamp\",\"optional\":true,\"field\":\"c8\"},{\"type\":\"boolean\",\"optional\":true,\"field\":\"c9\"}],\"optional\":false,\"name\":\"foobar\"},\"payload\":{\"c1\":\"ee\",\"c2\":1,\"c3\":2,\"c4\":3,\"c5\":1.1,\"c6\":2.2,\"c7\":11,\"c8\":1659512628000,\"c9\":true}}"; //发送数据 producer.send(new ProducerRecord("m2",message)); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/JDBCTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/JDBCTest.java index 3921cdddd7a..4b0a12a590c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/JDBCTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/JDBCTest.java @@ -15,6 +15,8 @@ */ package com._4paradigm.openmldb.java_sdk_test.common; +import com._4paradigm.openmldb.test_common.common.BaseTest; + /** * @author zhaowei * @date 2021/3/12 7:53 AM diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java index 7d984a35878..0ffa75fbf09 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java @@ -35,11 +35,11 @@ public class OpenMLDBConfig { public static final List VERSIONS; public static boolean INIT_VERSION_ENV = true; - public static final List FESQL_CASE_LEVELS; - public static final String FESQL_CASE_PATH; - public static final String FESQL_CASE_NAME; - public static final String FESQL_CASE_ID; - public static final String FESQL_CASE_DESC; + public static final List CASE_LEVELS; + public static final String CASE_PATH; + public static final String CASE_NAME; + public static final String CASE_ID; + public static final String CASE_DESC; public static final String YAML_CASE_BASE_DIR; public static final boolean ADD_REPORT_LOG; @@ -49,24 +49,24 @@ public class OpenMLDBConfig { static { String levelStr = System.getProperty("caseLevel"); levelStr = StringUtils.isEmpty(levelStr) ? 
"0" : levelStr; - FESQL_CASE_LEVELS = Arrays.stream(levelStr.split(",")).map(Integer::parseInt).collect(Collectors.toList()); - FESQL_CASE_NAME = System.getProperty("caseName"); - FESQL_CASE_ID = System.getProperty("caseId"); - FESQL_CASE_DESC = System.getProperty("caseDesc"); - FESQL_CASE_PATH = System.getProperty("casePath"); + CASE_LEVELS = Arrays.stream(levelStr.split(",")).map(Integer::parseInt).collect(Collectors.toList()); + CASE_NAME = System.getProperty("caseName"); + CASE_ID = System.getProperty("caseId"); + CASE_DESC = System.getProperty("caseDesc"); + CASE_PATH = System.getProperty("casePath"); YAML_CASE_BASE_DIR = System.getProperty("yamlCaseBaseDir"); - log.info("FESQL_CASE_LEVELS {}", FESQL_CASE_LEVELS); - if (!StringUtils.isEmpty(FESQL_CASE_NAME)) { - log.info("FESQL_CASE_NAME {}", FESQL_CASE_NAME); + log.info("FESQL_CASE_LEVELS {}", CASE_LEVELS); + if (!StringUtils.isEmpty(CASE_NAME)) { + log.info("FESQL_CASE_NAME {}", CASE_NAME); } - if (!StringUtils.isEmpty(FESQL_CASE_ID)) { - log.info("FESQL_CASE_ID {}", FESQL_CASE_ID); + if (!StringUtils.isEmpty(CASE_ID)) { + log.info("FESQL_CASE_ID {}", CASE_ID); } - if (!StringUtils.isEmpty(FESQL_CASE_PATH)) { - log.info("FESQL_CASE_PATH {}", FESQL_CASE_PATH); + if (!StringUtils.isEmpty(CASE_PATH)) { + log.info("FESQL_CASE_PATH {}", CASE_PATH); } - if (!StringUtils.isEmpty(FESQL_CASE_DESC)) { - log.info("FESQL_CASE_DESC {}", FESQL_CASE_DESC); + if (!StringUtils.isEmpty(CASE_DESC)) { + log.info("FESQL_CASE_DESC {}", CASE_DESC); } if (!StringUtils.isEmpty(YAML_CASE_BASE_DIR)) { log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java index c502f83a7b3..b3de78e0a23 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -18,6 +18,7 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.common.BaseTest; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index c45ddb4bfab..90424dda790 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -18,6 +18,7 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.common.BaseTest; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBProcedureColumn2.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBProcedureColumn2.java deleted file mode 100644 index 13e2eefff06..00000000000 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBProcedureColumn2.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com._4paradigm.openmldb.java_sdk_test.entity; - -import lombok.Data; - -@Data -public class OpenMLDBProcedureColumn2 { - private int id; - private String field; - private String type; - private boolean constant; //true 可以为null -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenmldbDeployment2.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenmldbDeployment2.java deleted file mode 100644 index 17374b4d762..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenmldbDeployment2.java +++ /dev/null @@ -1,14 +0,0 @@ -package com._4paradigm.openmldb.java_sdk_test.entity; - -import lombok.Data; - -import java.util.List; - -@Data -public class OpenmldbDeployment2 { - private String dbName; - private String name; - private String sql; - private List inColumns; - private List outColumns; -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java index 9d181d3eaba..63832b3b85e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/DiffResultTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.diff_test; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; +import com._4paradigm.openmldb.test_common.model.OpenMLDBCaseFileList; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java index 3515ec78641..9efa2b3eefb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/MysqlTest.java @@ -16,7 +16,7 @@ package com._4paradigm.openmldb.java_sdk_test.diff_test; import com._4paradigm.openmldb.java_sdk_test.common.JDBCTest; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; +import com._4paradigm.openmldb.test_common.model.OpenMLDBCaseFileList; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import 
com._4paradigm.openmldb.test_common.model.SQLCaseType; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java index 06a29d9cafd..38f503d2881 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/diff_test/Sqlite3Test.java @@ -17,7 +17,7 @@ import com._4paradigm.openmldb.java_sdk_test.common.JDBCTest; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; +import com._4paradigm.openmldb.test_common.model.OpenMLDBCaseFileList; import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java index 62a5b20d24e..c948d94064d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/ut/UniqueExpectTest.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.ut; import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; +import com._4paradigm.openmldb.test_common.model.OpenMLDBCaseFileList; import 
com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java similarity index 96% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java index da0050fb919..b97e3195452 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/BaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java @@ -13,13 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.common; +package com._4paradigm.openmldb.test_common.common; -import com._4paradigm.openmldb.java_sdk_test.entity.OpenMLDBCaseFileList; import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.CaseFile; +import com._4paradigm.openmldb.test_common.model.OpenMLDBCaseFileList; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.provider.Yaml; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBCaseFileList.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java similarity index 57% rename from test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBCaseFileList.java rename to test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java index 4707852dd60..6e52621cbdc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/entity/OpenMLDBCaseFileList.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java @@ -14,13 +14,11 @@ * limitations under the License. 
*/ -package com._4paradigm.openmldb.java_sdk_test.entity; +package com._4paradigm.openmldb.test_common.model; -import com._4paradigm.openmldb.java_sdk_test.common.BaseTest; -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; -import com._4paradigm.openmldb.test_common.model.CaseFile; -import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.common.BaseTest; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.util.Tool; import org.apache.commons.lang3.StringUtils; @@ -36,17 +34,17 @@ public List getCases() { List cases = new ArrayList(); for (CaseFile dataProvider : dataProviderList) { - for (SQLCase sqlCase : dataProvider.getCases(OpenMLDBConfig.FESQL_CASE_LEVELS)) { - if (!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_NAME) && - !OpenMLDBConfig.FESQL_CASE_NAME.equals(BaseTest.CaseNameFormat(sqlCase))) { + for (SQLCase sqlCase : dataProvider.getCases(OpenMLDBGlobalVar.CASE_LEVELS)) { + if (!StringUtils.isEmpty(OpenMLDBGlobalVar.CASE_NAME) && + !OpenMLDBGlobalVar.CASE_NAME.equals(BaseTest.CaseNameFormat(sqlCase))) { continue; } - if (!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_ID) - && !OpenMLDBConfig.FESQL_CASE_ID.equals(sqlCase.getId())) { + if (!StringUtils.isEmpty(OpenMLDBGlobalVar.CASE_ID) + && !OpenMLDBGlobalVar.CASE_ID.equals(sqlCase.getId())) { continue; } - if (!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_DESC) - && !OpenMLDBConfig.FESQL_CASE_DESC.equals(sqlCase.getDesc())) { + if (!StringUtils.isEmpty(OpenMLDBGlobalVar.CASE_DESC) + && !OpenMLDBGlobalVar.CASE_DESC.equals(sqlCase.getDesc())) { continue; } cases.add(sqlCase); @@ -57,27 +55,27 @@ public List getCases() { public static OpenMLDBCaseFileList dataProviderGenerator(String[] caseFiles) throws FileNotFoundException { - OpenMLDBCaseFileList fesqlDataProviderList = new OpenMLDBCaseFileList(); + OpenMLDBCaseFileList openMLDBCaseFileList = new OpenMLDBCaseFileList(); for 
(String caseFile : caseFiles) { - if (!StringUtils.isEmpty(OpenMLDBConfig.FESQL_CASE_PATH) - && !OpenMLDBConfig.FESQL_CASE_PATH.equals(caseFile)) { + if (!StringUtils.isEmpty(OpenMLDBGlobalVar.CASE_PATH) + && !OpenMLDBGlobalVar.CASE_PATH.equals(caseFile)) { continue; } - String casePath = Tool.getCasePath(OpenMLDBConfig.YAML_CASE_BASE_DIR, caseFile); + String casePath = Tool.getCasePath(OpenMLDBGlobalVar.YAML_CASE_BASE_DIR, caseFile); File file = new File(casePath); if (!file.exists()) { continue; } if (file.isFile()) { - fesqlDataProviderList.dataProviderList.add(CaseFile.parseCaseFile(casePath)); + openMLDBCaseFileList.dataProviderList.add(CaseFile.parseCaseFile(casePath)); } else { File[] files = file.listFiles(f -> f.getName().endsWith(".yaml")); for (File f : files) { - fesqlDataProviderList.dataProviderList.add(CaseFile.parseCaseFile(f.getAbsolutePath())); + openMLDBCaseFileList.dataProviderList.add(CaseFile.parseCaseFile(f.getAbsolutePath())); } } } - return fesqlDataProviderList; + return openMLDBCaseFileList; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index a95a843fe67..8c4ddb4e6f1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -33,6 +33,7 @@ public class SQLCase implements Serializable{ private String id; private String desc; private String mode; + private String json; private String db; private String version; private String longWindow; diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java index 767f6009b45..3bb89cfff22 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java @@ -19,6 +19,8 @@ import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import java.util.List; + /** * @author zhaowei * @date 2020/6/11 11:45 AM @@ -31,4 +33,10 @@ public class OpenMLDBGlobalVar { public static OpenMLDBInfo mainInfo; public static String dbName = "test_zw"; public static String tableStorageMode = "memory"; + public static List CASE_LEVELS; + public static String CASE_NAME; + public static String CASE_ID; + public static String CASE_DESC; + public static String CASE_PATH; + public static String YAML_CASE_BASE_DIR; } From 3d853cdc5280f73fc37e5f53542344e06d85fdfa Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 10 Aug 2022 10:19:48 +0800 Subject: [PATCH 117/172] modify case --- .../long_window/test_count_where.yaml | 330 +++++++++++++----- .../openmldb-ecosystem/pom.xml | 12 + .../openmldb/ecosystem/common/KafkaTest.java | 1 + .../openmldb-sdk-test/shell/cluster_dist.yaml | 32 ++ .../openmldb-sdk-test/shell/onebox.yaml | 24 ++ .../shell/standalone_dist.yaml | 11 + .../java_sdk_test/common/OpenMLDBConfig.java | 32 -- .../executor/StoredProcedureSQLExecutor.java | 2 +- .../cluster/v060/LongWindowTest.java | 2 +- .../openmldb/test_common/common/BaseTest.java | 6 +- .../model/OpenMLDBCaseFileList.java | 2 + .../openmldb/OpenMLDBGlobalVar.java | 45 ++- 12 files changed, 364 insertions(+), 135 deletions(-) create mode 100644 
test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/cluster_dist.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/onebox.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/standalone_dist.yaml diff --git a/cases/integration_test/long_window/test_count_where.yaml b/cases/integration_test/long_window/test_count_where.yaml index 84740eaa889..a1198020b52 100644 --- a/cases/integration_test/long_window/test_count_where.yaml +++ b/cases/integration_test/long_window/test_count_where.yaml @@ -13,49 +13,24 @@ # limitations under the License. db: test_zw -debugs: ["长窗口count_where,date类型","长窗口count_where,rows"] +debugs: ["长窗口count_where,第二个参数类型是date"] cases: - id: 0 desc: 长窗口count_where,date类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] rows: - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,2,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,3,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,4,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,5,34,1.5,2.5,1590738990004,"2020-05-05",false] - sql: | - SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - order: id - columns: ["id int","c1 string","w1_count bigint"] - rows: - - [1,"aa",1] - - [2,"aa",2] - - [3,"aa",3] - - [4,"aa",2] - - [5,"aa",1] - - - id: 0-1 - desc: 长窗口count_where,rows - longWindow: w1:2 - inputs: - - - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] - index: ["index1:c1:c7:0:latest"] - rows: - - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - 
[3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -72,16 +47,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -98,16 +72,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - 
[1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -124,16 +97,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -150,16 +122,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - 
[3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -176,16 +147,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -202,16 +172,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - 
[5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -228,16 +197,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -254,16 +222,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, 
count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -280,16 +247,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -306,16 +272,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 
CURRENT ROW); + SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: success: false msg: fail @@ -326,16 +291,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -352,16 +316,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING 
AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -378,16 +341,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -404,16 +366,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -430,16 +391,15 @@ cases: inputs: 
- columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -456,16 +416,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: success: false msg: fail @@ -476,16 +435,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: 
["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: success: false msg: fail @@ -496,16 +454,15 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); expect: success: false msg: fail @@ -516,16 +473,209 @@ cases: inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - 
[3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] - dataProvider: - - ["ROWS","ROWS_RANGE"] sql: | - SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 21 + desc: 长窗口count_where,rows + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 22 + desc: 长窗口count_where,第二个参数类型是int + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - 
[3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c3<23) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 23 + desc: 长窗口count_where,第二个参数类型是bigint + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c4<33) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 24 + desc: 长窗口count_where,第二个参数类型是float + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, 
count_where(c8,c5<1.4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 25 + desc: 长窗口count_where,第二个参数类型是double + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c6<2.4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 26 + desc: 长窗口count_where,第二个参数类型是timestamp + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c7<1590738990003) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 27 + desc: 
长窗口count_where,第二个参数类型是date + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c8<"2020-05-04") OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 28 + desc: 长窗口count_where,第二个参数类型是bool + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",false] + - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c9=true) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -535,6 +685,4 @@ cases: - [3,"aa",3] - [4,"aa",2] - [5,"aa",1] - - diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml b/test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml index 2e648fd5735..c860e8329c5 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/pom.xml @@ -16,6 +16,18 @@ 8 + + + com.4paradigm.openmldb + openmldb-test-common + ${project.version} + + + com.4paradigm.openmldb + openmldb-deploy + ${project.version} + + org.apache.kafka kafka-clients diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java index df515a897ab..79609a1a50b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java @@ -18,6 +18,7 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.common.BaseTest; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/cluster_dist.yaml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/cluster_dist.yaml new file mode 100644 index 00000000000..17119b4c799 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/cluster_dist.yaml @@ -0,0 +1,32 @@ +mode: cluster +zookeeper: + zk_cluster: 172.24.4.55:30019 + zk_root_path: /openmldb +nameserver: + - + endpoint: 172.24.4.55:30023 + path: /home/zhaowei01/openmldb-auto-test/tmp2/openmldb-ns-1 + is_local: true + - + endpoint: 172.24.4.55:30024 + path: /home/zhaowei01/openmldb-auto-test/tmp2/openmldb-ns-2 + is_local: true +tablet: + - + endpoint: 172.24.4.55:30020 + path: 
/home/zhaowei01/openmldb-auto-test/tmp2/openmldb-tablet-1 + is_local: true + - + endpoint: 172.24.4.55:30021 + path: /home/zhaowei01/openmldb-auto-test/tmp2/openmldb-tablet-2 + is_local: true + - + endpoint: 172.24.4.55:30022 + path: /home/zhaowei01/openmldb-auto-test/tmp2/openmldb-tablet-3 + is_local: true +taskmanager: + - + endpoint: 172.24.4.55:30026 + path: /home/zhaowei01/openmldb-auto-test/tmp2/openmldb-task_manager-1 + spark_master: local + is_local: true \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/onebox.yaml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/onebox.yaml new file mode 100644 index 00000000000..62e48ecb6f0 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/onebox.yaml @@ -0,0 +1,24 @@ +mode: cluster +zookeeper: + zk_cluster: 172.24.4.55:30019 + zk_root_path: /onebox +nameserver: + - + endpoint: 172.24.4.55:31000 + path: /home/zhaowei01/openmldb-auto-test/onebox/openmldb-0.5.3-linux + is_local: true +tablet: + - + endpoint: 172.24.4.55:31001 + path: /home/zhaowei01/openmldb-auto-test/onebox/openmldb-0.5.3-linux + is_local: true + - + endpoint: 172.24.4.55:31002 + path: /home/zhaowei01/openmldb-auto-test/onebox/openmldb-0.5.3-linux + is_local: true +taskmanager: + - + endpoint: 172.24.4.55:31004 + path: /home/zhaowei01/openmldb-auto-test/onebox/openmldb-0.5.3-linux + spark_master: local + is_local: true \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/standalone_dist.yaml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/standalone_dist.yaml new file mode 100644 index 00000000000..b3155d3c5c6 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/standalone_dist.yaml @@ -0,0 +1,11 @@ +mode: standalone +nameserver: + - + endpoint: 172.24.4.55:30013 + path: /home/zhaowei01/openmldb-auto-test/standalone/openmldb-standalone + 
is_local: true +tablet: + - + endpoint: 172.24.4.55:30014 + path: /home/zhaowei01/openmldb-auto-test/standalone/openmldb-standalone + is_local: true diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java index 0ffa75fbf09..0fd6553099d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java @@ -35,43 +35,11 @@ public class OpenMLDBConfig { public static final List VERSIONS; public static boolean INIT_VERSION_ENV = true; - public static final List CASE_LEVELS; - public static final String CASE_PATH; - public static final String CASE_NAME; - public static final String CASE_ID; - public static final String CASE_DESC; - public static final String YAML_CASE_BASE_DIR; public static final boolean ADD_REPORT_LOG; - public static final Properties CONFIG = Tool.getProperties("run_case.properties"); static { - String levelStr = System.getProperty("caseLevel"); - levelStr = StringUtils.isEmpty(levelStr) ? 
"0" : levelStr; - CASE_LEVELS = Arrays.stream(levelStr.split(",")).map(Integer::parseInt).collect(Collectors.toList()); - CASE_NAME = System.getProperty("caseName"); - CASE_ID = System.getProperty("caseId"); - CASE_DESC = System.getProperty("caseDesc"); - CASE_PATH = System.getProperty("casePath"); - YAML_CASE_BASE_DIR = System.getProperty("yamlCaseBaseDir"); - log.info("FESQL_CASE_LEVELS {}", CASE_LEVELS); - if (!StringUtils.isEmpty(CASE_NAME)) { - log.info("FESQL_CASE_NAME {}", CASE_NAME); - } - if (!StringUtils.isEmpty(CASE_ID)) { - log.info("FESQL_CASE_ID {}", CASE_ID); - } - if (!StringUtils.isEmpty(CASE_PATH)) { - log.info("FESQL_CASE_PATH {}", CASE_PATH); - } - if (!StringUtils.isEmpty(CASE_DESC)) { - log.info("FESQL_CASE_DESC {}", CASE_DESC); - } - if (!StringUtils.isEmpty(YAML_CASE_BASE_DIR)) { - log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); - } - String versionStr = System.getProperty("diffVersion"); if (StringUtils.isEmpty(versionStr)) { versionStr = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_versions"); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java index 896c499fbb2..4de340711be 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StoredProcedureSQLExecutor.java @@ -98,7 +98,7 @@ public void tearDown(String version,SqlExecutor executor) { } for (String spName : spNames) { String drop = "drop procedure " + spName + ";"; -// SDKUtil.ddl(executor, dbName, drop); + SDKUtil.ddl(executor, dbName, drop); } super.tearDown(version,executor); } diff 
--git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java index 92cec6149fc..ec638fb0937 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java @@ -15,7 +15,7 @@ public class LongWindowTest extends OpenMLDBTest { @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/long_window/test_count_where.yaml") + @Yaml(filePaths = "integration_test/long_window/test_count_where.yaml") @Story("longWindowDeploy") public void testLongWindow2(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kLongWindow).run(); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java index b97e3195452..bc7d880607d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java @@ -16,8 +16,6 @@ package com._4paradigm.openmldb.test_common.common; -import com._4paradigm.openmldb.test_common.common.LogProxy; -import com._4paradigm.openmldb.test_common.common.ReportLog; import com._4paradigm.openmldb.test_common.model.CaseFile; import com._4paradigm.openmldb.test_common.model.OpenMLDBCaseFileList; import 
com._4paradigm.openmldb.test_common.model.SQLCase; @@ -65,11 +63,11 @@ public void BeforeMethod(Method method, Object[] testData) { ReportLog.of().clean(); if(testData==null || testData.length==0) return; Assert.assertNotNull( - testData[0], "fail to run fesql test with null SQLCase: check yaml case"); + testData[0], "fail to run openmldb test with null SQLCase: check yaml case"); if (testData[0] instanceof SQLCase) { SQLCase sqlCase = (SQLCase) testData[0]; Assert.assertNotEquals(CaseFile.FAIL_SQL_CASE, - sqlCase.getDesc(), "fail to run fesql test with FAIL DATA PROVIDER SQLCase: check yaml case"); + sqlCase.getDesc(), "fail to run openmldb test with FAIL DATA PROVIDER SQLCase: check yaml case"); testName.set(String.format("[%d]%s.%s", testNum, method.getName(), CaseNameFormat(sqlCase))); } else { testName.set(String.format("[%d]%s.%s", testNum, method.getName(), null == testData[0] ? "null" : testData[0].toString())); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java index 6e52621cbdc..99f3922008b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java @@ -34,6 +34,8 @@ public List getCases() { List cases = new ArrayList(); for (CaseFile dataProvider : dataProviderList) { + System.out.println("--------"); + System.out.println(OpenMLDBGlobalVar.CASE_LEVELS); for (SQLCase sqlCase : dataProvider.getCases(OpenMLDBGlobalVar.CASE_LEVELS)) { if (!StringUtils.isEmpty(OpenMLDBGlobalVar.CASE_NAME) && !OpenMLDBGlobalVar.CASE_NAME.equals(BaseTest.CaseNameFormat(sqlCase))) { diff 
--git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java index 3bb89cfff22..fba4f9955f1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java @@ -18,13 +18,19 @@ import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.testng.collections.Lists; +import java.util.Arrays; import java.util.List; +import java.util.stream.Collectors; /** * @author zhaowei * @date 2020/6/11 11:45 AM */ +@Slf4j public class OpenMLDBGlobalVar { public static String env; public static String level; @@ -33,10 +39,37 @@ public class OpenMLDBGlobalVar { public static OpenMLDBInfo mainInfo; public static String dbName = "test_zw"; public static String tableStorageMode = "memory"; - public static List CASE_LEVELS; - public static String CASE_NAME; - public static String CASE_ID; - public static String CASE_DESC; - public static String CASE_PATH; - public static String YAML_CASE_BASE_DIR; + public static final List CASE_LEVELS; + public static final String CASE_NAME; + public static final String CASE_ID; + public static final String CASE_DESC; + public static final String CASE_PATH; + public static final String YAML_CASE_BASE_DIR; + + static { + String levelStr = System.getProperty("caseLevel"); + levelStr = StringUtils.isEmpty(levelStr) ? 
"0" : levelStr; + CASE_LEVELS = Arrays.stream(levelStr.split(",")).map(Integer::parseInt).collect(Collectors.toList()); + CASE_NAME = System.getProperty("caseName"); + CASE_ID = System.getProperty("caseId"); + CASE_DESC = System.getProperty("caseDesc"); + CASE_PATH = System.getProperty("casePath"); + YAML_CASE_BASE_DIR = System.getProperty("yamlCaseBaseDir"); + log.info("CASE_LEVELS {}", CASE_LEVELS); + if (!StringUtils.isEmpty(CASE_NAME)) { + log.info("CASE_NAME {}", CASE_NAME); + } + if (!StringUtils.isEmpty(CASE_ID)) { + log.info("CASE_ID {}", CASE_ID); + } + if (!StringUtils.isEmpty(CASE_PATH)) { + log.info("CASE_PATH {}", CASE_PATH); + } + if (!StringUtils.isEmpty(CASE_DESC)) { + log.info("CASE_DESC {}", CASE_DESC); + } + if (!StringUtils.isEmpty(YAML_CASE_BASE_DIR)) { + log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); + } + } } From c1d88c8ab2974fc78dba045bf48ae20f9aa4df43 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 10 Aug 2022 12:49:08 +0800 Subject: [PATCH 118/172] deploy --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 +- .../openmldb-deploy/test-suite/test_deploy.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index cf67073c54e..728c9eed82d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -6,7 +6,7 @@ main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz 
-tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3-linux.tar.gz +tmp=http://pkg.4paradigm.com:81/rtidb/test/tmp/openmldb-0.5.3-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index f512a758c26..7a98bf98efd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + From a69fc6f3b3485bf3b3a4e75d6413b4b420df44c1 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 16 Aug 2022 15:41:55 +0800 Subject: [PATCH 119/172] add long window case --- cases/integration_test/dml/test_delete.yaml | 7 +- .../long_window/long_window.yaml | 357 ----- .../long_window/test_count_where.yaml | 442 +++--- .../long_window/test_long_window.yaml | 319 +++++ .../long_window/test_long_window_batch.yaml | 34 + .../long_window/test_udaf.yaml | 787 +++++++++++ .../long_window/test_xxx_where.yaml | 1209 +++++++++++++++++ .../window/test_current_row.yaml | 3 +- .../src/main/resources/command.properties | 5 - .../java_sdk_test/checker/BaseChecker.java | 10 +- .../checker/CatCheckerByCli.java | 3 +- .../java_sdk_test/checker/Checker.java | 1 - .../checker/CheckerStrategy.java | 45 +- .../java_sdk_test/checker/ColumnsChecker.java | 4 +- .../checker/ColumnsCheckerByCli.java | 2 +- .../checker/ColumnsCheckerByJBDC.java | 2 +- .../java_sdk_test/checker/CountChecker.java | 2 +- .../checker/DeploymentCheckerByCli.java | 7 +- .../DeploymentContainsCheckerByCli.java | 2 +- .../checker/DeploymentCountCheckerByCli.java | 4 +- .../checker/DiffResultChecker.java | 16 +- .../checker/DiffVersionChecker.java | 2 +- .../java_sdk_test/checker/IndexChecker.java 
| 2 +- .../checker/IndexCountChecker.java | 2 +- .../java_sdk_test/checker/MessageChecker.java | 2 +- .../java_sdk_test/checker/OptionsChecker.java | 4 +- .../java_sdk_test/checker/PreAggChecker.java | 96 ++ .../checker/PreAggListChecker.java | 99 ++ .../java_sdk_test/checker/ResultChecker.java | 11 +- .../checker/ResultCheckerByCli.java | 13 +- .../checker/ResultCheckerByJDBC.java | 13 +- .../java_sdk_test/checker/SuccessChecker.java | 2 +- .../java_sdk_test/common/OpenMLDBConfig.java | 6 +- .../java_sdk_test/common/OpenMLDBTest.java | 4 +- .../executor/BaseSQLExecutor.java | 8 +- .../executor/CommandExecutor.java | 2 +- .../executor/DiffResultExecutor.java | 2 +- .../java_sdk_test/executor/JDBCExecutor.java | 2 +- .../cluster/sql_test/DMLTest.java | 13 + .../cluster/sql_test/LongWindowTest.java | 50 + .../cluster/sql_test/WindowTest.java | 24 +- .../cluster/v050/LongWindowTest.java | 29 - .../java_sdk_test/cluster/v060/DMLTest.java | 53 - .../cluster/v060/LongWindowTest.java | 23 - .../java_sdk_test/temp/TestPreAgg.java | 33 + .../openmldb-test-common/pom.xml | 8 +- .../test_common/bean/OpenMLDBResult.java | 1 + .../openmldb/test_common/model/CaseFile.java | 2 + .../test_common/model/ExpectDesc.java | 2 + .../test_common/model/PreAggTable.java | 13 + .../openmldb/test_common/model/SQLCase.java | 2 +- .../openmldb/OpenMLDBGlobalVar.java | 9 + .../openmldb/test_common/util/BinaryUtil.java | 148 ++ .../openmldb/test_common/util/DataUtil.java | 42 + .../openmldb/test_common/util/ResultUtil.java | 2 +- .../openmldb/test_common/util/SDKUtil.java | 111 +- .../openmldb/test_common/util/SQLUtil.java | 19 +- 57 files changed, 3322 insertions(+), 793 deletions(-) delete mode 100644 cases/integration_test/long_window/long_window.yaml create mode 100644 cases/integration_test/long_window/test_long_window.yaml create mode 100644 cases/integration_test/long_window/test_long_window_batch.yaml create mode 100644 cases/integration_test/long_window/test_udaf.yaml create mode 100644 
cases/integration_test/long_window/test_xxx_where.yaml create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggChecker.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggListChecker.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LongWindowTest.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestPreAgg.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/BinaryUtil.java diff --git a/cases/integration_test/dml/test_delete.yaml b/cases/integration_test/dml/test_delete.yaml index 51e0a39736f..d73709145d5 100644 --- a/cases/integration_test/dml/test_delete.yaml +++ b/cases/integration_test/dml/test_delete.yaml @@ -14,7 +14,7 @@ db: test_zw debugs: [] -version: 0.5.0 +version: 0.6.0 cases: - id: 0 @@ -194,7 +194,7 @@ cases: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] indexs: ["index1:c3:c7"] rows: - - 
[1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [1,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [3,"aa",1,3,3,1.1,2.1,1590738990000,"2020-05-01",true] sqls: @@ -204,7 +204,7 @@ cases: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] order: id rows: - - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true] + - [1,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true] - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - id: 11 @@ -572,6 +572,7 @@ cases: - select * from {0}; expect: columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + order: id rows: - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true] - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true] diff --git a/cases/integration_test/long_window/long_window.yaml b/cases/integration_test/long_window/long_window.yaml deleted file mode 100644 index 7344aca2cce..00000000000 --- a/cases/integration_test/long_window/long_window.yaml +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2021 4Paradigm -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -db: test_zw -debugs: ["options(long_window='w1:2h')"] -cases: - - - id: 0 - desc: options(long_window='w1:2') - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 1 - desc: options(long_window='w1:2d') - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7::latest"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 2 - desc: options(long_window='w1:2h') - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7::latest"] -# rows: -# - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] -# - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] -# - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] -# - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] -# - 
[5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sqls: - - deploy deploy_{0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); - - show deployment deploy_{0}; - expect: - deployment : - name: deploy_{0} - dbName: test_zw - sql: | - DEPLOY {0} SELECT - id, - c1, - sum(c4) OVER (w1) AS w1_c4_sum - FROM - {0} - WINDOW w1 AS (PARTITION BY {0}.c1 - ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) - ; - inColumns: - - 1,id,kInt32,NO - - 2,c1,kVarchar,NO - - 3,c3,kInt32,NO - - 4,c4,kInt64,NO - - 5,c5,kFloat,NO - - 6,c6,kDouble,NO - - 7,c7,kTimestamp,NO - - 8,c8,kDate,NO - outColumns: - - 1,id,kInt32,NO - - 2,c1,kVarchar,NO - - 3,w1_c4_sum,kInt64,NO - - - id: 3 - desc: options(long_window='w1:2m') - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2m') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 4 - desc: options(long_window='w1:2s') - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - 
[5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2s') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 5 - desc: avg算子(smallint, int, bigint, float, double, string) - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2') SELECT id, c1, avg(c4) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 6 - desc: min算子(smallint, int, bigint, float, double, string) - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2d') SELECT id, c1, min(c4) OVER w1 as w1_c4_min FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 7 - desc: max算子(smallint, int, bigint, float, double, string) - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 
double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2h') SELECT id, c1, max(c4) OVER w1 as w1_c4_max FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 8 - desc: count算子(smallint, int, bigint, float, double, string) - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2m') SELECT id, c1, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 9 - desc: 相同的PARTITION BY和ORDER BY,长窗口和短窗口可合并 - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, avg(c4) 
OVER w1 as w1_c4_avg from {0} - WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW), - w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 10 - desc: 相同的PARTITION BY和ORDER BY,长窗口之间可合并 - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2,w2:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg from {0} - WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), - w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 11 - desc: 相同的PARTITION BY和ORDER BY,-短窗口之间可合并(三个窗口 一个长窗口,俩个短窗口) - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg, count(c3) OVER w3 as w3_c3_count from {0} - WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), - w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), - 
w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 12 - desc: 不同的PARTITION BY和ORDER BY,长窗口和短窗口混合-不可合并窗口 - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c5) OVER w1 as w1_c5_sum, - avg(c5) OVER w2 as w2_c5_avg from {0} - WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), - w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 13 - desc: 窗口名不存在 - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w9090:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 14 - desc: options(long_window='w1:2y') - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - 
[2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w9090:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true - - - id: 15 - desc: options格式错误 - inputs: - - - columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] - indexs: ["index1:c1:c7"] - rows: - - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] - - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] - - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] - - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] - - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] - dataProvider: - - ["ROWS","ROWS_RANGE"] - sql: | - deploy {0} options(long_windows='w1:100') SELECT id, c1, avg(c5) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); - expect: - success: true diff --git a/cases/integration_test/long_window/test_count_where.yaml b/cases/integration_test/long_window/test_count_where.yaml index a1198020b52..331597a323b 100644 --- a/cases/integration_test/long_window/test_count_where.yaml +++ b/cases/integration_test/long_window/test_count_where.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: ["长窗口count_where,第二个参数类型是date"] +debugs: [] cases: - id: 0 @@ -23,10 +23,11 @@ cases: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true] - - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [2,"aa",1,2,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",1,3,32,1.3,2.3,1590738992000,"2020-05-03",true] - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true] - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | @@ -40,22 +41,28 @@ cases: - [3,"aa",3] - [4,"aa",2] - [5,"aa",1] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_where_c8_c2 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,1] - id: 1 desc: 长窗口count_where,smallint类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY 
{0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -68,19 +75,20 @@ cases: - id: 2 desc: 长窗口count_where,int类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -93,19 +101,20 @@ cases: - id: 3 desc: 长窗口count_where,bigint类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - 
[2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -118,19 +127,20 @@ cases: - id: 4 desc: 长窗口count_where,string类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -143,19 +153,20 @@ cases: - id: 5 desc: 长窗口count_where,timestamp类型 - longWindow: w1:2 + longWindow: 
w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -168,19 +179,20 @@ cases: - id: 6 desc: 长窗口count_where,row类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT 
id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -193,19 +205,21 @@ cases: - id: 7 desc: 长窗口count_where,bool类型 - longWindow: w1:2 + tags: ["TODO","bug,下个版本修复后测试,@qiliguo"] + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -218,19 +232,20 @@ cases: - id: 8 desc: 长窗口count_where,float类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - 
[1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -243,19 +258,20 @@ cases: - id: 9 desc: 长窗口count_where,double类型 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, 
count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -268,38 +284,40 @@ cases: - id: 10 desc: 长窗口count_where,第二个参数使用bool列 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: success: false msg: fail - id: 11 desc: 长窗口count_where,第二个参数使用= - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + 
- [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -312,19 +330,20 @@ cases: - id: 12 desc: 长窗口count_where,第二个参数使用!= - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -337,19 +356,20 @@ cases: - id: 13 desc: 长窗口count_where,第二个参数使用>= - longWindow: w1:2 + 
longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -362,19 +382,20 @@ cases: - id: 14 desc: 长窗口count_where,第二个参数使用<= - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - 
[5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -387,19 +408,20 @@ cases: - id: 15 desc: 长窗口count_where,第二个参数使用> - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -412,76 +434,80 @@ cases: - id: 17 desc: 长窗口count_where,第二个参数使用and - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: 
memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: success: false msg: fail - id: 18 desc: 长窗口count_where,第二个参数使用两个列 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count 
FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: success: false msg: fail - id: 19 desc: 长窗口count_where,第二个参数使用嵌套 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: success: false msg: fail - id: 20 desc: 长窗口count_where,第二个参数常量在前 - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - 
[3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -494,17 +520,18 @@ cases: - id: 21 desc: 长窗口count_where,rows - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: @@ -519,17 +546,18 @@ cases: - id: 22 desc: 长窗口count_where,第二个参数类型是int - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - 
[2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | SELECT id, c1, count_where(c8,c3<23) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: @@ -544,17 +572,18 @@ cases: - id: 23 desc: 长窗口count_where,第二个参数类型是bigint - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | SELECT id, c1, count_where(c8,c4<33) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: @@ -569,17 +598,18 @@ cases: - id: 24 desc: 长窗口count_where,第二个参数类型是float - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - 
[1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | SELECT id, c1, count_where(c8,c5<1.4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: @@ -594,17 +624,18 @@ cases: - id: 25 desc: 长窗口count_where,第二个参数类型是double - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | SELECT id, c1, count_where(c8,c6<2.4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: @@ -619,38 +650,60 @@ cases: - id: 26 desc: 长窗口count_where,第二个参数类型是timestamp - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 
bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c7<1590738990003) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c7<1590738993000) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: success: false msg: fail - id: 27 desc: 长窗口count_where,第二个参数类型是date - longWindow: w1:2 + longWindow: w1:2s inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | SELECT id, c1, count_where(c8,c8<"2020-05-04") OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + 
expect: + success: false + msg: fail + - + id: 28 + desc: 长窗口count_where,第二个参数类型是bool + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",false] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c9=true) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -661,21 +714,88 @@ cases: - [4,"aa",2] - [5,"aa",1] - - id: 28 - desc: 长窗口count_where,第二个参数类型是bool + id: 29 + desc: 长窗口count_where,w1:2 longWindow: w1:2 inputs: - columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] index: ["index1:c1:c7:0:latest"] + storage: memory rows: - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] - - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true] - - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true] - - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",false] - - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c9=true) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW 
w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 30 + desc: 长窗口count_where,磁盘表 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: SSD + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 31 + desc: 长窗口count_where,第二个参数类型是string + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01","true"] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02","true"] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03","true"] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04","false"] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05","false"] + sql: | + SELECT id, c1, count_where(c8,c9="true") OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_count bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",2] + - [5,"aa",1] + - + id: 32 + desc: 长窗口count_where,验证预聚合表 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 
timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] @@ -686,3 +806,5 @@ cases: - [4,"aa",2] - [5,"aa",1] + + diff --git a/cases/integration_test/long_window/test_long_window.yaml b/cases/integration_test/long_window/test_long_window.yaml new file mode 100644 index 00000000000..c8250a79e5c --- /dev/null +++ b/cases/integration_test/long_window/test_long_window.yaml @@ -0,0 +1,319 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: options(long_window='w1:2y') + longWindow: w1:2y + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",20,30,1.1,2.1,1262278860000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1293814860000,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1325350860000,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1356973260000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1356973260000,"2020-05-05"] + sql: | + SELECT id, c1, count(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: create aggregator failed + - + id: 1 + desc: options(long_window='w1:2d') + longWindow: w1:2d + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1577811660000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1577898060000,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1577984460000,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1578070860000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1578157260000,"2020-05-05"] + sql: | + SELECT id, c1, count(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2d PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c4 + type: bigint + rows: + - ["aa",1577811660000,1577984459999,2,2,null] + - ["aa",1577984460000,1578157259999,2,2,null] + - + id: 2 + desc: options(long_window='w1:2h') + longWindow: w1:2h + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1577811661000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1577815261000,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1577818861000,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1577822461000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1577826061000,"2020-05-05"] + sql: | + SELECT id, c1, count(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2h PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c4 + type: bigint + rows: + - ["aa",1577808000000,1577815199999,1,1,null] + - ["aa",1577815200000,1577822399999,2,2,null] + - + id: 3 + desc: options(long_window='w1:2m') + longWindow: w1:2m + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1577812141000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1577812201000,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1577812261000,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1577812321000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1577812381000,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa",1577812080000,1577812199999,1,30,null] + - ["aa",1577812200000,1577812319999,2,63,null] + - + id: 4 + desc: options(long_window='w1:2s') + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738992000,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738993000,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738995000,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,1,30,null] + - ["aa",1590738992000,1590738993999,2,63,null] + - + id: 5 + desc: 相同的PARTITION BY和ORDER BY,长窗口和短窗口可合并 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_long, count(c4) OVER w2 as w2_long from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint","w2_long bigint"] + rows: + - [1,"aa",30,1] + - [2,"aa",61,2] + - [3,"aa",93,3] + - [4,"aa",96,4] + - [5,"aa",99,4] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738990001,2,61,null] + - ["aa",1590738990002,1590738990003,2,65,null] + - + id: 6 + desc: 相同的PARTITION BY和ORDER BY,长窗口之间可合并 + longWindow: w1:2,w2:2 + inputs: + - 
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_long, min(c3) OVER w2 as w2_long from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint","w2_long int"] + rows: + - [1,"aa",30,20] + - [2,"aa",61,20] + - [3,"aa",93,20] + - [4,"aa",96,20] + - [5,"aa",99,21] + preAggList: + - + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738990001,2,61,null] + - ["aa",1590738990002,1590738990003,2,65,null] + - + name: pre_{db_name}_{sp_name}_w2_min_c3 + type: bigint + rows: + - ["aa",1590738990000,1590738990001,2,20,null] + - ["aa",1590738990002,1590738990003,2,22,null] + - + id: 7 + desc: 相同的PARTITION BY和ORDER BY,-短窗口之间可合并(三个窗口 一个长窗口,俩个短窗口) + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_long, avg(c3) OVER w2 as w2_c3_avg, count(c3) OVER w3 as w3_c3_count from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY 
{0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW), + w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint","w2_c3_avg double","w3_c3_count bigint"] + rows: + - [1,"aa",30,20,1] + - [2,"aa",61,20.5,2] + - [3,"aa",93,21.5,3] + - [4,"aa",96,22.5,4] + - [5,"aa",99,23.5,4] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738990001,2,61,null] + - ["aa",1590738990002,1590738990003,2,65,null] + - + id: 8 + desc: 不同的PARTITION BY和ORDER BY,长窗口和短窗口混合-不可合并窗口 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",20,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1,c3, sum(c4) OVER w1 as w1_long,count(c5) OVER w2 as w2_c5_count from {0} + WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW), + w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_long bigint","w2_c5_count bigint"] + rows: + - [1,"aa",20,30,1] + - [2,"aa",20,61,2] + - [3,"aa",20,93,3] + - [4,"aa",20,96,3] + - [5,"aa",24,99,1] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738990001,2,61,null] + - ["aa",1590738990002,1590738990003,2,65,null] + - + id: 9 + desc: 窗口名不存在 + longWindow: w2:2 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7:0:latest"] + rows: + - 
[1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: false + msg: long_windows option doesn't match window in sql \ No newline at end of file diff --git a/cases/integration_test/long_window/test_long_window_batch.yaml b/cases/integration_test/long_window/test_long_window_batch.yaml new file mode 100644 index 00000000000..d8bc4577423 --- /dev/null +++ b/cases/integration_test/long_window/test_long_window_batch.yaml @@ -0,0 +1,34 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: options格式错误 + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"] + sql: | + deploy {0} options(long_windows='w1:100') SELECT id, c1, avg(c5) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW); + expect: + success: true diff --git a/cases/integration_test/long_window/test_udaf.yaml b/cases/integration_test/long_window/test_udaf.yaml new file mode 100644 index 00000000000..8a8a67bf79d --- /dev/null +++ b/cases/integration_test/long_window/test_udaf.yaml @@ -0,0 +1,787 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: 长窗口count/avg/sum/max/min,date类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c8) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min","max","sum","avg","count"] + expectProvider: + 0: + order: id + columns: [ "id int","c1 string","w1_udaf date" ] + rows: + - [1,"aa","2020-05-01"] + - [2,"aa","2020-05-01"] + - [3,"aa","2020-05-01"] + - [4,"aa","2020-05-02"] + - [5,"aa","2020-05-03"] + preAgg: + name: pre_{db_name}_{sp_name}_w1_min_c8 + type: date + rows: + - ["aa",1590738990000,1590738991000,2,"2020-05-01",null] + - ["aa",1590738992000,1590738993000,2,"2020-05-03",null] + 1: + order: id + columns: [ "id int","c1 string","w1_udaf date" ] + rows: + - [1,"aa","2020-05-01"] + - [2,"aa","2020-05-02"] + - [3,"aa","2020-05-03"] + - [4,"aa","2020-05-04"] + - [5,"aa","2020-05-05"] + preAgg: + name: pre_{db_name}_{sp_name}_w1_max_c8 + type: date + rows: + - ["aa",1590738990000,1590738991000,2,"2020-05-02",null] + - ["aa",1590738992000,1590738993000,2,"2020-05-04",null] + 2: + success: false + msg: fail + 3: + success: false + msg: fail + 4: + order: id + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c8 + type: bigint + rows: + - ["aa",1590738990000,1590738991000,2,2,null] + - 
["aa",1590738992000,1590738993000,2,2,null] + - + id: 1 + desc: 长窗口count/avg/sum/max/min,smallint类型 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min","max","sum","avg","count"] + sql: | + SELECT id, c1, d[0](c2) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_udaf smallint"] + expectProvider: + 0: + rows: + - [1,"aa",1] + - [2,"aa",1] + - [3,"aa",1] + - [4,"aa",2] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_min_c2 + type: smallint + rows: + - ["aa",1590738990000,1590738991000,2,1,null] + - ["aa",1590738992000,1590738993000,2,3,null] + 1: + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",4] + - [5,"aa",5] + preAgg: + name: pre_{db_name}_{sp_name}_w1_max_c2 + type: smallint + rows: + - ["aa",1590738990000,1590738991000,2,2,null] + - ["aa",1590738992000,1590738993000,2,4,null] + 2: + rows: + - [1,"aa",1] + - [2,"aa",3] + - [3,"aa",6] + - [4,"aa",9] + - [5,"aa",12] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c2 + type: smallint + rows: + - ["aa",1590738990000,1590738991000,2,3,null] + - ["aa",1590738992000,1590738993000,2,7,null] + 3: + columns: ["id int","c1 string","w1_udaf double"] + rows: + - [1,"aa",1] + - [2,"aa",1.5] + - [3,"aa",2] + - [4,"aa",3] + - [5,"aa",4] + 4: + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: 
pre_{db_name}_{sp_name}_w1_count_c2 + type: bigint + rows: + - ["aa",1590738990000,1590738991000,2,2,null] + - ["aa",1590738992000,1590738993000,2,2,null] + - + id: 2 + desc: 长窗口count/avg/sum/max/min,int类型 # pre_{db_name}_{table_name}_{window_name}_{function_name}_{column_name}; + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min","max","sum","avg","count"] + sql: | + SELECT id, c1, d[0](c3) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_udaf int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + preAgg: + name: pre_{db_name}_{sp_name}_w1_min_c3 + type: int + rows: + - ["aa",1590738990000,1590738991999,2,20,null] + - ["aa",1590738992000,1590738993999,2,22,null] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",23] + - [5,"aa",24] + preAgg: + name: pre_{db_name}_{sp_name}_w1_max_c3 + type: int + rows: + - ["aa",1590738990000,1590738991999,2,21,null] + - ["aa",1590738992000,1590738993999,2,23,null] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",66] + - [5,"aa",69] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c3 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,41,null] + - ["aa",1590738992000,1590738993999,2,45,null] + 3: + columns: ["id int","c1 string","w1_udaf double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - 
[4,"aa",22] + - [5,"aa",23] +# preAgg: +# name: pre_{db_name}_{sp_name}_w1_avg_c3; +# rows: +# - ["aa",1590738990000,1590738991999,2,20.5,null] +# - ["aa",1590738992000,1590738993999,2,22.5,null] + 4: + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c3 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 3 + desc: 长窗口count/avg/sum/max/min,bigint类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min","max","sum","avg","count"] + sql: | + SELECT id, c1, d[0](c4) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_udaf bigint"] + expectProvider: + 0: + rows: + - [1,"aa",30] + - [2,"aa",30] + - [3,"aa",30] + - [4,"aa",31] + - [5,"aa",32] + preAgg: + name: pre_{db_name}_{sp_name}_w1_min_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,30,null] + - ["aa",1590738992000,1590738993999,2,32,null] + 1: + rows: + - [1,"aa",30] + - [2,"aa",31] + - [3,"aa",32] + - [4,"aa",33] + - [5,"aa",34] + preAgg: + name: pre_{db_name}_{sp_name}_w1_max_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,31,null] + - ["aa",1590738992000,1590738993999,2,33,null] + 2: + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + 
preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4; + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,61,null] + - ["aa",1590738992000,1590738993999,2,65,null] + 3: + columns: ["id int","c1 string","w1_udaf double"] + rows: + - [1,"aa",30] + - [2,"aa",30.5] + - [3,"aa",31] + - [4,"aa",32] + - [5,"aa",33] + 4: + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c4 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 4 + desc: 长窗口count/avg/sum/max/min,string类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 string","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min","max","sum","avg","count"] + sql: | + SELECT id, c1, d[0](c8) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expectProvider: + 0: + order: id + columns: [ "id int","c1 string","w1_udaf string" ] + rows: + - [1,"aa","2020-05-01"] + - [2,"aa","2020-05-01"] + - [3,"aa","2020-05-01"] + - [4,"aa","2020-05-02"] + - [5,"aa","2020-05-03"] + preAgg: + name: pre_{db_name}_{sp_name}_w1_min_c8 + type: string + rows: + - ["aa",1590738990000,1590738991999,2,"2020-05-01",null] + - ["aa",1590738992000,1590738993999,2,"2020-05-03",null] + 1: + order: id + columns: [ "id int","c1 string","w1_udaf string" ] + rows: + - [1,"aa","2020-05-01"] + - [2,"aa","2020-05-02"] + - [3,"aa","2020-05-03"] + - 
[4,"aa","2020-05-04"] + - [5,"aa","2020-05-05"] + preAgg: + name: pre_{db_name}_{sp_name}_w1_max_c8 + type: string + rows: + - ["aa",1590738990000,1590738991999,2,"2020-05-02",null] + - ["aa",1590738992000,1590738993999,2,"2020-05-04",null] + 2: + success: false + msg: fail + 3: + success: false + msg: fail + 4: + order: id + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c8 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 5 + desc: 长窗口count/avg/sum/max/min,timestamp类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min","max","sum","avg","count"] + sql: | + SELECT id, c1, d[0](c7) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expectProvider: + 0: + order: id + columns: [ "id int","c1 string","w1_udaf timestamp" ] + rows: + - [1,"aa",1590738990000] + - [2,"aa",1590738990000] + - [3,"aa",1590738990000] + - [4,"aa",1590738991000] + - [5,"aa",1590738992000] +# preAgg: +# name: pre_{db_name}_{sp_name}_w1_min_c7 +# type: timestamp +# rows: +# - ["aa",1590738990000,1590738991999,2,1590738990000,null] # 101110010 01011111 01101110 10110011 10110000 +# - ["aa",1590738992000,1590738993999,2,1590738992000,null] + 1: + order: id + columns: [ "id int","c1 string","w1_udaf timestamp" ] + rows: + - 
[1,"aa",1590738990000] + - [2,"aa",1590738991000] + - [3,"aa",1590738992000] + - [4,"aa",1590738993000] + - [5,"aa",1590738994000] +# preAgg: +# name: pre_{db_name}_{sp_name}_w1_max_c7 +# type: timestamp +# rows: +# - ["aa",1590738990000,1590738991999,2,1590738993000,null] +# - ["aa",1590738992000,1590738993999,2,1590738994000,null] + 2: + order: id + columns: [ "id int","c1 string","w1_udaf timestamp" ] + rows: + - [1,"aa",1590738990000] + - [2,"aa",3181477981000] + - [3,"aa",4772216973000] + - [4,"aa",4772216976000] + - [5,"aa",4772216979000] +# preAgg: +# name: pre_{db_name}_{sp_name}_w1_sum_c7 +# type: bigtimestampint +# rows: +# - ["aa",1590738990000,1590738991999,2,3181477981000,null] +# - ["aa",1590738992000,1590738993999,2,3181477985000,null] + 3: + success: false + msg: fail + 4: + order: id + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c7 + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 6 + desc: 长窗口count/avg/sum/max/min,row类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min","max","sum","avg","count"] + sql: | + SELECT id, c1, d[0](*) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expectProvider: + 0: + success: false + msg: fail + 1: + success: false + msg: fail + 2: + success: false + 
msg: fail + 3: + success: false + msg: fail + 4: + order: id + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_ + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 7 + desc: 长窗口count/avg/sum/max/min,bool类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c9) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min","max","sum","avg","count"] + expectProvider: + 0: + success: false + msg: fail + 1: + success: false + msg: fail + 2: + success: false + msg: fail + 3: + success: false + msg: fail + 4: + order: id + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c9; + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 8 + desc: 长窗口count/avg/sum/max/min,float类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - 
[3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c5) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min","max","sum","avg","count"] + expect: + order: id + columns: ["id int","c1 string","w1_udaf float"] + expectProvider: + 0: + rows: + - [1,"aa",1.1] + - [2,"aa",1.1] + - [3,"aa",1.1] + - [4,"aa",1.2] + - [5,"aa",1.3] +# preAgg: +# name: pre_{db_name}_{sp_name}_w1_min_c5; +# type: float +# rows: +# - ["aa",1590738990000,1590738991999,2,1.1,null] +# - ["aa",1590738992000,1590738993999,2,1.3,null] + 1: + rows: + - [1,"aa",1.1] + - [2,"aa",1.2] + - [3,"aa",1.3] + - [4,"aa",1.4] + - [5,"aa",1.5] + 2: + rows: + - [1,"aa",1.1] + - [2,"aa",2.3] + - [3,"aa",3.6] + - [4,"aa",3.9] + - [5,"aa",4.2] + 3: + columns: ["id int","c1 string","w1_udaf double"] + rows: + - [1,"aa",1.1] + - [2,"aa",1.15] + - [3,"aa",1.2] + - [4,"aa",1.3] + - [5,"aa",1.4] + 4: + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c5; + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 9 + desc: 长窗口count/avg/sum/max/min,double类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT 
id, c1, d[0](c6) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min","max","sum","avg","count"] + expect: + order: id + columns: ["id int","c1 string","w1_udaf double"] + expectProvider: + 0: + rows: + - [1,"aa",2.1] + - [2,"aa",2.1] + - [3,"aa",2.1] + - [4,"aa",2.2] + - [5,"aa",2.3] + 1: + rows: + - [1,"aa",2.1] + - [2,"aa",2.2] + - [3,"aa",2.3] + - [4,"aa",2.4] + - [5,"aa",2.5] + 2: + rows: + - [1,"aa",2.1] + - [2,"aa",4.3] + - [3,"aa",6.6] + - [4,"aa",6.9] + - [5,"aa",7.2] + 3: + columns: ["id int","c1 string","w1_udaf double"] + rows: + - [1,"aa",2.1] + - [2,"aa",2.15] + - [3,"aa",2.2] + - [4,"aa",2.3] + - [5,"aa",2.4] + 4: + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c6; + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + - + id: 10 + desc: 长窗口count/avg/sum/max/min,rows + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min","max","sum","avg","count"] + expect: + order: id + columns: ["id int","c1 string","w1_udaf int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] 
+ 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",23] + - [5,"aa",24] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",66] + - [5,"aa",69] + 3: + columns: ["id int","c1 string","w1_udaf double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",22] + - [5,"aa",23] + 4: + columns: ["id int","c1 string","w1_udaf bigint"] + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + preAgg: + name: pre_{db_name}_{sp_name}_w1_count_c3; + type: bigint + rows: + - ["aa",1590738990000,1590738991999,2,2,null] + - ["aa",1590738992000,1590738993999,2,2,null] + diff --git a/cases/integration_test/long_window/test_xxx_where.yaml b/cases/integration_test/long_window/test_xxx_where.yaml new file mode 100644 index 00000000000..572f7ee497a --- /dev/null +++ b/cases/integration_test/long_window/test_xxx_where.yaml @@ -0,0 +1,1209 @@ +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db: test_zw +debugs: [] +cases: + - + id: 0 + desc: 长窗口xxx_where,date类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c8,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + success: false + msg: fail + - + id: 1 + desc: 长窗口xxx_where,smallint类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + sql: | + SELECT id, c1, d[0](c2,c2>2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_where smallint"] + expectProvider: + 0: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + 1: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",3] + - [4,"aa",4] + - [5,"aa",5] + 2: + rows: + - [1,"aa",null] + - 
[2,"aa",null] + - [3,"aa",3] + - [4,"aa",7] + - [5,"aa",12] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",3] + - [4,"aa",3.5] + - [5,"aa",4] + - + id: 2 + desc: 长窗口xxx_where,int类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",1,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",1,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + sql: | + SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + preAgg: + name: pre_{db_name}_{sp_name}_w1_min_where_c3_c2 + type: int + rows: + - ["aa",1590738990000,1590738991999,2,20,1] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + preAgg: + name: pre_{db_name}_{sp_name}_w1_max_where_c3_c2 + type: int + rows: + - ["aa",1590738990000,1590738991999,2,21,1] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_where_c3_c2 + type: int + rows: + - ["aa",1590738990000,1590738991999,2,41,1] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] +# preAgg: +# name: pre_{db_name}_{sp_name}_w1_avg_where_c3_c2 +# type: int +# rows: +# - 
["aa",1590738990000,1590738991999,2,20,1] + - + id: 3 + desc: 长窗口xxx_where,bigint类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + sql: | + SELECT id, c1, d[0](c4,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_where bigint"] + expectProvider: + 0: + rows: + - [1,"aa",30] + - [2,"aa",30] + - [3,"aa",30] + - [4,"aa",31] + - [5,"aa",32] + 1: + rows: + - [1,"aa",30] + - [2,"aa",31] + - [3,"aa",32] + - [4,"aa",32] + - [5,"aa",32] + 2: + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",63] + - [5,"aa",32] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",30] + - [2,"aa",30.5] + - [3,"aa",31] + - [4,"aa",31.5] + - [5,"aa",32] + - + id: 4 + desc: 长窗口xxx_where,string类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + 
sql: | + SELECT id, c1, d[0](c1,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 5 + desc: 长窗口xxx_where,timestamp类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + sql: | + SELECT id, c1, d[0](c7,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 6 + desc: 长窗口xxx_where,row类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + sql: | + SELECT id, c1, d[0](*,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 7 + desc: 长窗口xxx_where,bool类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 
smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c9,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + success: false + msg: fail + - + id: 8 + desc: 长窗口xxx_where,float类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c5,c2>2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where float"] + expectProvider: + 0: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",1.3] + - [4,"aa",1.3] + - [5,"aa",1.3] + 1: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",1.3] + - [4,"aa",1.4] + - [5,"aa",1.5] + 2: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",1.3] + - [4,"aa",2.7] + - [5,"aa",4.2] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",null] 
+ - [2,"aa",null] + - [3,"aa",1.3] + - [4,"aa",1.35] + - [5,"aa",1.4] + - + id: 9 + desc: 长窗口xxx_where,double类型 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c6,c2>2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where double"] + expectProvider: + 0: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",2.3] + - [4,"aa",2.3] + - [5,"aa",2.3] + 1: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",2.3] + - [4,"aa",2.4] + - [5,"aa",2.5] + 2: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",2.3] + - [4,"aa",4.7] + - [5,"aa",7.2] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",2.3] + - [4,"aa",2.35] + - [5,"aa",2.4] + - + id: 10 + desc: 长窗口xxx_where,第二个参数使用bool列 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + 
SELECT id, c1, d[0](c3,c9) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + success: false + msg: fail + - + id: 11 + desc: 长窗口xxx_where,第二个参数使用= + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c2,c2=4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where smallint"] + expectProvider: + 0: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",null] + - [4,"aa",4] + - [5,"aa",4] + 1: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",null] + - [4,"aa",4] + - [5,"aa",4] + 2: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",null] + - [4,"aa",4] + - [5,"aa",4] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",null] + - [4,"aa",4] + - [5,"aa",4] + - + id: 12 + desc: 长窗口xxx_where,第二个参数使用!= + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - 
[3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c2,c2!=4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where smallint"] + expectProvider: + 0: + rows: + - [1,"aa",1] + - [2,"aa",1] + - [3,"aa",1] + - [4,"aa",2] + - [5,"aa",3] + 1: + rows: + - [1,"aa",1] + - [2,"aa",2] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",5] + 2: + rows: + - [1,"aa",1] + - [2,"aa",3] + - [3,"aa",6] + - [4,"aa",5] + - [5,"aa",8] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",1] + - [2,"aa",1.5] + - [3,"aa",2] + - [4,"aa",2.5] + - [5,"aa",4] + - + id: 13 + desc: 长窗口xxx_where,第二个参数使用>= + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c2,c2>=3) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where smallint"] + expectProvider: + 0: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",3] + - [4,"aa",3] + - [5,"aa",3] + 1: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",3] + - [4,"aa",4] + - 
[5,"aa",5] + 2: + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",3] + - [4,"aa",7] + - [5,"aa",12] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",null] + - [2,"aa",null] + - [3,"aa",3] + - [4,"aa",3.5] + - [5,"aa",4] + - + id: 14 + desc: 长窗口xxx_where,第二个参数使用<= + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c2<=3) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 17 + desc: 长窗口xxx_where,第二个参数使用and + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - 
[3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + sql: | + SELECT id, c1, d[0](c3,c2<4 and c2>1) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 18 + desc: 长窗口xxx_where,第二个参数使用两个列 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + sql: | + SELECT id, c1, d[0](c3,c3>c2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + success: false + msg: fail + - + id: 19 + desc: 长窗口xxx_where,第二个参数使用嵌套 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,if_null(c2,0)>4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY 
{0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + success: false + msg: fail + - + id: 20 + desc: 长窗口xxx_where,第二个参数常量在前 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,4>c2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 21 + desc: 长窗口xxx_where,rows + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - 
[5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 22 + desc: 长窗口xxx_where,第二个参数类型是int + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c3<23) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: 
["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 23 + desc: 长窗口xxx_where,第二个参数类型是bigint + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c4<33) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 24 + desc: 长窗口xxx_where,第二个参数类型是float + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - 
[5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c5<1.4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 25 + desc: 长窗口xxx_where,第二个参数类型是double + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c6<2.4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + 
columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 26 + desc: 长窗口xxx_where,第二个参数类型是timestamp + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c7<1590738993000) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + success: false + msg: fail + - + id: 27 + desc: 长窗口xxx_where,第二个参数类型是date + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c8<"2020-05-04") OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + success: false + msg: fail + - + id: 28 + desc: 长窗口xxx_where,第二个参数类型是bool + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 
int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",false] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c9=true) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - [1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + - + id: 29 + desc: 长窗口xxx_where,w1:2 + longWindow: w1:2 + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + 
expect: + success: false + msg: fail + - + id: 30 + desc: 长窗口xxx_where,磁盘表 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"] + index: ["index1:c1:c7"] + storage: SSD + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] + sql: | + SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + success: false + msg: fail + - + id: 31 + desc: 长窗口count_where,第二个参数类型是string + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"] + index: ["index1:c1:c7:0:latest"] + storage: memory + rows: + - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01","true"] + - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02","true"] + - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03","true"] + - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04","false"] + - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05","false"] + sql: | + SELECT id, c1, d[0](c3,c9="true") OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + dataProvider: + - ["min_where","max_where","sum_where","avg_where"] + expect: + order: id + columns: ["id int","c1 string","w1_where int"] + expectProvider: + 0: + rows: + - [1,"aa",20] + - [2,"aa",20] + - [3,"aa",20] + - [4,"aa",21] + - [5,"aa",22] + 1: + rows: + - [1,"aa",20] + - [2,"aa",21] + - [3,"aa",22] + - [4,"aa",22] + - [5,"aa",22] + 2: + rows: + - 
[1,"aa",20] + - [2,"aa",41] + - [3,"aa",63] + - [4,"aa",43] + - [5,"aa",22] + 3: + columns: ["id int","c1 string","w1_where double"] + rows: + - [1,"aa",20] + - [2,"aa",20.5] + - [3,"aa",21] + - [4,"aa",21.5] + - [5,"aa",22] + + diff --git a/cases/integration_test/window/test_current_row.yaml b/cases/integration_test/window/test_current_row.yaml index a70e63b570c..5b2243e23a4 100644 --- a/cases/integration_test/window/test_current_row.yaml +++ b/cases/integration_test/window/test_current_row.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: [] +debugs: ["两个窗口,一个rows,一个rows_range,current_row"] version: 0.6.0 cases: - id: 0 @@ -653,7 +653,6 @@ cases: - [ "bb",24,null ] - id: 28 desc: 两个窗口,一个rows,一个rows_range,current_row - tags: ["TODO","bug,修复后验证"] inputs: - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ] indexs: [ "index1:c1:c7" ] diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties index 81f31828e0f..8b137891791 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/command.properties @@ -1,6 +1 @@ -#远程执行命令时需要进行配置,本地执行则不需要进行配置 -remote_ip=172.24.4.55 -remote_user=zhaowei01 -remote_password=1qaz0p;/ -#remote_private_key_path=src/main/resources/zw-mac-id_rsa diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java index 212f39806d1..07fa19a2b50 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/BaseChecker.java @@ -27,17 +27,17 @@ * @date 2020/6/16 3:37 PM */ public abstract class BaseChecker implements Checker { - protected OpenMLDBResult fesqlResult; + protected OpenMLDBResult openMLDBResult; protected Map resultMap; protected ExpectDesc expect; - public BaseChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ + public BaseChecker(ExpectDesc expect, OpenMLDBResult openMLDBResult){ this.expect = expect; - this.fesqlResult = fesqlResult; + this.openMLDBResult = openMLDBResult; } - public BaseChecker(OpenMLDBResult fesqlResult, Map resultMap){ - this.fesqlResult = fesqlResult; + public BaseChecker(OpenMLDBResult openMLDBResult, Map resultMap){ + this.openMLDBResult = openMLDBResult; this.resultMap = resultMap; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java index 0a0502f6256..3481bdc8970 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CatCheckerByCli.java @@ -2,7 +2,6 @@ import com._4paradigm.openmldb.test_common.command.CommandUtil; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.CatFile; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import 
com._4paradigm.openmldb.test_common.util.SQLUtil; @@ -22,7 +21,7 @@ public void check() throws Exception { log.info("cat check"); CatFile expectCat = expect.getCat(); String path = expectCat.getPath(); - path = SQLUtil.formatSql(path, fesqlResult.getTableNames()); + path = SQLUtil.formatSql(path, openMLDBResult.getTableNames()); String command = "cat "+path; List actualList = CommandUtil.run(command); List expectList = expectCat.getLines(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/Checker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/Checker.java index 050bd035918..0db10f4baa1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/Checker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/Checker.java @@ -17,6 +17,5 @@ package com._4paradigm.openmldb.java_sdk_test.checker; public interface Checker { - void check() throws Exception; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java index 6d38d16c138..cf585b0fba8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java @@ -17,6 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.checker; +import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import 
com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; @@ -30,60 +31,66 @@ public class CheckerStrategy { - public static List build(SQLCase fesqlCase, OpenMLDBResult fesqlResult, SQLCaseType executorType) { + public static List build(SqlExecutor executor,SQLCase sqlCase, OpenMLDBResult openMLDBResult, SQLCaseType executorType) { List checkList = new ArrayList<>(); - if (null == fesqlCase) { + if (null == sqlCase) { return checkList; } - ExpectDesc expect = fesqlCase.getOnlineExpectByType(executorType); + ExpectDesc expect = sqlCase.getOnlineExpectByType(executorType); - checkList.add(new SuccessChecker(expect, fesqlResult)); + checkList.add(new SuccessChecker(expect, openMLDBResult)); if (CollectionUtils.isNotEmpty(expect.getColumns())) { if(executorType==SQLCaseType.kSQLITE3 || executorType==SQLCaseType.kMYSQL){ - checkList.add(new ColumnsCheckerByJBDC(expect, fesqlResult)); + checkList.add(new ColumnsCheckerByJBDC(expect, openMLDBResult)); }else if(executorType==SQLCaseType.kCLI||executorType==SQLCaseType.kStandaloneCLI||executorType==SQLCaseType.kClusterCLI){ - checkList.add(new ColumnsCheckerByCli(expect, fesqlResult)); + checkList.add(new ColumnsCheckerByCli(expect, openMLDBResult)); }else { - checkList.add(new ColumnsChecker(expect, fesqlResult)); + checkList.add(new ColumnsChecker(expect, openMLDBResult)); } } if (!expect.getRows().isEmpty()) { if(executorType==SQLCaseType.kSQLITE3){ - checkList.add(new ResultCheckerByJDBC(expect, fesqlResult)); + checkList.add(new ResultCheckerByJDBC(expect, openMLDBResult)); }else if(executorType==SQLCaseType.kCLI||executorType==SQLCaseType.kStandaloneCLI||executorType==SQLCaseType.kClusterCLI){ - checkList.add(new ResultCheckerByCli(expect, fesqlResult)); + checkList.add(new ResultCheckerByCli(expect, openMLDBResult)); }else { - checkList.add(new ResultChecker(expect, fesqlResult)); + checkList.add(new ResultChecker(expect, openMLDBResult)); } } if 
(expect.getCount() >= 0) { - checkList.add(new CountChecker(expect, fesqlResult)); + checkList.add(new CountChecker(expect, openMLDBResult)); } if(MapUtils.isNotEmpty(expect.getOptions())){ - checkList.add(new OptionsChecker(expect, fesqlResult)); + checkList.add(new OptionsChecker(expect, openMLDBResult)); } if(CollectionUtils.isNotEmpty(expect.getIdxs())){ - checkList.add(new IndexChecker(expect, fesqlResult)); + checkList.add(new IndexChecker(expect, openMLDBResult)); } if (expect.getIndexCount() >= 0) { - checkList.add(new IndexCountChecker(expect, fesqlResult)); + checkList.add(new IndexCountChecker(expect, openMLDBResult)); } if(expect.getDeployment()!=null){ - checkList.add(new DeploymentCheckerByCli(expect, fesqlResult)); + checkList.add(new DeploymentCheckerByCli(expect, openMLDBResult)); } if(expect.getDeploymentContains()!=null){ - checkList.add(new DeploymentContainsCheckerByCli(expect, fesqlResult)); + checkList.add(new DeploymentContainsCheckerByCli(expect, openMLDBResult)); } if(expect.getDeploymentCount()>=0){ - checkList.add(new DeploymentCountCheckerByCli(expect, fesqlResult)); + checkList.add(new DeploymentCountCheckerByCli(expect, openMLDBResult)); } if(expect.getCat()!=null){ - checkList.add(new CatCheckerByCli(expect, fesqlResult)); + checkList.add(new CatCheckerByCli(expect, openMLDBResult)); } if(StringUtils.isNotEmpty(expect.getMsg())){ - checkList.add(new MessageChecker(expect, fesqlResult)); + checkList.add(new MessageChecker(expect, openMLDBResult)); + } + if(expect.getPreAgg()!=null){ + checkList.add(new PreAggChecker(executor, expect, openMLDBResult)); + } + if(CollectionUtils.isNotEmpty(expect.getPreAggList())){ + checkList.add(new PreAggListChecker(executor, expect, openMLDBResult)); } return checkList; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java index b352280c23b..f9cf540610d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsChecker.java @@ -43,8 +43,8 @@ public void check() throws Exception { if (expectColumns == null || expectColumns.size() == 0) { return; } - List columnNames = fesqlResult.getColumnNames(); - List columnTypes = fesqlResult.getColumnTypes(); + List columnNames = openMLDBResult.getColumnNames(); + List columnTypes = openMLDBResult.getColumnTypes(); Assert.assertEquals(expectColumns.size(),columnNames.size(), "Illegal schema size"); for (int i = 0; i < expectColumns.size(); i++) { // Assert.assertEquals(columnNames.get(i)+" "+columnTypes.get(i),expectColumns.get(i)); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java index 0a81899ef47..b6b9feac768 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByCli.java @@ -42,7 +42,7 @@ public void check() throws Exception { if (expectColumns == null || expectColumns.size() == 0) { return; } - List columnNames = fesqlResult.getColumnNames(); + List columnNames = openMLDBResult.getColumnNames(); Assert.assertEquals(expectColumns.size(),columnNames.size(), "Illegal schema size"); for (int i = 0; i 
< expectColumns.size(); i++) { Assert.assertEquals(columnNames.get(i), Table.getColumnName(expectColumns.get(i)).replace(" ","")); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java index 1e3c3f0abda..49752a46b99 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ColumnsCheckerByJBDC.java @@ -42,7 +42,7 @@ public void check() throws Exception { if (expectColumns == null || expectColumns.size() == 0) { return; } - List columnNames = fesqlResult.getColumnNames(); + List columnNames = openMLDBResult.getColumnNames(); Assert.assertEquals(expectColumns.size(),columnNames.size(), "Illegal schema size"); for (int i = 0; i < expectColumns.size(); i++) { Assert.assertEquals(columnNames.get(i), Table.getColumnName(expectColumns.get(i)).replace(" ","")); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java index 0a8e707b2dd..0aa916d237e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CountChecker.java @@ -38,7 +38,7 @@ public CountChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ public void check() throws Exception { log.info("count 
check"); int expectCount = expect.getCount(); - int actual = fesqlResult.getCount(); + int actual = openMLDBResult.getCount(); Assert.assertEquals(actual,expectCount,"count验证失败"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java index c3b202627e7..970fc673a8b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCheckerByCli.java @@ -18,7 +18,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com._4paradigm.openmldb.test_common.util.SQLUtil; @@ -40,15 +39,15 @@ public void check() throws Exception { log.info("deployment check"); OpenmldbDeployment expectDeployment = expect.getDeployment(); String name = expectDeployment.getName(); - name = SQLUtil.formatSql(name, fesqlResult.getTableNames()); + name = SQLUtil.formatSql(name, openMLDBResult.getTableNames()); expectDeployment.setName(name); String sql = expectDeployment.getSql(); - sql = SQLUtil.formatSql(sql, fesqlResult.getTableNames()); + sql = SQLUtil.formatSql(sql, openMLDBResult.getTableNames()); expectDeployment.setSql(sql); if (expectDeployment == null) { return; } - OpenmldbDeployment actualDeployment = fesqlResult.getDeployment(); + OpenmldbDeployment actualDeployment = openMLDBResult.getDeployment(); Assert.assertEquals(actualDeployment,expectDeployment); } } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java index 3ad385e9f50..c82260708dd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentContainsCheckerByCli.java @@ -42,7 +42,7 @@ public void check() throws Exception { if (expectDeployment == null) { return; } - List actualDeployments = fesqlResult.getDeployments(); + List actualDeployments = openMLDBResult.getDeployments(); long count = actualDeployments.stream() .filter(d -> d.getDbName().equals(expectDeployment.getDbName()) && d.getName().equals(expectDeployment.getName())) .count(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java index c75a200920c..e76267e4fbb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DeploymentCountCheckerByCli.java @@ -39,8 +39,8 @@ public DeploymentCountCheckerByCli(ExpectDesc expect, OpenMLDBResult fesqlResult public void check() throws Exception { log.info("deployment count check"); int expectDeploymentCount = expect.getDeploymentCount(); - List actualDeployments = 
fesqlResult.getDeployments(); - Integer deploymentCount = fesqlResult.getDeploymentCount(); + List actualDeployments = openMLDBResult.getDeployments(); + Integer deploymentCount = openMLDBResult.getDeploymentCount(); Assert.assertEquals((int) deploymentCount,expectDeploymentCount); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java index bf1cdf1231c..e6a39441957 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffResultChecker.java @@ -51,12 +51,12 @@ public void check() throws Exception { public void checkMysql(OpenMLDBResult mysqlResult) throws Exception { log.info("diff mysql check"); //验证success - boolean fesqlOk = fesqlResult.isOk(); + boolean fesqlOk = openMLDBResult.isOk(); boolean sqlite3Ok = mysqlResult.isOk(); Assert.assertEquals(fesqlOk,sqlite3Ok,"success 不一致,fesql:"+fesqlOk+",sqlite3:"+sqlite3Ok); if(!fesqlOk) return; //验证result - List> fesqlRows = fesqlResult.getResult(); + List> fesqlRows = openMLDBResult.getResult(); List> mysqlRows = mysqlResult.getResult(); log.info("fesqlRows:{}", fesqlRows); log.info("mysqlRows:{}", mysqlRows); @@ -67,12 +67,12 @@ public void checkMysql(OpenMLDBResult mysqlResult) throws Exception { public void checkSqlite3(OpenMLDBResult sqlite3Result) throws Exception { log.info("diff sqlite3 check"); //验证success - boolean fesqlOk = fesqlResult.isOk(); + boolean fesqlOk = openMLDBResult.isOk(); boolean sqlite3Ok = sqlite3Result.isOk(); Assert.assertEquals(fesqlOk,sqlite3Ok,"success 不一致,fesql:"+fesqlOk+",sqlite3:"+sqlite3Ok); if(!fesqlOk) return; 
//验证result - List> fesqlRows = fesqlResult.getResult(); + List> fesqlRows = openMLDBResult.getResult(); List> sqlite3Rows = sqlite3Result.getResult(); log.info("fesqlRows:{}", fesqlRows); log.info("sqlite3Rows:{}", sqlite3Rows); @@ -98,7 +98,7 @@ public void checkSqlite3(OpenMLDBResult sqlite3Result) throws Exception { "ResultChecker fail: row=%d column=%d sqlite3=%s fesql=%s\nsqlite3 %s\nfesql %s", i, j, sqlite3_val, fesql_val, sqlite3Result.toString(), - fesqlResult.toString())); + openMLDBResult.toString())); }else if (sqlite3_val != null && sqlite3_val instanceof Double) { // Assert.assertTrue(expect_val != null && expect_val instanceof Double); if(fesql_val instanceof Float){ @@ -113,7 +113,7 @@ public void checkSqlite3(OpenMLDBResult sqlite3Result) throws Exception { String.format("ResultChecker fail: row=%d column=%d sqlite3=%s fesql=%s\nsqlite3 %s\nfesql %s", i, j, sqlite3_val, fesql_val, sqlite3Result.toString(), - fesqlResult.toString()) + openMLDBResult.toString()) ); } else if(fesql_val != null && fesql_val instanceof Timestamp){ @@ -122,13 +122,13 @@ public void checkSqlite3(OpenMLDBResult sqlite3Result) throws Exception { "ResultChecker fail: row=%d column=%d sqlite3=%s fesql=%s\nsqlite3 %s\nfesql %s", i, j, sqlite3_val, fesql_val, sqlite3Result.toString(), - fesqlResult.toString())); + openMLDBResult.toString())); } else{ Assert.assertEquals(String.valueOf(fesql_val), String.valueOf(sqlite3_val), String.format( "ResultChecker fail: row=%d column=%d sqlite3=%s fesql=%s\nsqlite3 %s\nfesql %s", i, j, sqlite3_val, fesql_val, sqlite3Result.toString(), - fesqlResult.toString())); + openMLDBResult.toString())); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java index 894120bfeef..53592ca96c1 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/DiffVersionChecker.java @@ -40,7 +40,7 @@ public void check() throws Exception { resultMap.entrySet().stream().forEach(e->{ String version = e.getKey(); OpenMLDBResult result = e.getValue(); - Assert.assertTrue(fesqlResult.equals(result),"版本结果对比不一致\nmainVersion:\n"+fesqlResult+"\nversion:"+version+"\n"+result); + Assert.assertTrue(openMLDBResult.equals(result),"版本结果对比不一致\nmainVersion:\n"+ openMLDBResult +"\nversion:"+version+"\n"+result); }); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java index f1b7ef08b8e..c80630e5229 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/IndexChecker.java @@ -46,7 +46,7 @@ public IndexChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ public void check() throws Exception { logger.info("index check"); List expectIndexs = expect.getIdxs(); - List actualIndexs = fesqlResult.getSchema().getIndexs(); + List actualIndexs = openMLDBResult.getSchema().getIndexs(); Assert.assertEquals(actualIndexs.size(),expectIndexs.size(),"index count 不一致"); for(int i=0;i> actualRows = actualResult.getResult(); + actualRows.stream().forEach(l->{ + Object o = DataUtil.parseBinary((String)l.get(4),type); + l.set(4,o); + }); + List expectColumns = Lists.newArrayList("string","timestamp","timestamp","int","string","string"); + 
List> expectRows = DataUtil.convertRows(preAgg.getRows(), expectColumns); + + int index = 1; + Collections.sort(expectRows, new RowsSort(index)); + Collections.sort(actualRows, new RowsSort(index)); + log.info("expect:{}", expectRows); + log.info("actual:{}", actualRows); + + Assert.assertEquals(actualRows.size(), expectRows.size(), String.format("ResultChecker fail: expect size %d, real size %d", expectRows.size(), actualRows.size())); + for (int i = 0; i < actualRows.size(); ++i) { + List actual_list = actualRows.get(i); + List expect_list = expectRows.get(i); + Assert.assertEquals(actual_list.size(), expect_list.size(), String.format( + "ResultChecker fail at %dth row: expect row size %d, real row size %d", i, expect_list.size(), actual_list.size())); + for (int j = 0; j < actual_list.size(); ++j) { + Object actual_val = actual_list.get(j); + Object expect_val = expect_list.get(j); + Assert.assertEquals(actual_val, expect_val, String.format( + "ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", + i, j, expect_val, actual_val, expectRows, actualRows)); + } + } + } + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggListChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggListChecker.java new file mode 100644 index 00000000000..7c620c2f6b6 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggListChecker.java @@ -0,0 +1,99 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.openmldb.java_sdk_test.checker; + + +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.model.ExpectDesc; +import com._4paradigm.openmldb.test_common.model.PreAggTable; +import com._4paradigm.openmldb.test_common.util.DataUtil; +import com._4paradigm.openmldb.test_common.util.SDKUtil; +import com._4paradigm.openmldb.test_common.util.SQLUtil; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.CollectionUtils; +import org.testng.Assert; +import org.testng.collections.Lists; + +import java.text.ParseException; +import java.util.Collections; +import java.util.List; + +/** + * @author zhaowei + * @date 2020/6/16 3:14 PM + */ +@Slf4j +public class PreAggListChecker extends BaseChecker { + private SqlExecutor executor; + + public PreAggListChecker(ExpectDesc expect, OpenMLDBResult openMLDBResult) { + super(expect, openMLDBResult); + } + + public PreAggListChecker(SqlExecutor executor, ExpectDesc expect, OpenMLDBResult openMLDBResult){ + this(expect,openMLDBResult); + this.executor = executor; + } + + @Override + public void check() throws ParseException { + log.info("pre agg check"); +// if (CollectionUtils.isEmpty(expect.getPreAggList())) { +// throw new RuntimeException("fail check pre agg list: PreAggTable is empty"); +// } + String dbName = openMLDBResult.getDbName(); + String spName = openMLDBResult.getSpName(); + List preAggList = expect.getPreAggList(); + for(PreAggTable preAgg:preAggList) 
{ + String preAggTableName = preAgg.getName(); + String type = preAgg.getType(); + preAggTableName = SQLUtil.replaceDBNameAndSpName(dbName, spName, preAggTableName); + String sql = String.format("select key,ts_start,ts_end,num_rows,agg_val,filter_key from %s", preAggTableName); + OpenMLDBResult actualResult = SDKUtil.select(executor, "__PRE_AGG_DB", sql); + List> actualRows = actualResult.getResult(); + actualRows.stream().forEach(l -> { + Object o = DataUtil.parseBinary((String) l.get(4), type); + l.set(4, o); + }); + List expectColumns = Lists.newArrayList("string", "timestamp", "timestamp", "int", "string", "string"); + List> expectRows = DataUtil.convertRows(preAgg.getRows(), expectColumns); + + int index = 1; + Collections.sort(expectRows, new RowsSort(index)); + Collections.sort(actualRows, new RowsSort(index)); + log.info("expect:{}", expectRows); + log.info("actual:{}", actualRows); + + Assert.assertEquals(actualRows.size(), expectRows.size(), String.format("ResultChecker fail: expect size %d, real size %d", expectRows.size(), actualRows.size())); + for (int i = 0; i < actualRows.size(); ++i) { + List actual_list = actualRows.get(i); + List expect_list = expectRows.get(i); + Assert.assertEquals(actual_list.size(), expect_list.size(), String.format( + "ResultChecker fail at %dth row: expect row size %d, real row size %d", i, expect_list.size(), actual_list.size())); + for (int j = 0; j < actual_list.size(); ++j) { + Object actual_val = actual_list.get(j); + Object expect_val = expect_list.get(j); + Assert.assertEquals(actual_val, expect_val, String.format( + "ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", + i, j, expect_val, actual_val, expectRows, actualRows)); + } + } + } + } + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java index 5643a8f69c5..00e5680c98e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultChecker.java @@ -19,7 +19,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.util.DataUtil; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import com._4paradigm.openmldb.test_common.util.SchemaUtil; @@ -50,11 +49,11 @@ public void check() throws ParseException { } List> expectRows = DataUtil.convertRows(expect.getRows(), expect.getColumns()); - List> actual = fesqlResult.getResult(); + List> actual = openMLDBResult.getResult(); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = SchemaUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = SchemaUtil.getIndexByColumnName(openMLDBResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } @@ -79,7 +78,7 @@ public void check() throws ParseException { String.format("ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString()) + openMLDBResult.toString()) ); } else if (actual_val != null && actual_val instanceof Double) { @@ -89,7 +88,7 @@ public void check() throws ParseException { String.format("ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, 
Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString()) + openMLDBResult.toString()) ); } else { @@ -97,7 +96,7 @@ public void check() throws ParseException { "ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString())); + openMLDBResult.toString())); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java index 68cf423aca7..b00422a8de2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByCli.java @@ -19,7 +19,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.util.DataUtil; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import com._4paradigm.openmldb.test_common.util.SchemaUtil; @@ -49,11 +48,11 @@ public void check() throws ParseException { throw new RuntimeException("fail check result: columns are empty"); } List> expectRows = DataUtil.convertRows(expect.getRows(), expect.getColumns()); - List> actual = DataUtil.convertRows(fesqlResult.getResult(), expect.getColumns()); + List> actual = DataUtil.convertRows(openMLDBResult.getResult(), expect.getColumns()); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = SchemaUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = 
SchemaUtil.getIndexByColumnName(openMLDBResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } @@ -79,7 +78,7 @@ public void check() throws ParseException { String.format("ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString()) + openMLDBResult.toString()) ); } else if (actual_val != null && actual_val instanceof Double) { @@ -89,7 +88,7 @@ public void check() throws ParseException { String.format("ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString()) + openMLDBResult.toString()) ); } else if (String.valueOf(actual_val).equalsIgnoreCase("null")){ @@ -97,14 +96,14 @@ public void check() throws ParseException { String.format("ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString()) + openMLDBResult.toString()) ); }else { Assert.assertEquals(actual_val, expect_val, String.format( "ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString())); + openMLDBResult.toString())); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java index a20971c4491..db2d1bb0415 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/ResultCheckerByJDBC.java @@ -17,7 +17,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.util.DataUtil; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.Table; import com._4paradigm.openmldb.test_common.util.SchemaUtil; @@ -48,11 +47,11 @@ public void check() throws Exception { } List> expectRows = DataUtil.convertRows(expect.getRows(), expect.getColumns()); - List> actual = fesqlResult.getResult(); + List> actual = openMLDBResult.getResult(); String orderName = expect.getOrder(); if (StringUtils.isNotEmpty(orderName)) { - int index = SchemaUtil.getIndexByColumnName(fesqlResult.getColumnNames(),orderName); + int index = SchemaUtil.getIndexByColumnName(openMLDBResult.getColumnNames(),orderName); Collections.sort(expectRows, new RowsSort(index)); Collections.sort(actual, new RowsSort(index)); } @@ -81,7 +80,7 @@ public void check() throws Exception { "ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString())); + openMLDBResult.toString())); }else if (actual_val != null && actual_val instanceof Double) { // Assert.assertTrue(expect_val != null && expect_val instanceof Double); if(expect_val instanceof Float){ @@ -96,7 +95,7 @@ public void check() throws Exception { String.format("ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString()) + openMLDBResult.toString()) ); } else if(expect_val != null && expect_val instanceof Timestamp){ @@ -105,13 +104,13 @@ public void check() throws Exception { "ResultChecker fail: 
row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString())); + openMLDBResult.toString())); } else{ Assert.assertEquals(String.valueOf(actual_val), String.valueOf(expect_val), String.format( "ResultChecker fail: row=%d column=%d expect=%s real=%s\nexpect %s\nreal %s", i, j, expect_val, actual_val, Table.getTableString(expect.getColumns(), expectRows), - fesqlResult.toString())); + openMLDBResult.toString())); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java index 18be5712257..df3a526f6c1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/SuccessChecker.java @@ -35,7 +35,7 @@ public SuccessChecker(ExpectDesc expect, OpenMLDBResult fesqlResult){ public void check() throws Exception { log.info("success check"); boolean success = expect.getSuccess(); - boolean actual = fesqlResult.isOk(); + boolean actual = openMLDBResult.isOk(); Assert.assertEquals(actual,success,"success验证失败"); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java index 0fd6553099d..95ad1c81010 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java @@ -60,11 +60,7 @@ public class OpenMLDBConfig { if (StringUtils.isNotEmpty(init_env)) { INIT_VERSION_ENV = Boolean.parseBoolean(init_env); } - String tableStorageMode = CONFIG.getProperty("table_storage_mode"); - if(StringUtils.isNotEmpty(tableStorageMode)){ - OpenMLDBGlobalVar.tableStorageMode = tableStorageMode; - } - log.info("test tableStorageMode: {}", OpenMLDBGlobalVar.tableStorageMode); + String version = CONFIG.getProperty("version"); if(StringUtils.isNotEmpty(version)){ OpenMLDBGlobalVar.version = version; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java index b3de78e0a23..623d3af6503 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -75,8 +75,8 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi OpenMLDBGlobalVar.env = caseEnv; } log.info("fedb global var env: {}", env); - OpenMLDBClient fesqlClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); - executor = fesqlClient.getExecutor(); + OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = openMLDBClient.getExecutor(); log.info("executor:{}",executor); Statement statement = executor.getStatement(); statement.execute("SET @@execute_mode='online';"); diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index 57daa0b02f5..5a7d4c3e14e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -60,11 +60,11 @@ public BaseSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType exec } } - public BaseSQLExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { - this(executor,fesqlCase,executorType); + public BaseSQLExecutor(SQLCase sqlCase, SqlExecutor executor, Map executorMap, Map openMLDBInfoMap, SQLCaseType executorType) { + this(executor,sqlCase,executorType); this.executor = executor; this.executorMap = executorMap; - this.openMLDBInfoMap = fedbInfoMap; + this.openMLDBInfoMap = openMLDBInfoMap; } @Override @@ -94,7 +94,7 @@ public void execute() { @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(sqlCase, mainResult, executorType); + List strategyList = CheckerStrategy.build(executor, sqlCase, mainResult, executorType); if(MapUtils.isNotEmpty(resultMap)) { strategyList.add(new DiffVersionChecker(mainResult, resultMap)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java index b068674e72b..2f7c57c08e5 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java @@ -152,7 +152,7 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(sqlCase, mainResult, executorType); + List strategyList = CheckerStrategy.build(null,sqlCase, mainResult, executorType); if(MapUtils.isNotEmpty(resultMap)) { strategyList.add(new DiffVersionChecker(mainResult, resultMap)); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java index 5ad9f6c96c1..ff5d311cdb8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/DiffResultExecutor.java @@ -89,7 +89,7 @@ public void tearDown() { @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(sqlCase, mainResult, executorType); + List strategyList = CheckerStrategy.build(executor,sqlCase, mainResult, executorType); strategyList.add(new DiffResultChecker(mainResult, resultMap)); for (Checker checker : strategyList) { checker.check(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java index fddb6c09e23..745324be00f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/JDBCExecutor.java @@ -46,7 +46,7 @@ public JDBCExecutor(SQLCase fesqlCase, SQLCaseType sqlCaseType) { @Override public void check() throws Exception { - List strategyList = CheckerStrategy.build(sqlCase, mainResult,executorType); + List strategyList = CheckerStrategy.build(null,sqlCase, mainResult,executorType); for (Checker checker : strategyList) { checker.check(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java index 9b697eb46a8..bc297e3ae64 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java @@ -69,5 +69,18 @@ public void testMultiInsert(SQLCase testCase){ public void testMultiInsertByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } + @Test(dataProvider = "getCase") + @Yaml(filePaths = {"integration_test/dml/test_delete.yaml"}) + @Story("delete") + public void testDelete(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); + } + + @Test(dataProvider = "getCase",enabled = false) + @Yaml(filePaths = {"integration_test/dml/test_delete.yaml"}) + @Story("delete") 
+ public void testDeleteByCli(SQLCase testCase){ + ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LongWindowTest.java new file mode 100644 index 00000000000..c7d530b96a5 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LongWindowTest.java @@ -0,0 +1,50 @@ +package com._4paradigm.openmldb.java_sdk_test.cluster.sql_test; + +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; +import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.provider.Yaml; +import io.qameta.allure.Feature; +import io.qameta.allure.Story; +import lombok.extern.slf4j.Slf4j; +import org.testng.annotations.Test; + +@Slf4j +@Feature("long_window") +public class LongWindowTest extends OpenMLDBTest { + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "integration_test/long_window/test_long_window.yaml") + @Story("longWindow") + public void testLongWindow(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kLongWindow).run(); + } + @Test(dataProvider = "getCase",enabled = false) + @Yaml(filePaths = "integration_test/long_window/test_long_window_batch.yaml") + @Story("longWindow-batch") + public void testLongWindowByBatch(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); + } + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "integration_test/long_window/test_count_where.yaml") + @Story("count_where") + public void testCountWhere(SQLCase 
testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kLongWindow).run(); + } + + @Test(dataProvider = "getCase") + @Yaml(filePaths = "integration_test/long_window/test_xxx_where.yaml") + @Story("xxx_where") + public void testXXXWhere(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kLongWindow).run(); + } + @Test(dataProvider = "getCase") + @Yaml(filePaths = "integration_test/long_window/test_udaf.yaml") + @Story("udaf") + public void testUDAF(SQLCase testCase){ + ExecutorFactory.build(executor,testCase, SQLCaseType.kLongWindow).run(); + } + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java index bf3003b1248..8b98ca4972b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/WindowTest.java @@ -36,33 +36,33 @@ public class WindowTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/", - "function/cluster/", - "function/test_index_optimized.yaml"}) + @Yaml(filePaths = {"integration_test/window/", + "integration_test/cluster/", + "integration_test/test_index_optimized.yaml"}) public void testWindowBatch(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/", - "function/cluster/", - "function/test_index_optimized.yaml"}) + @Yaml(filePaths = {"integration_test/window/", + "integration_test/cluster/", + 
"integration_test/test_index_optimized.yaml"}) public void testWindowRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/", - "function/cluster/", - "function/test_index_optimized.yaml"}) + @Yaml(filePaths = {"integration_test/window/", + "integration_test/cluster/", + "integration_test/test_index_optimized.yaml"}) public void testWindowRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/window/", - "function/cluster/", - "function/test_index_optimized.yaml"}) + @Yaml(filePaths = {"integration_test/window/", + "integration_test/cluster/", + "integration_test/test_index_optimized.yaml"}) public void testWindowRequestModeWithSpAsync(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java deleted file mode 100644 index 7f6426141cc..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java +++ /dev/null @@ -1,29 +0,0 @@ -package com._4paradigm.openmldb.java_sdk_test.cluster.v050; - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.model.SQLCaseType; -import 
com._4paradigm.openmldb.test_common.provider.Yaml; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.annotations.Test; - -@Slf4j -public class LongWindowTest extends OpenMLDBTest { - - - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/long_window/long_window.yaml") - @Story("Out-In") - public void testLongWindow1(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); - } - - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/long_window/long_window.yaml") - @Story("Out-In") - public void testLongWindow2(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java deleted file mode 100644 index bcc7f2620af..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/DMLTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com._4paradigm.openmldb.java_sdk_test.cluster.v060; - - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.model.SQLCaseType; -import com._4paradigm.openmldb.test_common.provider.Yaml; -import io.qameta.allure.Feature; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.annotations.Test; - -/** - * @author zhaowei - * @date 2020/6/11 2:53 PM - */ -@Slf4j -@Feature("DML") -public class DMLTest extends OpenMLDBTest { - - @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/dml/test_delete.yaml"}) - @Story("delete") - public void testDelete(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); - } - - @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = {"function/dml/test_delete.yaml"}) - @Story("delete") - public void testDeleteByCli(SQLCase testCase){ - ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); - } - - -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java deleted file mode 100644 index ec638fb0937..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v060/LongWindowTest.java +++ /dev/null @@ -1,23 +0,0 @@ -package com._4paradigm.openmldb.java_sdk_test.cluster.v060; - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import 
com._4paradigm.openmldb.test_common.model.SQLCaseType; -import com._4paradigm.openmldb.test_common.provider.Yaml; -import io.qameta.allure.Feature; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.annotations.Test; - -@Slf4j -@Feature("long_window") -public class LongWindowTest extends OpenMLDBTest { - - @Test(dataProvider = "getCase") - @Yaml(filePaths = "integration_test/long_window/test_count_where.yaml") - @Story("longWindowDeploy") - public void testLongWindow2(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kLongWindow).run(); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestPreAgg.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestPreAgg.java new file mode 100644 index 00000000000..e1c75c61d18 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestPreAgg.java @@ -0,0 +1,33 @@ +package com._4paradigm.openmldb.java_sdk_test.temp; + +import com._4paradigm.openmldb.test_common.util.BinaryUtil; +import org.testng.annotations.Test; + +import java.math.BigInteger; + +public class TestPreAgg { + @Test + public void test(){ + double d = 20.5; + String s = Long.toBinaryString(Double.doubleToRawLongBits(d)); + String ss = new String(); + System.out.println("ss = " + ss); + double doubleVal = Double.longBitsToDouble(new BigInteger(s, 2).longValue()); + System.out.println(doubleVal); + } + @Test + public void test1(){ + long l = 1590738990000L; + String s = Long.toBinaryString(l); + System.out.println("s = " + s); + String s1 = Integer.toString(222, 2); + System.out.println("s1 = " + s1); + } + @Test + public void test2(){ + String s = "ff!"; + String s1 = BinaryUtil.binaryStrToBinaryStr16(BinaryUtil.strToBinaryStr(s)); + System.out.println("s1 = " + s1); + 
System.out.println(BinaryUtil.strToBinaryStr(s)); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 4e3a061e279..dde927e4321 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.5.0-SNAPSHOT - 0.5.0-macos-SNAPSHOT + 0.6.0 + 0.6.0-macos @@ -31,14 +31,14 @@ openmldb-jdbc ${openmldb.jdbc.version} system - /Users/zhaowei/Downloads/openmldb-jdbc-0.5.0-SNAPSHOT.jar + /Users/zhaowei/Downloads/openmldb-jdbc-0.6.0.jar com.4paradigm.openmldb openmldb-native ${openmldb.navtive.version} system - /Users/zhaowei/Downloads/openmldb-native-0.5.0-SNAPSHOT.jar + /Users/zhaowei/Downloads/openmldb-native-0.6.0.jar diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java index 022a1b80a80..533f1bdf1f0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/bean/OpenMLDBResult.java @@ -31,6 +31,7 @@ public class OpenMLDBResult { private String dbName; private List tableNames; + private String spName; private String sql; private boolean haveResult; private boolean ok; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java index c79a5e1f767..d327211b0da 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/CaseFile.java @@ -224,12 +224,14 @@ private List generateCaseByDataProvider(SQLCase sqlCase, List d String order = expectDesc.getOrder(); List columns = expectDesc.getColumns(); List> rows = expectDesc.getRows(); + PreAggTable preAgg = expectDesc.getPreAgg(); int count = expectDesc.getCount(); if (success == false) newExpectDesc.setSuccess(success); if (count > 0) newExpectDesc.setCount(count); if (CollectionUtils.isNotEmpty(columns)) newExpectDesc.setColumns(columns); if (StringUtils.isNotEmpty(order)) newExpectDesc.setOrder(order); if (CollectionUtils.isNotEmpty(rows)) newExpectDesc.setRows(rows); + if(preAgg != null) newExpectDesc.setPreAgg(preAgg); } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java index bb6a0e08b3c..83f873d2d97 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/ExpectDesc.java @@ -35,4 +35,6 @@ public class ExpectDesc extends Table { private List diffTables; private CatFile cat; private String msg; + private PreAggTable preAgg; + private List preAggList; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java new 
file mode 100644 index 00000000000..90671962859 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java @@ -0,0 +1,13 @@ +package com._4paradigm.openmldb.test_common.model; + +import lombok.Data; + +import java.io.Serializable; +import java.util.List; + +@Data +public class PreAggTable implements Serializable { + private String name; + private String type; + private List> rows; +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index 8c4ddb4e6f1..aa6133f6b21 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -95,7 +95,7 @@ public boolean isSupportDiskTable(){ return false; } for(InputDesc input:inputs){ - if (CollectionUtils.isNotEmpty(input.getColumns())&& StringUtils.isEmpty(input.getCreate())) { + if (CollectionUtils.isNotEmpty(input.getColumns())&& StringUtils.isEmpty(input.getCreate())&&StringUtils.isEmpty(input.getStorage())) { return true; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java index fba4f9955f1..a285b3750c1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java @@ -17,6 +17,7 @@ package com._4paradigm.openmldb.test_common.openmldb; +import com._4paradigm.openmldb.test_common.util.Tool; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -24,6 +25,7 @@ import java.util.Arrays; import java.util.List; +import java.util.Properties; import java.util.stream.Collectors; /** @@ -46,6 +48,8 @@ public class OpenMLDBGlobalVar { public static final String CASE_PATH; public static final String YAML_CASE_BASE_DIR; + public static final Properties CONFIG = Tool.getProperties("run_case.properties"); + static { String levelStr = System.getProperty("caseLevel"); levelStr = StringUtils.isEmpty(levelStr) ? "0" : levelStr; @@ -71,5 +75,10 @@ public class OpenMLDBGlobalVar { if (!StringUtils.isEmpty(YAML_CASE_BASE_DIR)) { log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); } + String tableStorageMode = CONFIG.getProperty("table_storage_mode"); + if(StringUtils.isNotEmpty(tableStorageMode)){ + OpenMLDBGlobalVar.tableStorageMode = tableStorageMode; + } + log.info("test tableStorageMode: {}", OpenMLDBGlobalVar.tableStorageMode); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/BinaryUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/BinaryUtil.java new file mode 100644 index 00000000000..b3669c01c5a --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/BinaryUtil.java @@ -0,0 +1,148 @@ +package com._4paradigm.openmldb.test_common.util; + +import org.apache.commons.lang3.StringUtils; + +public class BinaryUtil { + // 将Unicode字符串转换成bool型数组 + public static boolean[] 
StrToBool(String input) { + boolean[] output = Binstr16ToBool(binaryStrToBinaryStr16(strToBinaryStr(input))); + return output; + } + + // 将bool型数组转换成Unicode字符串 + public static String BoolToStr(boolean[] input) { + String output = binaryStrToStr(Binstr16ToBinstr(BoolToBinstr16(input))); + return output; + } + + // 将字符串转换成二进制字符串 + public static String strToBinaryStr(String str) { + char[] strChar = str.toCharArray(); + String result = ""; + for (int i = 0; i < strChar.length; i++) { + String s = Integer.toBinaryString(strChar[i]); + result += s; + } + return result; + } + public static String strToBinaryStr16(String str) { + char[] strChar = str.toCharArray(); + String result = ""; + for (int i = 0; i < strChar.length; i++) { + String s = Integer.toBinaryString(strChar[i]); + s = StringUtils.leftPad(s,16,'0'); + result += s; + } + return result; + } + public static String strToBinaryStr(String str,String separator) { + char[] strChar = str.toCharArray(); + String result = ""; + for (int i = 0; i < strChar.length; i++) { + int x = (int)strChar[i]; + String s = Integer.toBinaryString(strChar[i]) + separator; + result += s; + } + return result; + } + + public static String strToStr(String str) { + String binaryStr = strToBinaryStr(str); + String result = binaryStrToStr(binaryStr); + return result; + } + + // 将二进制字符串转换成Unicode字符串 + private static String binaryStrToStr(String binStr) { + String[] tempStr = strToStrArray(binStr); + char[] tempChar = new char[tempStr.length]; + for (int i = 0; i < tempStr.length; i++) { + tempChar[i] = binaryStrToChar(tempStr[i]); + } + return String.valueOf(tempChar); + } + + // 将二进制字符串格式化成全16位带空格的Binstr + public static String binaryStrToBinaryStr16(String input) { + StringBuffer output = new StringBuffer(); + String[] tempStr = strToStrArray(input); + for (int i = 0; i < tempStr.length; i++) { + for (int j = 16 - tempStr[i].length(); j > 0; j--) { + output.append('0'); + } + output.append(tempStr[i] + " "); + } + return output.toString(); 
+ } + + // 将全16位带空格的Binstr转化成去0前缀的带空格Binstr + private static String Binstr16ToBinstr(String input) { + StringBuffer output = new StringBuffer(); + String[] tempStr = strToStrArray(input); + for (int i = 0; i < tempStr.length; i++) { + for (int j = 0; j < 16; j++) { + if (tempStr[i].charAt(j) == '1') { + output.append(tempStr[i].substring(j) + " "); + break; + } + if (j == 15 && tempStr[i].charAt(j) == '0') + output.append("0" + " "); + } + } + return output.toString(); + } + + // 二进制字串转化为boolean型数组 输入16位有空格的Binstr + private static boolean[] Binstr16ToBool(String input) { + String[] tempStr = strToStrArray(input); + boolean[] output = new boolean[tempStr.length * 16]; + for (int i = 0, j = 0; i < input.length(); i++, j++) + if (input.charAt(i) == '1') + output[j] = true; + else if (input.charAt(i) == '0') + output[j] = false; + else + j--; + return output; + } + + // boolean型数组转化为二进制字串 返回带0前缀16位有空格的Binstr + private static String BoolToBinstr16(boolean[] input) { + StringBuffer output = new StringBuffer(); + for (int i = 0; i < input.length; i++) { + if (input[i]) + output.append('1'); + else + output.append('0'); + if ((i + 1) % 16 == 0) + output.append(' '); + } + output.append(' '); + return output.toString(); + } + + // 将二进制字符串转换为char + private static char binaryStrToChar(String binStr) { + int[] temp = binaryStrToIntArray(binStr); + int sum = 0; + for (int i = 0; i < temp.length; i++) { + sum += temp[temp.length - 1 - i] << i; + } + return (char) sum; + } + + // 将初始二进制字符串转换成字符串数组,以空格相隔 + private static String[] strToStrArray(String str) { + return str.split(" "); + } + + // 将二进制字符串转换成int数组 + private static int[] binaryStrToIntArray(String binStr) { + char[] temp = binStr.toCharArray(); + int[] result = new int[temp.length]; + for (int i = 0; i < temp.length; i++) { + result[i] = temp[i] - 48; + } + return result; + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java index 734b577c5d0..9ab444499bf 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/DataUtil.java @@ -2,7 +2,9 @@ import com._4paradigm.openmldb.jdbc.SQLResultSet; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import java.math.BigInteger; import java.sql.*; import java.text.ParseException; import java.text.SimpleDateFormat; @@ -11,6 +13,46 @@ @Slf4j public class DataUtil { + public static String parseBinary(String str,String type){ + int length = str.length(); +// System.out.println("length = " + length); + String binaryStr = BinaryUtil.strToBinaryStr(str); + switch (type){ + case "smallint": + return String.valueOf(Short.parseShort(binaryStr, 2)); + case "int": + return String.valueOf(Integer.parseInt(binaryStr, 2)); + case "bigint": + return String.valueOf(Long.parseLong(binaryStr, 2)); + case "timestamp": + String binary = ""; + for (int i = 0; i < length; i++) { + String s = Integer.toBinaryString(str.charAt(i)); + System.out.println("s = " + s); + s = StringUtils.leftPad(s, 16, "0"); + System.out.println("AAAAA s = " + s); + binary += s; + } + System.out.println("binary = " + binary); + return String.valueOf(Long.parseLong(binary, 2)); + case "float": +// return String.valueOf(Float.intBitsToFloat(new BigInteger(binaryStr, 2).intValue())); + return BinaryUtil.strToStr(str); + case "double": + return String.valueOf(Double.longBitsToDouble(new BigInteger(binaryStr, 2).longValue())); + case "date": + int year = (int)(str.charAt(2))+1900; + int month = (int)(str.charAt(1))+1; + int day = str.charAt(0); + return year+"-"+(month<10?"0"+month:month)+"-"+(day<10?"0"+day:day); + case "string": 
+ return str; + default: + throw new IllegalArgumentException("parse binary not support type:"+type); + } + + } + public static Object parseTime(Object data){ String dataStr = String.valueOf(data); if(dataStr.equals("{currentTime}")){ diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java index 76295d1b77e..04b5dddcc68 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/ResultUtil.java @@ -163,7 +163,7 @@ public static Object getColumnData(SQLResultSet rs, int index) throws SQLExcepti obj = rs.getLong(index + 1); } else if (columnType == Types.VARCHAR) { obj = rs.getString(index + 1); - log.info("conver string data {}", obj); +// log.info("convert string data {}", obj); } else if (columnType == Types.TIMESTAMP) { obj = rs.getTimestamp(index + 1); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 5a86cae9c8f..cef7810af29 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -62,7 +62,6 @@ public static OpenMLDBResult executeLongWindowDeploy(SqlExecutor executor, SQLCa } public static OpenMLDBResult deploy(SqlExecutor sqlExecutor,String sql){ - OpenMLDBResult openMLDBResult = new 
OpenMLDBResult(); openMLDBResult.setSql(sql); Statement statement = sqlExecutor.getStatement(); @@ -75,7 +74,7 @@ public static OpenMLDBResult deploy(SqlExecutor sqlExecutor,String sql){ openMLDBResult.setMsg(e.getMessage()); e.printStackTrace(); } - log.info("deploy sql:{}",openMLDBResult); + log.info("deploy sql:{}",sql); return openMLDBResult; } @@ -106,13 +105,13 @@ public static OpenMLDBResult sqlBatchRequestMode(SqlExecutor executor, String db public static OpenMLDBResult sqlRequestModeWithProcedure(SqlExecutor executor, String dbName, String spName, Boolean needInsertRequestRow, String sql, InputDesc rows, boolean isAsyn) throws SQLException { - OpenMLDBResult fesqlResult = null; + OpenMLDBResult openMLDBResult = null; if (sql.toLowerCase().startsWith("create procedure") || sql.toLowerCase().startsWith("deploy ")) { - fesqlResult = selectRequestModeWithSp(executor, dbName, spName, needInsertRequestRow, sql, rows, isAsyn); + openMLDBResult = selectRequestModeWithSp(executor, dbName, spName, needInsertRequestRow, sql, rows, isAsyn); } else { - throw new IllegalArgumentException("unsupport sql: "+ sql); + throw new IllegalArgumentException("not support sql: "+ sql); } - return fesqlResult; + return openMLDBResult; } public static OpenMLDBResult sql(SqlExecutor executor, String dbName, String sql) { @@ -136,6 +135,8 @@ public static OpenMLDBResult sql(SqlExecutor executor, String dbName, String sql openMLDBResult = desc(executor,dbName,sql); }else if(sql.contains("outfile")){ openMLDBResult = selectInto(executor, dbName, sql); + }else if(sql.contains("deploy ")){ + openMLDBResult = deploy(executor, sql); }else { openMLDBResult = select(executor, dbName, sql); } @@ -149,22 +150,22 @@ public static OpenMLDBResult selectInto(SqlExecutor executor, String dbName, Str return null; } log.info("select into:{}",outSql); - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); ResultSet rawRs = 
executor.executeSQL(dbName, outSql); if (rawRs == null) { - fesqlResult.setOk(false); - fesqlResult.setMsg("executeSQL fail, result is null"); + openMLDBResult.setOk(false); + openMLDBResult.setMsg("executeSQL fail, result is null"); } else if (rawRs instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)rawRs; - fesqlResult.setOk(true); + openMLDBResult.setOk(true); } catch (Exception e) { - fesqlResult.setOk(false); - fesqlResult.setMsg(e.getMessage()); + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); } } - log.info("select result:{} \n", fesqlResult); - return fesqlResult; + log.info("select result:{} \n", openMLDBResult); + return openMLDBResult; } public static OpenMLDBResult showDeploy(SqlExecutor executor, String dbName, String showDeploySql){ @@ -590,14 +591,18 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri log.info("procedure sql:{}", sql); String insertDbName = input.getDb().isEmpty() ? dbName : input.getDb(); - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); + openMLDBResult.setSpName(spName); if(sql.startsWith("deploy ")){ - deploy(executor,sql); + OpenMLDBResult deployResult = deploy(executor, sql); + if(!deployResult.isOk()){ + return deployResult; + } }else if (!executor.executeDDL(dbName, sql)) { log.error("execute ddl failed! 
sql: {}", sql); - fesqlResult.setOk(false); - fesqlResult.setMsg("execute ddl failed"); - return fesqlResult; + openMLDBResult.setOk(false); + openMLDBResult.setMsg("execute ddl failed"); + return openMLDBResult; } List> result = Lists.newArrayList(); for (int i = 0; i < rows.size(); i++) { @@ -610,9 +615,9 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri try { rps = executor.getCallablePreparedStmt(dbName, spName); if (rps == null) { - fesqlResult.setOk(false); - fesqlResult.setMsg("Fail to getCallablePreparedStmt"); - return fesqlResult; + openMLDBResult.setOk(false); + openMLDBResult.setMsg("Fail to getCallablePreparedStmt"); + return openMLDBResult; } if (!isAsyn) { resultSet = buildRequestPreparedStatement(rps, rows.get(i)); @@ -620,33 +625,33 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri resultSet = buildRequestPreparedStatementAsync(rps, rows.get(i)); } if (resultSet == null) { - fesqlResult.setOk(false); - fesqlResult.setMsg("result set is null"); - log.error("select result:{}", fesqlResult); - return fesqlResult; + openMLDBResult.setOk(false); + openMLDBResult.setMsg("result set is null"); + log.error("select result:{}", openMLDBResult); + return openMLDBResult; } result.addAll(ResultUtil.toList((SQLResultSet) resultSet)); if (needInsertRequestRow && !executor.executeInsert(insertDbName, inserts.get(i))) { - fesqlResult.setOk(false); - fesqlResult.setMsg("fail to execute sql in request mode: fail to insert request row after query"); - log.error(fesqlResult.getMsg()); - return fesqlResult; + openMLDBResult.setOk(false); + openMLDBResult.setMsg("fail to execute sql in request mode: fail to insert request row after query"); + log.error(openMLDBResult.getMsg()); + return openMLDBResult; } if (i == 0) { try { - ResultUtil.setSchema(resultSet.getMetaData(),fesqlResult); + ResultUtil.setSchema(resultSet.getMetaData(),openMLDBResult); } catch (SQLException throwables) { - 
fesqlResult.setOk(false); - fesqlResult.setMsg("fail to get/set meta data"); - return fesqlResult; + openMLDBResult.setOk(false); + openMLDBResult.setMsg("fail to get/set meta data"); + return openMLDBResult; } } } catch (SQLException throwables) { throwables.printStackTrace(); log.error("has exception. sql: {}", sql); - fesqlResult.setOk(false); - fesqlResult.setMsg("fail to execute sql"); - return fesqlResult; + openMLDBResult.setOk(false); + openMLDBResult.setMsg("fail to execute sql"); + return openMLDBResult; } finally { try { if (resultSet != null) resultSet.close(); @@ -656,11 +661,11 @@ private static OpenMLDBResult selectRequestModeWithSp(SqlExecutor executor, Stri } } } - fesqlResult.setResult(result); - fesqlResult.setCount(result.size()); - fesqlResult.setOk(true); - log.info("select result:{}", fesqlResult); - return fesqlResult; + openMLDBResult.setResult(result); + openMLDBResult.setCount(result.size()); + openMLDBResult.setOk(true); + log.info("select result:{}", openMLDBResult); + return openMLDBResult; } public static OpenMLDBResult selectBatchRequestModeWithSp(SqlExecutor executor, String dbName, String spName, @@ -835,26 +840,26 @@ public static OpenMLDBResult select(SqlExecutor executor, String dbName, String return null; } log.info("select sql:{}", selectSql); - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); ResultSet rawRs = executor.executeSQL(dbName, selectSql); if (rawRs == null) { - fesqlResult.setOk(false); - fesqlResult.setMsg("executeSQL fail, result is null"); + openMLDBResult.setOk(false); + openMLDBResult.setMsg("executeSQL fail, result is null"); } else if (rawRs instanceof SQLResultSet){ try { SQLResultSet rs = (SQLResultSet)rawRs; - ResultUtil.setSchema(rs.getMetaData(),fesqlResult); - fesqlResult.setOk(true); + ResultUtil.setSchema(rs.getMetaData(),openMLDBResult); + openMLDBResult.setOk(true); List> result = ResultUtil.toList(rs); - fesqlResult.setCount(result.size()); 
- fesqlResult.setResult(result); + openMLDBResult.setCount(result.size()); + openMLDBResult.setResult(result); } catch (Exception e) { - fesqlResult.setOk(false); - fesqlResult.setMsg(e.getMessage()); + openMLDBResult.setOk(false); + openMLDBResult.setMsg(e.getMessage()); } } - log.info("select result:{} \n", fesqlResult); - return fesqlResult; + log.info("select result:{} \n", openMLDBResult); + return openMLDBResult; } // public static Object getColumnData(com._4paradigm.openmldb.ResultSet rs, Schema schema, int index) { diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java index 54183e3742b..bdc5da51683 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SQLUtil.java @@ -15,6 +15,21 @@ public class SQLUtil { private static String reg = "\\{(\\d+)\\}"; private static Pattern pattern = Pattern.compile(reg); + public static String replaceDBNameAndTableName(String dbName,List tableNames,String str){ + Matcher matcher = pattern.matcher(str); + while (matcher.find()) { + int index = Integer.parseInt(matcher.group(1)); + str = str.replace("{" + index + "}", tableNames.get(index)); + } + str = str.replace("{db_name}",dbName); + return str; + } + public static String replaceDBNameAndSpName(String dbName,String spName,String str){ + str = str.replace("{sp_name}",spName); + str = str.replace("{db_name}",dbName); + return str; + } + public static String getLongWindowDeploySQL(String name,String longWindow,String sql){ String deploySql = String.format("deploy %s options(long_windows='%s') %s",name,longWindow,sql); return deploySql; @@ -70,13 +85,13 
@@ public static String buildInsertSQLWithPrepared(String name, List column return builder.toString(); } - public static String formatSql(String sql, List tableNames, OpenMLDBInfo fedbInfo) { + public static String formatSql(String sql, List tableNames, OpenMLDBInfo openMLDBInfo) { Matcher matcher = pattern.matcher(sql); while (matcher.find()) { int index = Integer.parseInt(matcher.group(1)); sql = sql.replace("{" + index + "}", tableNames.get(index)); } - sql = formatSql(sql,fedbInfo); + sql = formatSql(sql,openMLDBInfo); return sql; } From 9ca61e78de8b6e3156dcd5ffd365425f9438f983 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 16 Aug 2022 19:43:02 +0800 Subject: [PATCH 120/172] add upgrade test --- .../long_window/test_long_window.yaml | 6 +- .../common/OpenMLDBDeploy.java | 8 +- .../src/main/resources/deploy.properties | 9 +- .../openmldb/devops_test/util/CheckUtil.java | 30 ++++ .../high_availability/TestCluster.java | 48 +++---- .../devops_test/upgrade_test/UpgradeTest.java | 135 ++++++++++++++++++ .../test_suite/test_upgrade.xml | 14 ++ .../test_common/openmldb/NsClient.java | 26 ++++ .../test_common/openmldb/OpenMLDBDevops.java | 91 +++++++++++- 9 files changed, 326 insertions(+), 41 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeTest.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml diff --git a/cases/integration_test/long_window/test_long_window.yaml b/cases/integration_test/long_window/test_long_window.yaml index c8250a79e5c..ba25910885d 100644 --- a/cases/integration_test/long_window/test_long_window.yaml +++ b/cases/integration_test/long_window/test_long_window.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: [] +debugs: ["options(long_window='w1:2d')"] cases: - id: 0 @@ -63,8 +63,8 @@ cases: name: pre_{db_name}_{sp_name}_w1_count_c4 type: bigint rows: - - ["aa",1577811660000,1577984459999,2,2,null] - - ["aa",1577984460000,1578157259999,2,2,null] + - ["aa",1577664000000,1577836799999,2,1,null] + - ["aa",1577836800000,1578009599999,2,2,null] - id: 2 desc: options(long_window='w1:2h') diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 5b315279d78..7a264d61281 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -70,9 +70,9 @@ public OpenMLDBInfo deployStandalone(){ file.mkdirs(); } downloadOpenMLDB(testPath); - OpenMLDBInfo fedbInfo = deployStandalone(testPath,ip); - log.info("openmldb-info:"+fedbInfo); - return fedbInfo; + OpenMLDBInfo openMLDBInfo = deployStandalone(testPath,ip); + log.info("openmldb-info:"+openMLDBInfo); + return openMLDBInfo; } public OpenMLDBInfo deployCluster(int ns, int tablet){ return deployCluster(null,ns,tablet); @@ -151,7 +151,7 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ return fedbInfo; } - private String downloadOpenMLDB(String testPath){ + public String downloadOpenMLDB(String testPath){ try { String command; log.info("openMLDBUrl:{}",openMLDBUrl); diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 728c9eed82d..062d5be93e9 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -22,4 +22,11 @@ tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbsp tmp_mac=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-darwin.tar.gz tmp_mac_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz -tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz \ No newline at end of file +tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz + +0.4.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.0-linux.tar.gz +0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.0-linux.tar.gz +0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java new file mode 100644 index 00000000000..0a767d20e6b --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java @@ -0,0 +1,30 @@ +package com._4paradigm.openmldb.devops_test.util; + +import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.testng.Assert; +import org.testng.collections.Lists; + +import java.util.ArrayList; +import java.util.List; + +public class CheckUtil { + public static void addDataCheck(SDKClient sdkClient, NsClient nsClient, String dbName, List tableNames, int originalCount, int addCount){ + List> 
addDataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList(c1 + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + addDataList.add(list); + } + String msg = "table add data check count failed."; + for(String tableName:tableNames){ + if (CollectionUtils.isNotEmpty(addDataList)) { + sdkClient.insertList(tableName,addDataList); + } + Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); + } + nsClient.checkTableOffSet(dbName,null); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 91a5917dd73..f1bd535dd3d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -1,6 +1,7 @@ package com._4paradigm.openmldb.devops_test.high_availability; import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.devops_test.util.CheckUtil; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.openmldb.*; import com._4paradigm.qa.openmldb_deploy.util.Tool; @@ -85,41 +86,41 @@ public void testMoreReplica(){ Assert.assertEquals(sdkClient.getTableRowCount(hddTable),dataCount,oneTabletStopMsg); // tablet start,数据可以回复,要看磁盘表和内存表。 openMLDBDevops.operateTablet(0,"start"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); 
//创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 openMLDBDevops.operateTablet(0,"restart"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 nsClient.makeSnapshot(dbName,memoryTable); nsClient.makeSnapshot(dbName,ssdTable); nsClient.makeSnapshot(dbName,hddTable); //tablet 依次restart,数据可回复,可以访问。 openMLDBDevops.operateTablet("restart"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); //1个ns stop,可以正常访问。 openMLDBDevops.operateNs(0,"stop"); resetClient(); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); resetClient(); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk stop 在start后 可以访问 openMLDBDevops.operateZKOne("stop"); Tool.sleep(3000); openMLDBDevops.operateZKOne("start"); Tool.sleep(3000); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + 
CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk restart 后可以访问 openMLDBDevops.operateZKOne("restart"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 2个tablet stop 可以访问 openMLDBDevops.operateTablet(0,"stop"); openMLDBDevops.operateTablet(1,"stop"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //3个tablet stop,不能访问。 openMLDBDevops.operateTablet(2,"stop"); OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); @@ -127,7 +128,7 @@ public void testMoreReplica(){ // // 1个tablet启动,数据可回复,分片所在的表,可以访问。 // openMLDBDevops.operateTablet(0,"start"); -// addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); +// CheckUtil.addDataCheck(sdkClient,nsClient,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //2个ns stop,不能访问。 // openMLDBDevops.operateNs(1,"stop"); @@ -201,24 +202,24 @@ public void testSingle(){ Assert.assertTrue(openMLDBResult.getMsg().contains("fail")); // tablet start,数据可以回复,要看磁盘表和内存表。 openMLDBDevops.operateTablet(0,"start"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); //make snapshot,在重启tablet,数据可回复。 nsClient.makeSnapshot(dbName,memoryTable); nsClient.makeSnapshot(dbName,ssdTable); nsClient.makeSnapshot(dbName,hddTable); //重启tablet,数据可回复,内存表和磁盘表可以正常访问。 openMLDBDevops.operateTablet(0,"restart"); - 
addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); //ns stop start 可以正常访问。 openMLDBDevops.operateNs(0,"stop"); // resetClient(); //ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); //ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); // resetClient(); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); // stop tablet ns 后 在启动 ns tablet 可以访问 openMLDBDevops.operateTablet(0,"stop"); openMLDBDevops.operateNs(0,"stop"); @@ -226,24 +227,7 @@ public void testSingle(){ openMLDBDevops.operateNs(0,"start"); Tool.sleep(10*1000); openMLDBDevops.operateTablet(0,"start"); - addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); - } - - public void addDataCheck(SDKClient sdkClient, NsClient nsClient,String dbName,List tableNames,int originalCount,int addCount){ - List> addDataList = new ArrayList<>(); - for(int i=0;i list = Lists.newArrayList(c1 + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); - addDataList.add(list); - } - String msg = "table add data check count failed."; - for(String tableName:tableNames){ - if (CollectionUtils.isNotEmpty(addDataList)) { - sdkClient.insertList(tableName,addDataList); - } - Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); - } - nsClient.checkTableOffSet(dbName,null); + 
CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); } public void resetClient(){ OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeTest.java new file mode 100644 index 00000000000..c0ac852521b --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeTest.java @@ -0,0 +1,135 @@ +package com._4paradigm.openmldb.devops_test.upgrade_test; + +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.devops_test.util.CheckUtil; +import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import lombok.extern.slf4j.Slf4j; +import org.testng.Assert; +import org.testng.annotations.*; +import org.testng.collections.Lists; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Slf4j +public class UpgradeTest extends ClusterTest { + private String dbName; + private String memoryTableName; + private String ssdTableName; + private String hddTableName; + private SDKClient sdkClient; + private NsClient nsClient; + private OpenMLDBDevops openMLDBDevops; + private String 
openMLDBPath; + private String confPath; + private String upgradePath; + @BeforeClass + @Parameters("upgradeVersion") + public void beforeClass(@Optional("0.6.0") String upgradeVersion){ + dbName = "test_upgrade"; + memoryTableName = "test_memory"; + ssdTableName = "test_ssd"; + hddTableName = "test_hdd"; + sdkClient = SDKClient.of(executor); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + + int dataCount = 100; + sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; + sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + // 插入一定量的数据 + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + sdkClient.insertList(memoryTableName,dataList); + sdkClient.insertList(ssdTableName,dataList); + sdkClient.insertList(hddTableName,dataList); + upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; + File file = new File(upgradePath); + if(!file.exists()){ + file.mkdirs(); 
+ } + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); + String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); + openMLDBPath = upgradeDirectoryName+"/bin/openmldb"; + confPath = upgradeDirectoryName+"/conf"; + } + @Test + public void testUpgrade(){ + Map> map1 = nsClient.getTableOffset(dbName); + log.info("升级前offset:"+map1); + openMLDBDevops.upgradeNs(openMLDBPath,confPath); + openMLDBDevops.upgradeTablet(openMLDBPath,confPath); + Map> map2 = nsClient.getTableOffset(dbName); + log.info("升级后offset:"+map2); + Assert.assertEquals(map1,map2); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); + } + +// public void upgradeNs(){ +// Map> map1 = nsClient.getTableOffset(dbName); +// log.info("升级前offset:"+map1); +// openMLDBDevops.upgradeNs(openMLDBPath,confPath); +// Map> map2 = nsClient.getTableOffset(dbName); +// log.info("升级后offset:"+map2); +// Assert.assertEquals(map1,map2); +// } +// public void upgradeTablet(){ +// Map> map1 = nsClient.getTableOffset(dbName); +// log.info("升级前offset:"+map1); +// openMLDBDevops.upgradeTablet(openMLDBPath,confPath); +// Map> map2 = nsClient.getTableOffset(dbName); +// log.info("升级后offset:"+map2); +// Assert.assertEquals(map1,map2); +// CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); +// } + +// @AfterClass + public void afterClass(){ + String command = "rm -rf "+upgradePath; + ExecutorUtil.run(command); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml new file mode 100644 index 00000000000..88b9d6877ec --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 8b492c3db09..906194377a0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -174,4 +174,30 @@ public Map> getTableEndPoint(String dbName,String tableName return map; } + public Map> getTableOffset(String dbName){ + List lines = showTable(dbName,null); + Map> offsets = new HashMap<>(); + for(int i=2;i value = offsets.get(key); + String role = infos[4]; + long offset = 0; + String offsetStr = infos[7].trim(); + if(!offsetStr.equals("-")&&!offsetStr.equals("")){ + offset = Long.parseLong(offsetStr); + } + if(value==null){ + value = new ArrayList<>(); + offsets.put(key,value); + } + if(role.equals("leader")){ + value.add(0,offset); + }else { + value.add(offset); + } + } + return offsets; + } + } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index a9034d576db..b3728cbe2df 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -4,11 +4,13 @@ import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.util.Tool; import 
com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import lombok.extern.slf4j.Slf4j; import org.testng.Assert; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; - +@Slf4j public class OpenMLDBDevops { private OpenMLDBInfo openMLDBInfo; private String dbName; @@ -60,4 +62,91 @@ public void operateZKOne(String operator){ ExecutorUtil.run(command); Tool.sleep(5*1000); } + public void upgradeNs(String openMLDBPath,String confPath){ + String basePath = openMLDBInfo.getBasePath(); + int nsNum = openMLDBInfo.getNsNum(); + for(int i=1;i<=nsNum;i++) { + log.info("开始升级第{}个ns",i); + String nsPath = basePath + "/openmldb-ns-"+i; + backUp(nsPath); + cpOpenMLDB(nsPath, openMLDBPath); + cpConf(nsPath, confPath); + modifyNsConf(nsPath, openMLDBInfo.getNsEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); + operateNs(i,"restart"); + Tool.sleep(20*1000); + log.info("第{}个ns升级结束",i); + } + } + public void upgradeTablet(String openMLDBPath, String confPath){ + String basePath = openMLDBInfo.getBasePath(); + int tabletNum = openMLDBInfo.getTabletNum(); + for(int i=1;i<=tabletNum;i++) { + log.info("开始升级第{}个tablet",i); + String tabletPath = basePath + "/openmldb-tablet-"+i; + backUp(tabletPath); + cpOpenMLDB(tabletPath, openMLDBPath); + cpConf(tabletPath, confPath); + modifyTabletConf(tabletPath, openMLDBInfo.getTabletEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); + operateTablet(i,"stop"); + Tool.sleep(10*1000); + operateTablet(i,"start"); + Tool.sleep(20*1000); + log.info("第{}个tablet升级结束",i); + } + } + public static void backUp(String path){ + String command = "cp -rf "+path +"/conf "+path+"/conf-back"; + ExecutorUtil.run(command); + command = "ls "+path+" | grep conf-back"; + List result = ExecutorUtil.run(command); + Assert.assertEquals(result.get(0),"conf-back"); + command = "cp -rf "+path +"/bin/openmldb "+path+"/openmldb.back"; + ExecutorUtil.run(command); + command = "ls "+path+" | grep openmldb.back"; + result = 
ExecutorUtil.run(command); + Assert.assertEquals(result.get(0),"openmldb.back"); + } + public static void cpOpenMLDB(String path,String openMLDBPath){ + String command = "rm -rf "+path+"/bin/openmldb"; + List result = ExecutorUtil.run(command); + Assert.assertEquals(result.size(),0); + command = "cp -rf "+openMLDBPath+" "+path+"/bin"; + result = ExecutorUtil.run(command); + Assert.assertEquals(result.size(),0); + } + public static void cpConf(String path,String confPath){ + String command = "rm -rf "+path+"/conf"; + List result = ExecutorUtil.run(command); + Assert.assertEquals(result.size(),0); + command = "cp -rf "+confPath+" "+path; + result = ExecutorUtil.run(command); + Assert.assertEquals(result.size(),0); + } + public static void modifyNsConf(String nsPath,String ip_port,String zk_endpoint){ + String[] commands = { + "sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+nsPath+"/conf/nameserver.flags", + "sed -i 's#--zk_cluster=.*#--zk_cluster="+zk_endpoint+"#' "+nsPath+"/conf/nameserver.flags" + }; + for(String command:commands){ + ExecutorUtil.run(command); + } + } + public static void modifyTabletConf(String tabletPath,String ip_port,String zk_endpoint){ + String[] commands = { + "sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+tabletPath+"/conf/tablet.flags", + "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+tabletPath+"/conf/tablet.flags", + "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+tabletPath+"/conf/tablet.flags", + "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+tabletPath+"/conf/tablet.flags", + "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+tabletPath+"/conf/tablet.flags", + "sed -i 's@#--make_snapshot_threshold_offset=100000@--make_snapshot_threshold_offset=10@' "+tabletPath+"/conf/tablet.flags", + "sed -i 's@--gc_interval=60@--gc_interval=1@' "+tabletPath+"/conf/tablet.flags", + "echo '--hdd_root_path=./db_hdd' >> "+tabletPath+"/conf/tablet.flags", + "echo 
'--recycle_bin_hdd_root_path=./recycle_hdd' >> "+tabletPath+"/conf/tablet.flags", + "echo '--ssd_root_path=./db_ssd' >> "+tabletPath+"/conf/tablet.flags", + "echo '--recycle_bin_ssd_root_path=./recycle_ssd' >> "+tabletPath+"/conf/tablet.flags" + }; + for(String command:commands){ + ExecutorUtil.run(command); + } + } } From 73389d89d4392a3fa6dff5d2ef0154057e941a11 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 16 Aug 2022 19:52:36 +0800 Subject: [PATCH 121/172] add upgrade test --- .../openmldb-test-java/openmldb-test-common/pom.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index dde927e4321..88a3b1460e8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -16,7 +16,7 @@ 8 0.6.0 - 0.6.0-macos + 0.6.0 @@ -30,15 +30,15 @@ com.4paradigm.openmldb openmldb-jdbc ${openmldb.jdbc.version} - system - /Users/zhaowei/Downloads/openmldb-jdbc-0.6.0.jar + + com.4paradigm.openmldb openmldb-native ${openmldb.navtive.version} - system - /Users/zhaowei/Downloads/openmldb-native-0.6.0.jar + + From 70b2ec67268e21b84e0d0a86d01f46966204004f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 17 Aug 2022 08:26:49 +0800 Subject: [PATCH 122/172] add upgrade test --- .../{UpgradeTest.java => UpgradeCluster.java} | 2 +- .../upgrade_test/UpgradeStandalone.java | 138 ++++++++++++++++++ .../test_suite/test_upgrade.xml | 2 +- 3 files changed, 140 insertions(+), 2 deletions(-) rename test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/{UpgradeTest.java => UpgradeCluster.java} (99%) create mode 100644 
test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java similarity index 99% rename from test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeTest.java rename to test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java index c0ac852521b..7a2d54a306a 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java @@ -20,7 +20,7 @@ import java.util.Map; @Slf4j -public class UpgradeTest extends ClusterTest { +public class UpgradeCluster extends ClusterTest { private String dbName; private String memoryTableName; private String ssdTableName; diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java new file mode 100644 index 00000000000..2eefe3e639a --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java @@ -0,0 +1,138 @@ +package com._4paradigm.openmldb.devops_test.upgrade_test; + +import 
com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.devops_test.util.CheckUtil; +import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import lombok.extern.slf4j.Slf4j; +import org.testng.Assert; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; +import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Slf4j +public class UpgradeStandalone extends ClusterTest { + private String dbName; + private String memoryTableName; + private String ssdTableName; + private String hddTableName; + private SDKClient sdkClient; + private NsClient nsClient; + private OpenMLDBDevops openMLDBDevops; + private String openMLDBPath; + private String confPath; + private String upgradePath; + @BeforeClass + @Parameters("upgradeVersion") + public void beforeClass(@Optional("0.6.0") String upgradeVersion){ + dbName = "test_upgrade"; + memoryTableName = "test_memory"; + ssdTableName = "test_ssd"; + hddTableName = "test_hdd"; + sdkClient = SDKClient.of(executor); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + + int dataCount = 100; + sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" 
+ + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; + sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + // 插入一定量的数据 + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + sdkClient.insertList(memoryTableName,dataList); + sdkClient.insertList(ssdTableName,dataList); + sdkClient.insertList(hddTableName,dataList); + upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; + File file = new File(upgradePath); + if(!file.exists()){ + file.mkdirs(); + } + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); + String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); + openMLDBPath = upgradeDirectoryName+"/bin/openmldb"; + confPath = upgradeDirectoryName+"/conf"; + } + @Test + public void testUpgrade(){ + Map> map1 = nsClient.getTableOffset(dbName); + log.info("升级前offset:"+map1); + openMLDBDevops.upgradeNs(openMLDBPath,confPath); + openMLDBDevops.upgradeTablet(openMLDBPath,confPath); + Map> map2 = nsClient.getTableOffset(dbName); + log.info("升级后offset:"+map2); + Assert.assertEquals(map1,map2); + CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); + } + +// public void upgradeNs(){ +// Map> map1 
= nsClient.getTableOffset(dbName); +// log.info("升级前offset:"+map1); +// openMLDBDevops.upgradeNs(openMLDBPath,confPath); +// Map> map2 = nsClient.getTableOffset(dbName); +// log.info("升级后offset:"+map2); +// Assert.assertEquals(map1,map2); +// } +// public void upgradeTablet(){ +// Map> map1 = nsClient.getTableOffset(dbName); +// log.info("升级前offset:"+map1); +// openMLDBDevops.upgradeTablet(openMLDBPath,confPath); +// Map> map2 = nsClient.getTableOffset(dbName); +// log.info("升级后offset:"+map2); +// Assert.assertEquals(map1,map2); +// CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); +// } + +// @AfterClass + public void afterClass(){ + String command = "rm -rf "+upgradePath; + ExecutorUtil.run(command); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml index 88b9d6877ec..ce0721c8e8c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml @@ -6,7 +6,7 @@ - + From a4ea194e3453398c98ce7d9bfe9352242bf2896c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 17 Aug 2022 10:32:28 +0800 Subject: [PATCH 123/172] add upgrade test --- .../openmldb-test-java/openmldb-test-common/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 88a3b1460e8..9d244ca1f5b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -16,7 +16,7 @@ 8 0.6.0 - 0.6.0 + 0.6.0-macos From a0188c4058b6fdd4ce5552aa341471ee7c46e1d4 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 17 
Aug 2022 10:58:28 +0800 Subject: [PATCH 124/172] add Missing files --- .../java_sdk_test/common/OpenMLDBConfig.java | 75 +++++++++++++++++ .../openmldb/test_common/common/BaseTest.java | 4 +- .../model/OpenMLDBCaseFileList.java | 2 - .../openmldb/OpenMLDBGlobalVar.java | 84 +++++++++++++++++++ 4 files changed, 161 insertions(+), 4 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java new file mode 100644 index 00000000000..95ad1c81010 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBConfig.java @@ -0,0 +1,75 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.openmldb.java_sdk_test.common; + +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.util.Tool; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.testng.collections.Lists; + +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import java.util.stream.Collectors; + +/** + * @author zhaowei + * @date 2020/6/11 11:34 AM + */ +@Slf4j +public class OpenMLDBConfig { + public static final List VERSIONS; + public static boolean INIT_VERSION_ENV = true; + public static final boolean ADD_REPORT_LOG; + + public static final Properties CONFIG = Tool.getProperties("run_case.properties"); + + static { + String versionStr = System.getProperty("diffVersion"); + if (StringUtils.isEmpty(versionStr)) { + versionStr = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_versions"); + } + if (StringUtils.isNotEmpty(versionStr)) { + VERSIONS = Arrays.stream(versionStr.split(",")).collect(Collectors.toList()); + } else { + VERSIONS = Lists.newArrayList(); + } + log.info("HybridSEConfig: versions: {}", VERSIONS); + String reportLogStr = System.getProperty("reportLog"); + if(StringUtils.isNotEmpty(reportLogStr)){ + ADD_REPORT_LOG = Boolean.parseBoolean(reportLogStr); + }else{ + ADD_REPORT_LOG = true; + } + String init_env = CONFIG.getProperty(OpenMLDBGlobalVar.env + "_init_version_env"); + if (StringUtils.isNotEmpty(init_env)) { + INIT_VERSION_ENV = Boolean.parseBoolean(init_env); + } + + String version = CONFIG.getProperty("version"); + if(StringUtils.isNotEmpty(version)){ + OpenMLDBGlobalVar.version = version; + } + log.info("test version: {}", OpenMLDBGlobalVar.version); + } + + public static boolean isCluster() { + return OpenMLDBGlobalVar.env.equals("cluster"); + } + +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java index bc7d880607d..a0f9e5a9e9f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java @@ -37,7 +37,7 @@ */ @Slf4j public class BaseTest implements ITest { - protected static final Logger logger = new LogProxy(log); +// protected static final Logger logger = new LogProxy(log); private ThreadLocal testName = new ThreadLocal<>(); private int testNum = 0; @@ -54,7 +54,7 @@ public Object[] getCaseByYaml(Method method) throws FileNotFoundException { } OpenMLDBCaseFileList dp = OpenMLDBCaseFileList.dataProviderGenerator(casePaths); Object[] caseArray = dp.getCases().toArray(); - logger.info("caseArray.length:{}",caseArray.length); + log.info("caseArray.length:{}",caseArray.length); return caseArray; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java index 99f3922008b..6e52621cbdc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/OpenMLDBCaseFileList.java @@ -34,8 +34,6 @@ public List getCases() { List cases = new ArrayList(); for (CaseFile dataProvider : dataProviderList) { - System.out.println("--------"); - System.out.println(OpenMLDBGlobalVar.CASE_LEVELS); for (SQLCase sqlCase : dataProvider.getCases(OpenMLDBGlobalVar.CASE_LEVELS)) { 
if (!StringUtils.isEmpty(OpenMLDBGlobalVar.CASE_NAME) && !OpenMLDBGlobalVar.CASE_NAME.equals(BaseTest.CaseNameFormat(sqlCase))) { diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java new file mode 100644 index 00000000000..a285b3750c1 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBGlobalVar.java @@ -0,0 +1,84 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.openmldb.test_common.openmldb; + + +import com._4paradigm.openmldb.test_common.util.Tool; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.testng.collections.Lists; + +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import java.util.stream.Collectors; + +/** + * @author zhaowei + * @date 2020/6/11 11:45 AM + */ +@Slf4j +public class OpenMLDBGlobalVar { + public static String env; + public static String level; + public static String version; + public static String openMLDBPath; + public static OpenMLDBInfo mainInfo; + public static String dbName = "test_zw"; + public static String tableStorageMode = "memory"; + public static final List CASE_LEVELS; + public static final String CASE_NAME; + public static final String CASE_ID; + public static final String CASE_DESC; + public static final String CASE_PATH; + public static final String YAML_CASE_BASE_DIR; + + public static final Properties CONFIG = Tool.getProperties("run_case.properties"); + + static { + String levelStr = System.getProperty("caseLevel"); + levelStr = StringUtils.isEmpty(levelStr) ? 
"0" : levelStr; + CASE_LEVELS = Arrays.stream(levelStr.split(",")).map(Integer::parseInt).collect(Collectors.toList()); + CASE_NAME = System.getProperty("caseName"); + CASE_ID = System.getProperty("caseId"); + CASE_DESC = System.getProperty("caseDesc"); + CASE_PATH = System.getProperty("casePath"); + YAML_CASE_BASE_DIR = System.getProperty("yamlCaseBaseDir"); + log.info("CASE_LEVELS {}", CASE_LEVELS); + if (!StringUtils.isEmpty(CASE_NAME)) { + log.info("CASE_NAME {}", CASE_NAME); + } + if (!StringUtils.isEmpty(CASE_ID)) { + log.info("CASE_ID {}", CASE_ID); + } + if (!StringUtils.isEmpty(CASE_PATH)) { + log.info("CASE_PATH {}", CASE_PATH); + } + if (!StringUtils.isEmpty(CASE_DESC)) { + log.info("CASE_DESC {}", CASE_DESC); + } + if (!StringUtils.isEmpty(YAML_CASE_BASE_DIR)) { + log.info("YAML_CASE_BASE_DIR {}", YAML_CASE_BASE_DIR); + } + String tableStorageMode = CONFIG.getProperty("table_storage_mode"); + if(StringUtils.isNotEmpty(tableStorageMode)){ + OpenMLDBGlobalVar.tableStorageMode = tableStorageMode; + } + log.info("test tableStorageMode: {}", OpenMLDBGlobalVar.tableStorageMode); + } +} From 67e48ddf999417005a4e93a97d2a1336f408bde0 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 17 Aug 2022 11:18:40 +0800 Subject: [PATCH 125/172] add log config --- .../src/main/resources/log4j.properties | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100755 test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/resources/log4j.properties diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/resources/log4j.properties b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/resources/log4j.properties new file mode 100755 index 00000000000..8aa7e8e77dc --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/resources/log4j.properties @@ -0,0 +1,51 @@ +### set log levels ### +log4j.rootLogger=debug,info,stdout,warn,error + +# console log +log4j.appender.stdout 
= org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target = System.out +log4j.appender.stdout.Threshold = INFO +log4j.appender.stdout.layout = org.apache.log4j.PatternLayout +log4j.appender.stdout.Encoding=UTF-8 +log4j.appender.stdout.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n + +#info log +log4j.logger.info=info +log4j.appender.info=org.apache.log4j.DailyRollingFileAppender +log4j.appender.info.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.info.File=logs/info.log +log4j.appender.info.Append=true +log4j.appender.info.Threshold=INFO +log4j.appender.info.Encoding=UTF-8 +log4j.appender.info.layout=org.apache.log4j.PatternLayout +log4j.appender.info.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#debugs log +log4j.logger.debug=debug +log4j.appender.debug=org.apache.log4j.DailyRollingFileAppender +log4j.appender.debug.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.debug.File=logs/debug.log +log4j.appender.debug.Append=true +log4j.appender.debug.Threshold=DEBUG +log4j.appender.debug.Encoding=UTF-8 +log4j.appender.debug.layout=org.apache.log4j.PatternLayout +log4j.appender.debug.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#warn log +log4j.logger.warn=warn +log4j.appender.warn=org.apache.log4j.DailyRollingFileAppender +log4j.appender.warn.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.warn.File=logs/warn.log +log4j.appender.warn.Append=true +log4j.appender.warn.Threshold=WARN +log4j.appender.warn.Encoding=UTF-8 +log4j.appender.warn.layout=org.apache.log4j.PatternLayout +log4j.appender.warn.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#error +log4j.logger.error=error +log4j.appender.error = org.apache.log4j.DailyRollingFileAppender +log4j.appender.error.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.error.File = logs/error.log +log4j.appender.error.Append = true +log4j.appender.error.Threshold = ERROR 
+log4j.appender.error.Encoding=UTF-8 +log4j.appender.error.layout = org.apache.log4j.PatternLayout +log4j.appender.error.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n From 652dfa7c33b8f823b27d2e13f8959a04c680e5d5 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 17 Aug 2022 11:19:30 +0800 Subject: [PATCH 126/172] change version --- .../openmldb-test-java/openmldb-test-common/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 9d244ca1f5b..88a3b1460e8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -16,7 +16,7 @@ 8 0.6.0 - 0.6.0-macos + 0.6.0 From d83b28edddb1fcf66a722d856318d14bb84c3da7 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 17 Aug 2022 11:25:04 +0800 Subject: [PATCH 127/172] add common config --- .../src/main/resources/log4j.properties | 51 +++++++++++++++++++ .../src/main/resources/run_case.properties | 4 ++ 2 files changed, 55 insertions(+) create mode 100755 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/log4j.properties create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/run_case.properties diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/log4j.properties b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/log4j.properties new file mode 100755 index 00000000000..8aa7e8e77dc --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/log4j.properties @@ -0,0 +1,51 @@ +### set log levels ### +log4j.rootLogger=debug,info,stdout,warn,error + +# console log +log4j.appender.stdout = org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target = 
System.out +log4j.appender.stdout.Threshold = INFO +log4j.appender.stdout.layout = org.apache.log4j.PatternLayout +log4j.appender.stdout.Encoding=UTF-8 +log4j.appender.stdout.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n + +#info log +log4j.logger.info=info +log4j.appender.info=org.apache.log4j.DailyRollingFileAppender +log4j.appender.info.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.info.File=logs/info.log +log4j.appender.info.Append=true +log4j.appender.info.Threshold=INFO +log4j.appender.info.Encoding=UTF-8 +log4j.appender.info.layout=org.apache.log4j.PatternLayout +log4j.appender.info.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#debugs log +log4j.logger.debug=debug +log4j.appender.debug=org.apache.log4j.DailyRollingFileAppender +log4j.appender.debug.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.debug.File=logs/debug.log +log4j.appender.debug.Append=true +log4j.appender.debug.Threshold=DEBUG +log4j.appender.debug.Encoding=UTF-8 +log4j.appender.debug.layout=org.apache.log4j.PatternLayout +log4j.appender.debug.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#warn log +log4j.logger.warn=warn +log4j.appender.warn=org.apache.log4j.DailyRollingFileAppender +log4j.appender.warn.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.warn.File=logs/warn.log +log4j.appender.warn.Append=true +log4j.appender.warn.Threshold=WARN +log4j.appender.warn.Encoding=UTF-8 +log4j.appender.warn.layout=org.apache.log4j.PatternLayout +log4j.appender.warn.layout.ConversionPattern= %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n +#error +log4j.logger.error=error +log4j.appender.error = org.apache.log4j.DailyRollingFileAppender +log4j.appender.error.DatePattern='_'yyyy-MM-dd'.log' +log4j.appender.error.File = logs/error.log +log4j.appender.error.Append = true +log4j.appender.error.Threshold = ERROR +log4j.appender.error.Encoding=UTF-8 +log4j.appender.error.layout = 
org.apache.log4j.PatternLayout +log4j.appender.error.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} [ %c.%M(%F:%L) ] - [ %p ] %m%n diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/run_case.properties b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/run_case.properties new file mode 100644 index 00000000000..d361f7ddc73 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/resources/run_case.properties @@ -0,0 +1,4 @@ +# memory/ssd/hdd +table_storage_mode=memory +#version=0.5.0 + From bb1810bdfe373547d08a3317fe8de01466b38c28 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 17 Aug 2022 19:02:51 +0800 Subject: [PATCH 128/172] support upgrade test --- .../long_window/test_count_where.yaml | 2 +- .../long_window/test_long_window.yaml | 2 +- .../long_window/test_xxx_where.yaml | 2 +- .../common/OpenMLDBDeploy.java | 6 +- .../src/main/resources/deploy.properties | 3 +- .../upgrade_test/UpgradeCluster.java | 26 ++-- .../java_sdk_test/common/OpenMLDBTest.java | 84 ++++++++++++ .../test_common/openmldb/OpenMLDBDevops.java | 122 ++++++++++++++++-- .../openmldb/test_common/util/SDKUtil.java | 2 +- 9 files changed, 220 insertions(+), 29 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java diff --git a/cases/integration_test/long_window/test_count_where.yaml b/cases/integration_test/long_window/test_count_where.yaml index 331597a323b..333889414b9 100644 --- a/cases/integration_test/long_window/test_count_where.yaml +++ b/cases/integration_test/long_window/test_count_where.yaml @@ -611,7 +611,7 @@ cases: - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, count_where(c8,c5<1.4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 
ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, count_where(c8,c5<1.35) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: order: id columns: ["id int","c1 string","w1_count bigint"] diff --git a/cases/integration_test/long_window/test_long_window.yaml b/cases/integration_test/long_window/test_long_window.yaml index ba25910885d..64c0ab46c40 100644 --- a/cases/integration_test/long_window/test_long_window.yaml +++ b/cases/integration_test/long_window/test_long_window.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: ["options(long_window='w1:2d')"] +debugs: [] cases: - id: 0 diff --git a/cases/integration_test/long_window/test_xxx_where.yaml b/cases/integration_test/long_window/test_xxx_where.yaml index 572f7ee497a..9cb16c57989 100644 --- a/cases/integration_test/long_window/test_xxx_where.yaml +++ b/cases/integration_test/long_window/test_xxx_where.yaml @@ -925,7 +925,7 @@ cases: - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true] - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false] sql: | - SELECT id, c1, d[0](c3,c5<1.4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); + SELECT id, c1, d[0](c3,c5<1.35) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); dataProvider: - ["min_where","max_where","sum_where","avg_where"] expect: diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 7a264d61281..802e3841126 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -370,11 +370,13 @@ public String deploySpark(String testPath){ } throw new RuntimeException("spark 部署失败"); } - public int deployTaskManager(String testPath, String ip, int index, String zk_endpoint){ + int port = LinuxUtil.getNoUsedPort(); + return deployTaskManager(testPath,ip,port,index,zk_endpoint); + } + public int deployTaskManager(String testPath, String ip, int port, int index, String zk_endpoint){ try { String sparkHome = deploySpark(testPath); - int port = LinuxUtil.getNoUsedPort(); String task_manager_name = "/openmldb-task_manager-"+index; ExecutorUtil.run("cp -r " + testPath + "/" + openMLDBDirectoryName + " " + testPath + task_manager_name); if(batchJobJarPath==null) { diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 062d5be93e9..483a4b18855 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -27,6 +27,7 @@ tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmld 0.4.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.0-linux.tar.gz 0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.0-linux.tar.gz 0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz - +0.4.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +0.4.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java index 7a2d54a306a..5f91478765b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java @@ -29,8 +29,10 @@ public class UpgradeCluster extends ClusterTest { private NsClient nsClient; private OpenMLDBDevops openMLDBDevops; private String openMLDBPath; + private String newBinPath; private String confPath; private String upgradePath; + private OpenMLDBDeploy openMLDBDeploy; @BeforeClass @Parameters("upgradeVersion") public void beforeClass(@Optional("0.6.0") String upgradeVersion){ @@ -77,36 +79,44 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ "c8 date,\n" + "c9 bool,\n" + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; - sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); - // 插入一定量的数据 List> dataList = new ArrayList<>(); for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); dataList.add(list); } + sdkClient.execute(Lists.newArrayList(memoryTableDDL)); sdkClient.insertList(memoryTableName,dataList); - sdkClient.insertList(ssdTableName,dataList); - sdkClient.insertList(hddTableName,dataList); + if(version.compareTo("0.5.0")>=0) { + sdkClient.execute(Lists.newArrayList(ssdTableDDL, hddTableDDL)); + sdkClient.insertList(ssdTableName, dataList); + sdkClient.insertList(hddTableName, dataList); + } upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; File file = new File(upgradePath); if(!file.exists()){ file.mkdirs(); } - OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); + openMLDBDeploy = new 
OpenMLDBDeploy(upgradeVersion); String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); openMLDBPath = upgradeDirectoryName+"/bin/openmldb"; + newBinPath = upgradeDirectoryName+"/bin/"; confPath = upgradeDirectoryName+"/conf"; } @Test public void testUpgrade(){ Map> map1 = nsClient.getTableOffset(dbName); log.info("升级前offset:"+map1); - openMLDBDevops.upgradeNs(openMLDBPath,confPath); - openMLDBDevops.upgradeTablet(openMLDBPath,confPath); + openMLDBDevops.upgradeNs(newBinPath,confPath); + openMLDBDevops.upgradeTablet(newBinPath,confPath); + openMLDBDevops.upgradeApiServer(newBinPath,confPath); + openMLDBDevops.upgradeTaskManager(openMLDBDeploy); Map> map2 = nsClient.getTableOffset(dbName); log.info("升级后offset:"+map2); Assert.assertEquals(map1,map2); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); + CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); + if(version.compareTo("0.5.0")>=0) { + CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + } } // public void upgradeNs(){ diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java new file mode 100644 index 00000000000..bd7416a96d1 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com._4paradigm.openmldb.java_sdk_test.common; + + +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.common.BaseTest; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com.google.common.collect.Lists; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; + +import java.sql.Statement; + +/** + * @author zhaowei + * @date 2020/6/11 2:02 PM + */ +@Slf4j +public class OpenMLDBTest extends BaseTest { + protected static SqlExecutor executor; + + @BeforeTest() + @Parameters({"env","version","openMLDBPath"}) + public void beforeTest(@Optional("qa") String env,@Optional("main") String version,@Optional("")String openMLDBPath) throws Exception { + OpenMLDBGlobalVar.env = env; + if(env.equalsIgnoreCase("cluster")){ + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; + openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(true); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + }else if(env.equalsIgnoreCase("standalone")){ + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version); + 
openMLDBDeploy.setOpenMLDBPath(openMLDBPath); + openMLDBDeploy.setCluster(false); + OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + }else{ + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + .basePath("/home/zhaowei01/openmldb-auto-test/tmp") + .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") + .zk_cluster("172.24.4.55:30000") + .zk_root_path("/openmldb") + .nsNum(2).tabletNum(3) + .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) + .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) + .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; + + } + String caseEnv = System.getProperty("caseEnv"); + if (!StringUtils.isEmpty(caseEnv)) { + OpenMLDBGlobalVar.env = caseEnv; + } + log.info("openMLDB global var env: {}", env); + OpenMLDBClient fesqlClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); + executor = fesqlClient.getExecutor(); + log.info("executor:{}",executor); + Statement statement = executor.getStatement(); + statement.execute("SET @@execute_mode='online';"); + } +} \ No newline at end of file diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index b3728cbe2df..477a3bf3b03 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -1,15 +1,14 @@ package 
com._4paradigm.openmldb.test_common.openmldb; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; import com._4paradigm.qa.openmldb_deploy.util.Tool; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import lombok.extern.slf4j.Slf4j; import org.testng.Assert; import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; + @Slf4j public class OpenMLDBDevops { private OpenMLDBInfo openMLDBInfo; @@ -57,19 +56,31 @@ public void operateNs(int nsIndex,String operator){ } // nsClient.checkOPStatusDone(dbName,null); } + public void operateApiServer(int apiServerIndex,String operator){ + String command = String.format("sh %s/openmldb-apiserver-%d/bin/start.sh %s apiserver",basePath,apiServerIndex+1,operator); + ExecutorUtil.run(command); + Tool.sleep(5*1000); + } + public void operateTaskManager(int taskManagerIndex,String operator){ + String command = String.format("sh %s/openmldb-task_manager-%d/bin/start.sh %s taskmanager",basePath,taskManagerIndex+1,operator); + ExecutorUtil.run(command); + Tool.sleep(5*1000); + String checkStatus = operator.equals("stop")?"offline":"online"; + sdkClient.checkComponentStatus(openMLDBInfo.getTaskManagerEndpoints().get(taskManagerIndex), checkStatus); + } public void operateZKOne(String operator){ String command = String.format("sh %s/zookeeper-3.4.14/bin/zkServer.sh %s",basePath,operator); ExecutorUtil.run(command); Tool.sleep(5*1000); } - public void upgradeNs(String openMLDBPath,String confPath){ + public void upgradeNs(String binPath,String confPath){ String basePath = openMLDBInfo.getBasePath(); int nsNum = openMLDBInfo.getNsNum(); for(int i=1;i<=nsNum;i++) { log.info("开始升级第{}个ns",i); String nsPath = basePath + "/openmldb-ns-"+i; backUp(nsPath); - cpOpenMLDB(nsPath, openMLDBPath); + cpBin(nsPath, binPath); cpConf(nsPath, confPath); modifyNsConf(nsPath, 
openMLDBInfo.getNsEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); operateNs(i,"restart"); @@ -77,14 +88,14 @@ public void upgradeNs(String openMLDBPath,String confPath){ log.info("第{}个ns升级结束",i); } } - public void upgradeTablet(String openMLDBPath, String confPath){ + public void upgradeTablet(String binPath, String confPath){ String basePath = openMLDBInfo.getBasePath(); int tabletNum = openMLDBInfo.getTabletNum(); for(int i=1;i<=tabletNum;i++) { log.info("开始升级第{}个tablet",i); String tabletPath = basePath + "/openmldb-tablet-"+i; backUp(tabletPath); - cpOpenMLDB(tabletPath, openMLDBPath); + cpBin(tabletPath, binPath); cpConf(tabletPath, confPath); modifyTabletConf(tabletPath, openMLDBInfo.getTabletEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); operateTablet(i,"stop"); @@ -94,23 +105,66 @@ public void upgradeTablet(String openMLDBPath, String confPath){ log.info("第{}个tablet升级结束",i); } } + public void upgradeApiServer(String binPath,String confPath){ + String basePath = openMLDBInfo.getBasePath(); + int apiServerNum = openMLDBInfo.getApiServerEndpoints().size(); + for(int i=1;i<=apiServerNum;i++) { + log.info("开始升级第{}个apiserver",i); + String apiServerPath = basePath + "/openmldb-apiserver-"+i; + backUp(apiServerPath); + cpBin(apiServerPath, binPath); + cpConf(apiServerPath, confPath); + modifyApiServerConf(apiServerPath, openMLDBInfo.getApiServerEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); + operateApiServer(i,"restart"); + Tool.sleep(20*1000); + log.info("第{}个ns升级结束",i); + } + } + public void upgradeTaskManager(OpenMLDBDeploy openMLDBDeploy){ + String basePath = openMLDBInfo.getBasePath(); + int taskManagerNum = openMLDBInfo.getTaskManagerEndpoints().size(); + for(int i=1;i<=taskManagerNum;i++) { + log.info("开始升级第{}个taskmanager",i); + String taskManagerPath = basePath + "/openmldb-task_manager-"+i; + backDirectory(taskManagerPath); + operateTaskManager(i,"stop"); + ExecutorUtil.run("rm -rf "+taskManagerPath); + String ipPort = 
openMLDBInfo.getTaskManagerEndpoints().get(i); + String[] ss = ipPort.split(":"); + String ip = ss[0]; + int port = Integer.parseInt(ss[1]); + openMLDBDeploy.deployTaskManager(basePath,ip,port,i,openMLDBInfo.getZk_cluster()); + log.info("第{}个taskmanager升级结束",i); + } + } public static void backUp(String path){ String command = "cp -rf "+path +"/conf "+path+"/conf-back"; ExecutorUtil.run(command); command = "ls "+path+" | grep conf-back"; List result = ExecutorUtil.run(command); Assert.assertEquals(result.get(0),"conf-back"); - command = "cp -rf "+path +"/bin/openmldb "+path+"/openmldb.back"; + command = "cp -rf "+path +"/bin "+path+"/bin-back"; ExecutorUtil.run(command); - command = "ls "+path+" | grep openmldb.back"; + command = "ls "+path+" | grep bin-back"; result = ExecutorUtil.run(command); - Assert.assertEquals(result.get(0),"openmldb.back"); + Assert.assertEquals(result.get(0),"bin-back"); } - public static void cpOpenMLDB(String path,String openMLDBPath){ - String command = "rm -rf "+path+"/bin/openmldb"; + public static void backDirectory(String path){ + if(path.endsWith("/")){ + path = path.substring(0,path.length()-1); + } + String directoryName = path.substring(path.lastIndexOf("/")+1); + String command = "cp -rf "+path +" "+path+"-back"; + ExecutorUtil.run(command); + command = "ls "+path+" | grep "+directoryName+"-back"; + List result = ExecutorUtil.run(command); + Assert.assertEquals(result.get(0),directoryName+"-back"); + } + public static void cpBin(String path, String binPath){ + String command = "rm -rf "+path+"/bin/"; List result = ExecutorUtil.run(command); Assert.assertEquals(result.size(),0); - command = "cp -rf "+openMLDBPath+" "+path+"/bin"; + command = "cp -rf "+binPath+" "+path; result = ExecutorUtil.run(command); Assert.assertEquals(result.size(),0); } @@ -125,7 +179,11 @@ public static void cpConf(String path,String confPath){ public static void modifyNsConf(String nsPath,String ip_port,String zk_endpoint){ String[] commands = { "sed -i 
's#--endpoint=.*#--endpoint="+ip_port+"#' "+nsPath+"/conf/nameserver.flags", - "sed -i 's#--zk_cluster=.*#--zk_cluster="+zk_endpoint+"#' "+nsPath+"/conf/nameserver.flags" + "sed -i 's#--zk_cluster=.*#--zk_cluster=" + zk_endpoint + "#' " + nsPath + "/conf/nameserver.flags", + "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+nsPath+"/conf/nameserver.flags", + "sed -i 's@#--zk_cluster=.*@--zk_cluster=" + zk_endpoint + "@' " + nsPath + "/conf/nameserver.flags", + "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+nsPath+"/conf/nameserver.flags", + "sed -i 's@--tablet=.*@#--tablet=127.0.0.1:9921@' "+nsPath+"/conf/nameserver.flags" }; for(String command:commands){ ExecutorUtil.run(command); @@ -149,4 +207,40 @@ public static void modifyTabletConf(String tabletPath,String ip_port,String zk_e ExecutorUtil.run(command); } } + public static void modifyApiServerConf(String apiServerPath,String ip_port,String zk_endpoint){ + String[] commands = { + "sed -i 's#--endpoint=.*#--endpoint="+ip_port+"#' "+apiServerPath+"/conf/apiserver.flags", + "sed -i 's/--zk_cluster=.*/--zk_cluster="+zk_endpoint+"/' "+apiServerPath+"/conf/apiserver.flags", + "sed -i 's@--zk_root_path=.*@--zk_root_path=/openmldb@' "+apiServerPath+"/conf/apiserver.flags", + "sed -i 's@#--zk_cluster=.*@--zk_cluster="+zk_endpoint+"@' "+apiServerPath+"/conf/apiserver.flags", + "sed -i 's@#--zk_root_path=.*@--zk_root_path=/openmldb@' "+apiServerPath+"/conf/apiserver.flags", + "sed -i 's@--nameserver=.*@#--nameserver=127.0.0.1:6527@' "+apiServerPath+"/conf/apiserver.flags" + }; + for(String command:commands){ + ExecutorUtil.run(command); + } + } + public static void modifyTaskManagerConf(String taskManagerPath,String ip_port,String zk_endpoint,String sparkHome){ + String[] ss = ip_port.split(":"); + String ip = ss[0]; + String port = ss[1]; + String sparkMaster = "local"; + String batchJobName = ExecutorUtil.run("ls " + taskManagerPath + "/taskmanager/lib | grep openmldb-batchjob").get(0); + String 
batchJobJarPath = taskManagerPath + "/taskmanager/lib/" + batchJobName; + String[] commands = { + "sed -i 's#server.host=.*#server.host=" + ip + "#' " + taskManagerPath + "/conf/taskmanager.properties", + "sed -i 's#server.port=.*#server.port=" + port + "#' " + taskManagerPath+ "/conf/taskmanager.properties", + "sed -i 's#zookeeper.cluster=.*#zookeeper.cluster=" + zk_endpoint + "#' " + taskManagerPath + "/conf/taskmanager.properties", + "sed -i 's@zookeeper.root_path=.*@zookeeper.root_path=/openmldb@' "+taskManagerPath+ "/conf/taskmanager.properties", + "sed -i 's@spark.master=.*@spark.master=" + sparkMaster + "@' "+taskManagerPath+ "/conf/taskmanager.properties", + "sed -i 's@spark.home=.*@spark.home=" + sparkHome + "@' "+taskManagerPath+ "/conf/taskmanager.properties", + "sed -i 's@batchjob.jar.path=.*@batchjob.jar.path=" + batchJobJarPath + "@' "+taskManagerPath+ "/conf/taskmanager.properties", +// "sed -i 's@spark.yarn.jars=.*@spark.yarn.jars=" + sparkYarnJars + "@' "+taskManagerPath+ "/conf/taskmanager.properties", +// "sed -i 's@offline.data.prefix=.*@offline.data.prefix=" + offlineDataPrefix + "@' "+taskManagerPath+ "/conf/taskmanager.properties", +// "sed -i 's@namenode.uri=.*@namenode.uri=" + nameNodeUri + "@' "+taskManagerPath+ "/conf/taskmanager.properties" + }; + for(String command:commands){ + ExecutorUtil.run(command); + } + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index cef7810af29..3cc512613e3 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -74,7 +74,7 @@ public static OpenMLDBResult 
deploy(SqlExecutor sqlExecutor,String sql){ openMLDBResult.setMsg(e.getMessage()); e.printStackTrace(); } - log.info("deploy sql:{}",sql); + log.info("deploy:{}",openMLDBResult); return openMLDBResult; } From 2c8b49db2b668707c2d13017e7d5ff95a46e04d2 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 18 Aug 2022 14:48:59 +0800 Subject: [PATCH 129/172] support upgrade test --- .github/workflows/integration-test-src.yml | 3 - .../common/OpenMLDBDeploy.java | 4 +- .../src/main/resources/deploy.properties | 4 +- .../test-suite/test_deploy.xml | 2 +- .../devops_test/common/ClusterTest.java | 1 + .../upgrade_test/UpgradeSingleton.java | 148 ++++++++++++++++++ .../upgrade_test/UpgradeStandalone.java | 78 ++++----- .../test_suite/test_upgrade_single.xml | 14 ++ .../test_common/openmldb/OpenMLDBDevops.java | 51 ++++++ 9 files changed, 252 insertions(+), 53 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml index abcf44e47c0..5688ee70706 100644 --- a/.github/workflows/integration-test-src.yml +++ b/.github/workflows/integration-test-src.yml @@ -86,7 +86,6 @@ jobs: uses: EnricoMi/publish-unit-test-result-action@v1 with: files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml - comment_mode: "create new" check_name: "SRC java-sdk-cluster-memory-0 Report" comment_title: "SRC java-sdk-cluster-memory-0 Report" @@ -139,7 +138,6 @@ jobs: uses: EnricoMi/publish-unit-test-result-action@v1 with: files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml - comment_mode: "create new" check_name: "SRC java-sdk-cluster-ssd-0 Report" comment_title: "SRC 
java-sdk-cluster-ssd-0 Report" @@ -166,7 +164,6 @@ jobs: uses: EnricoMi/publish-unit-test-result-action@v1 with: files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml - comment_mode: "create new" check_name: "SRC java-sdk-cluster-hdd-0 Report" comment_title: "SRC java-sdk-cluster-hdd-0 Report" diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 802e3841126..d94f6b08784 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -447,7 +447,7 @@ public OpenMLDBInfo deployStandalone(String testPath, String ip){ boolean apiServerOk = LinuxUtil.checkPortIsUsed(apiServerPort,3000,30); if(nsOk&&tabletOk&&apiServerOk){ log.info(String.format("standalone 部署成功,nsPort:{},tabletPort:{},apiServerPort:{}",nsPort,tabletPort,apiServerPort)); - OpenMLDBInfo fedbInfo = OpenMLDBInfo.builder() + OpenMLDBInfo openMLDBInfo = OpenMLDBInfo.builder() .deployType(OpenMLDBDeployType.STANDALONE) .openMLDBPath(testPath+"/openmldb-standalone/bin/openmldb") .apiServerEndpoints(Lists.newArrayList()) @@ -460,7 +460,7 @@ public OpenMLDBInfo deployStandalone(String testPath, String ip){ .tabletEndpoints(Lists.newArrayList(tabletEndpoint)) .apiServerEndpoints(Lists.newArrayList(apiServerEndpoint)) .build(); - return fedbInfo; + return openMLDBInfo; } }catch (Exception e){ e.printStackTrace(); diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 
483a4b18855..3e01e67f3cf 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -6,9 +6,9 @@ main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz -tmp=http://pkg.4paradigm.com:81/rtidb/test/tmp/openmldb-0.5.3-linux.tar.gz +tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz -tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz +tmp_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v060.tgz single=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3-linux.tar.gz single_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 7a98bf98efd..f512a758c26 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index b4abcf6600c..cbecd725508 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -56,6 +56,7 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);; openMLDBDeploy.setOpenMLDBPath(openMLDBPath); openMLDBDeploy.setCluster(true); + openMLDBDeploy.setSystemTableReplicaNum(1); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(1, 1); }else{ // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java new file mode 100644 index 00000000000..0e23d5535fd --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java @@ -0,0 +1,148 @@ +package com._4paradigm.openmldb.devops_test.upgrade_test; + +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.devops_test.util.CheckUtil; +import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import lombok.extern.slf4j.Slf4j; +import org.testng.Assert; +import org.testng.annotations.BeforeClass; +import 
org.testng.annotations.Optional; +import org.testng.annotations.Parameters; +import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Slf4j +public class UpgradeSingleton extends ClusterTest { + private String dbName; + private String memoryTableName; + private String ssdTableName; + private String hddTableName; + private SDKClient sdkClient; + private NsClient nsClient; + private OpenMLDBDevops openMLDBDevops; + private String openMLDBPath; + private String newBinPath; + private String confPath; + private String upgradePath; + private OpenMLDBDeploy openMLDBDeploy; + @BeforeClass + @Parameters("upgradeVersion") + public void beforeClass(@Optional("0.6.0") String upgradeVersion){ + dbName = "test_upgrade"; + memoryTableName = "test_memory"; + ssdTableName = "test_ssd"; + hddTableName = "test_hdd"; + sdkClient = SDKClient.of(executor); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + + int dataCount = 100; + sdkClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + 
"index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + sdkClient.execute(Lists.newArrayList(memoryTableDDL)); + sdkClient.insertList(memoryTableName,dataList); + if(version.compareTo("0.5.0")>=0) { + sdkClient.execute(Lists.newArrayList(ssdTableDDL, hddTableDDL)); + sdkClient.insertList(ssdTableName, dataList); + sdkClient.insertList(hddTableName, dataList); + } + upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; + File file = new File(upgradePath); + if(!file.exists()){ + file.mkdirs(); + } + openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); + String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); + openMLDBPath = upgradeDirectoryName+"/bin/openmldb"; + newBinPath = upgradeDirectoryName+"/bin/"; + confPath = upgradeDirectoryName+"/conf"; + } + @Test + public void testUpgrade(){ + Map> map1 = nsClient.getTableOffset(dbName); + log.info("升级前offset:"+map1); + openMLDBDevops.upgradeNs(newBinPath,confPath); + openMLDBDevops.upgradeTablet(newBinPath,confPath); + openMLDBDevops.upgradeApiServer(newBinPath,confPath); + openMLDBDevops.upgradeTaskManager(openMLDBDeploy); + Map> map2 = nsClient.getTableOffset(dbName); + log.info("升级后offset:"+map2); + Assert.assertEquals(map1,map2); + CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); + if(version.compareTo("0.5.0")>=0) { + CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + } + } + +// public void upgradeNs(){ +// Map> map1 = nsClient.getTableOffset(dbName); +// log.info("升级前offset:"+map1); +// openMLDBDevops.upgradeNs(openMLDBPath,confPath); +// Map> map2 = nsClient.getTableOffset(dbName); +// log.info("升级后offset:"+map2); +// Assert.assertEquals(map1,map2); +// } +// 
public void upgradeTablet(){ +// Map> map1 = nsClient.getTableOffset(dbName); +// log.info("升级前offset:"+map1); +// openMLDBDevops.upgradeTablet(openMLDBPath,confPath); +// Map> map2 = nsClient.getTableOffset(dbName); +// log.info("升级后offset:"+map2); +// Assert.assertEquals(map1,map2); +// CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); +// } + +// @AfterClass + public void afterClass(){ + String command = "rm -rf "+upgradePath; + ExecutorUtil.run(command); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java index 2eefe3e639a..eef23166e70 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java @@ -31,7 +31,7 @@ public class UpgradeStandalone extends ClusterTest { private SDKClient sdkClient; private NsClient nsClient; private OpenMLDBDevops openMLDBDevops; - private String openMLDBPath; + private String newBinPath; private String confPath; private String upgradePath; @BeforeClass @@ -45,8 +45,25 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); - int dataCount = 100; sdkClient.createAndUseDB(dbName); + upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; + File file = new File(upgradePath); + if(!file.exists()){ + file.mkdirs(); + } + OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); + String 
upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); + newBinPath = upgradeDirectoryName+"/bin/"; + confPath = upgradeDirectoryName+"/conf"; + } + @Test + public void testUpgrade(){ +// Map> map1 = nsClient.getTableOffset(dbName); +// log.info("升级前offset:"+map1); + openMLDBDevops.upgradeStandalone(newBinPath,confPath); +// Map> map2 = nsClient.getTableOffset(dbName); +// log.info("升级后offset:"+map2); +// Assert.assertEquals(map1,map2); String memoryTableDDL = "create table test_memory(\n" + "c1 string,\n" + "c2 smallint,\n" + @@ -57,7 +74,7 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ "c7 timestamp,\n" + "c8 date,\n" + "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1);"; String ssdTableDDL = "create table test_ssd(\n" + "c1 string,\n" + "c2 smallint,\n" + @@ -68,7 +85,7 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ "c7 timestamp,\n" + "c8 date,\n" + "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1,storage_mode=\"SSD\");"; String hddTableDDL = "create table test_hdd(\n" + "c1 string,\n" + "c2 smallint,\n" + @@ -79,56 +96,27 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ "c7 timestamp,\n" + "c8 date,\n" + "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; - sdkClient.execute(Lists.newArrayList(memoryTableDDL,ssdTableDDL,hddTableDDL)); + "index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1,storage_mode=\"HDD\");"; // 插入一定量的数据 + int dataCount = 100; List> dataList = new ArrayList<>(); for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); dataList.add(list); } + sdkClient.execute(Lists.newArrayList(memoryTableDDL)); sdkClient.insertList(memoryTableName,dataList); - 
sdkClient.insertList(ssdTableName,dataList); - sdkClient.insertList(hddTableName,dataList); - upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; - File file = new File(upgradePath); - if(!file.exists()){ - file.mkdirs(); + if(version.compareTo("0.5.0")>=0) { + sdkClient.execute(Lists.newArrayList(ssdTableDDL, hddTableDDL)); + sdkClient.insertList(ssdTableName, dataList); + sdkClient.insertList(hddTableName, dataList); } - OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); - String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); - openMLDBPath = upgradeDirectoryName+"/bin/openmldb"; - confPath = upgradeDirectoryName+"/conf"; - } - @Test - public void testUpgrade(){ - Map> map1 = nsClient.getTableOffset(dbName); - log.info("升级前offset:"+map1); - openMLDBDevops.upgradeNs(openMLDBPath,confPath); - openMLDBDevops.upgradeTablet(openMLDBPath,confPath); - Map> map2 = nsClient.getTableOffset(dbName); - log.info("升级后offset:"+map2); - Assert.assertEquals(map1,map2); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); - } -// public void upgradeNs(){ -// Map> map1 = nsClient.getTableOffset(dbName); -// log.info("升级前offset:"+map1); -// openMLDBDevops.upgradeNs(openMLDBPath,confPath); -// Map> map2 = nsClient.getTableOffset(dbName); -// log.info("升级后offset:"+map2); -// Assert.assertEquals(map1,map2); -// } -// public void upgradeTablet(){ -// Map> map1 = nsClient.getTableOffset(dbName); -// log.info("升级前offset:"+map1); -// openMLDBDevops.upgradeTablet(openMLDBPath,confPath); -// Map> map2 = nsClient.getTableOffset(dbName); -// log.info("升级后offset:"+map2); -// Assert.assertEquals(map1,map2); -// CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); -// } + CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); + 
if(version.compareTo("0.5.0")>=0) { + CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + } + } // @AfterClass public void afterClass(){ diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml new file mode 100644 index 00000000000..dd51a0562ab --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index 477a3bf3b03..0db729b3f7c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -38,6 +38,20 @@ public void operateTablet(int tabletIndex,String operator){ nsClient.checkTableIsAlive(dbName, null); } } + public void operateStandalone(String operator){ + String command = ""; + switch (operator){ + case "start": + command = String.format("sh %s/openmldb-standalone/bin/start-standalone.sh",basePath); + break; + case "stop": + command = String.format("sh %s/openmldb-standalone/bin/stop-standalone.sh",basePath); + break; + } + ExecutorUtil.run(command); + Tool.sleep(5*1000); + + } public void operateTablet(String operator){ int size = openMLDBInfo.getTabletEndpoints().size(); for(int i=0;i> "+standalonePath+"/conf/standalone_tablet.flags", + "echo '--recycle_bin_hdd_root_path=./recycle_hdd' >> 
"+standalonePath+"/conf/standalone_tablet.flags", + "echo '--ssd_root_path=./db_ssd' >> "+standalonePath+"/conf/standalone_tablet.flags", + "echo '--recycle_bin_ssd_root_path=./recycle_ssd' >> "+standalonePath+"/conf/standalone_tablet.flags", + "sed -i 's@--zk_cluster=.*@#--zk_cluster=127.0.0.1:2181@' "+standalonePath+"/conf/standalone_apiserver.flags", + "sed -i 's@--zk_root_path=.*@#--zk_root_path=/openmldb@' "+standalonePath+"/conf/standalone_apiserver.flags", + "sed -i 's#--endpoint=.*#--endpoint="+apiServerEndpoint+"#' "+standalonePath+"/conf/standalone_apiserver.flags", + "sed -i 's#--nameserver=.*#--nameserver="+nsEndpoint+"#' "+standalonePath+"/conf/standalone_apiserver.flags" + }; + for(String command:commands){ + ExecutorUtil.run(command); + } + } } From 1a60094a29fbd2e8963ed77c24f99a25f0623665 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 18 Aug 2022 20:16:07 +0800 Subject: [PATCH 130/172] support upgrade test --- .../long_window/test_count_where.yaml | 1 + .../long_window/test_long_window.yaml | 82 ++++++++++- .../long_window/test_long_window_batch.yaml | 1 + .../long_window/test_udaf.yaml | 1 + .../long_window/test_xxx_where.yaml | 1 + .../devops_test/common/ClusterTest.java | 2 +- .../checker/CheckerStrategy.java | 4 +- .../java_sdk_test/checker/PreAggChecker.java | 7 + .../executor/BaseSQLExecutor.java | 10 +- .../executor/BatchSQLExecutor.java | 12 +- .../executor/LongWindowExecutor.java | 72 ++++++++-- .../java_sdk_test/executor/StepExecutor.java | 133 ++++++++++++++++++ .../openmldb-test-common/pom.xml | 4 +- .../test_common/model/PreAggTable.java | 1 + .../openmldb/test_common/model/SQLCase.java | 1 + .../openmldb/test_common/util/SDKUtil.java | 10 +- 16 files changed, 307 insertions(+), 35 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StepExecutor.java diff --git a/cases/integration_test/long_window/test_count_where.yaml 
b/cases/integration_test/long_window/test_count_where.yaml index 333889414b9..e2ac7304c72 100644 --- a/cases/integration_test/long_window/test_count_where.yaml +++ b/cases/integration_test/long_window/test_count_where.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.6.0 cases: - id: 0 diff --git a/cases/integration_test/long_window/test_long_window.yaml b/cases/integration_test/long_window/test_long_window.yaml index 64c0ab46c40..3a4c3788a4b 100644 --- a/cases/integration_test/long_window/test_long_window.yaml +++ b/cases/integration_test/long_window/test_long_window.yaml @@ -13,7 +13,8 @@ # limitations under the License. db: test_zw -debugs: [] +debugs: ["delete 组合索引"] +version: 0.5.0 cases: - id: 0 @@ -316,4 +317,81 @@ cases: SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW); expect: success: false - msg: long_windows option doesn't match window in sql \ No newline at end of file + msg: long_windows option doesn't match window in sql + - + id: 10 + version: 0.6.1 + desc: delete pk + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"] + - [2,"aa",21,31,1.2,2.2,1590738992000,"2020-05-02"] + - [3,"aa",22,32,1.3,2.3,1590738993000,"2020-05-03"] + - [4,"aa",23,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"aa",24,34,1.5,2.5,1590738995000,"2020-05-05"] + steps: + - sql: SELECT id, c1, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","w1_long bigint"] + rows: + - [1,"aa",30] + - [2,"aa",61] + - [3,"aa",93] + - [4,"aa",96] + - [5,"aa",99] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa",1590738991000,1590738992999,2,61,null] 
+ - ["aa",1590738993000,1590738994999,2,65,null] + - sql: delete from {0} where c1='aa'; + expect: + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + count: 0 + - + id: 11 + version: 0.6.1 + desc: delete 组合索引 + longWindow: w1:2s + inputs: + - + columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"] + indexs: ["index1:c1|c3:c7"] + rows: + - [1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"] + - [2,"aa",20,31,1.2,2.2,1590738992000,"2020-05-02"] + - [3,"aa",20,32,1.3,2.3,1590738993000,"2020-05-03"] + - [4,"aa",20,33,1.4,2.4,1590738994000,"2020-05-04"] + - [5,"aa",20,34,1.5,2.5,1590738995000,"2020-05-05"] + steps: + - sql: SELECT id, c1,c3, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW); + expect: + order: id + columns: ["id int","c1 string","c3 int","w1_long bigint"] + rows: + - [1,"aa",20,30] + - [2,"aa",20,61] + - [3,"aa",20,93] + - [4,"aa",20,96] + - [5,"aa",20,99] + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + type: bigint + rows: + - ["aa|20",1590738991000,1590738992999,2,61,null] + - ["aa|20",1590738993000,1590738994999,2,65,null] + - sql: delete from {0} where c1='aa' and c3=20; + expect: + preAgg: + name: pre_{db_name}_{sp_name}_w1_sum_c4 + count: 0 + + + diff --git a/cases/integration_test/long_window/test_long_window_batch.yaml b/cases/integration_test/long_window/test_long_window_batch.yaml index d8bc4577423..60c938490d4 100644 --- a/cases/integration_test/long_window/test_long_window_batch.yaml +++ b/cases/integration_test/long_window/test_long_window_batch.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/integration_test/long_window/test_udaf.yaml b/cases/integration_test/long_window/test_udaf.yaml index 8a8a67bf79d..1eb2778c6e5 100644 --- a/cases/integration_test/long_window/test_udaf.yaml +++ b/cases/integration_test/long_window/test_udaf.yaml @@ -14,6 +14,7 
@@ db: test_zw debugs: [] +version: 0.5.0 cases: - id: 0 diff --git a/cases/integration_test/long_window/test_xxx_where.yaml b/cases/integration_test/long_window/test_xxx_where.yaml index 9cb16c57989..7915ceb3e2b 100644 --- a/cases/integration_test/long_window/test_xxx_where.yaml +++ b/cases/integration_test/long_window/test_xxx_where.yaml @@ -14,6 +14,7 @@ db: test_zw debugs: [] +version: 0.6.0 cases: - id: 0 diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index cbecd725508..f74ab99a5dd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -106,7 +106,7 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi if (!StringUtils.isEmpty(caseEnv)) { OpenMLDBGlobalVar.env = caseEnv; } - log.info("fedb global var env: {}", env); + log.info("openmldb global var env: {}", env); OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); executor = openMLDBClient.getExecutor(); log.info("executor:{}",executor); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java index cf585b0fba8..8fe3bce709c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/CheckerStrategy.java @@ -37,7 +37,9 @@ public static List build(SqlExecutor executor,SQLCase sqlCase, OpenMLDB return checkList; } ExpectDesc expect = sqlCase.getOnlineExpectByType(executorType); - + if (null == expect) { + return checkList; + } checkList.add(new SuccessChecker(expect, openMLDBResult)); if (CollectionUtils.isNotEmpty(expect.getColumns())) { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggChecker.java index e6105633b73..c05c91b7dc0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/PreAggChecker.java @@ -64,6 +64,13 @@ public void check() throws ParseException { String sql = String.format("select key,ts_start,ts_end,num_rows,agg_val,filter_key from %s",preAggTableName); OpenMLDBResult actualResult = SDKUtil.select(executor, "__PRE_AGG_DB", sql); List> actualRows = actualResult.getResult(); + int count = preAgg.getCount(); + if(count>=0){ + Assert.assertEquals(actualRows.size(),count,"preAggTable count 不一致"); + } + if(count==0){ + return; + } actualRows.stream().forEach(l->{ Object o = DataUtil.parseBinary((String)l.get(4),type); l.set(4,o); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java index 5a7d4c3e14e..b5a06c4789c 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BaseSQLExecutor.java @@ -48,13 +48,13 @@ public abstract class BaseSQLExecutor extends BaseExecutor{ protected Map openMLDBInfoMap; private Map resultMap; - public BaseSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { + public BaseSQLExecutor(SqlExecutor executor, SQLCase sqlCase, SQLCaseType executorType) { this.executor = executor; - this.sqlCase = fesqlCase; + this.sqlCase = sqlCase; this.executorType = executorType; - dbName = Objects.isNull(fesqlCase.getDb()) ? "" : fesqlCase.getDb(); - if (!CollectionUtils.isEmpty(fesqlCase.getInputs())) { - for (InputDesc inputDesc : fesqlCase.getInputs()) { + dbName = Objects.isNull(sqlCase.getDb()) ? "" : sqlCase.getDb(); + if (!CollectionUtils.isEmpty(sqlCase.getInputs())) { + for (InputDesc inputDesc : sqlCase.getInputs()) { tableNames.add(inputDesc.getName()); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java index ee6c21a01c7..19bbc787c2f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/BatchSQLExecutor.java @@ -41,8 +41,8 @@ public class BatchSQLExecutor extends BaseSQLExecutor { public BatchSQLExecutor(SqlExecutor executor, SQLCase fesqlCase, SQLCaseType executorType) { super(executor, fesqlCase, executorType); } - public BatchSQLExecutor(SQLCase 
fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { - super(fesqlCase, executor, executorMap, fedbInfoMap, executorType); + public BatchSQLExecutor(SQLCase sqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, SQLCaseType executorType) { + super(sqlCase, executor, executorMap, fedbInfoMap, executorType); } @Override @@ -94,7 +94,7 @@ public void prepare(String version,SqlExecutor executor){ @Override public OpenMLDBResult execute(String version, SqlExecutor executor){ log.info("version:{} execute begin",version); - OpenMLDBResult fesqlResult = null; + OpenMLDBResult openMLDBResult = null; List sqls = sqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { @@ -104,7 +104,7 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ }else { sql = SQLUtil.formatSql(sql, tableNames); } - fesqlResult = SDKUtil.sql(executor, dbName, sql); + openMLDBResult = SDKUtil.sql(executor, dbName, sql); } } String sql = sqlCase.getSql(); @@ -115,9 +115,9 @@ public OpenMLDBResult execute(String version, SqlExecutor executor){ }else { sql = SQLUtil.formatSql(sql, tableNames); } - fesqlResult = SDKUtil.sql(executor, dbName, sql); + openMLDBResult = SDKUtil.sql(executor, dbName, sql); } log.info("version:{} execute end",version); - return fesqlResult; + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java index 2e500d8b983..01dc3df4f0b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java @@ -16,14 +16,20 @@ package com._4paradigm.openmldb.java_sdk_test.executor; +import com._4paradigm.openmldb.java_sdk_test.checker.Checker; +import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.openmldb.test_common.util.SDKUtil; +import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.collections4.MapUtils; +import org.apache.commons.lang3.StringUtils; import java.sql.SQLException; import java.util.ArrayList; @@ -40,33 +46,69 @@ public LongWindowExecutor(SqlExecutor executor, SQLCase fesqlCase, boolean isBat spNames = new ArrayList<>(); } - public LongWindowExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map fedbInfoMap, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { - super(fesqlCase, executor, executorMap, fedbInfoMap, isBatchRequest, isAsyn, executorType); + public LongWindowExecutor(SQLCase sqlCase, SqlExecutor executor, Map executorMap, Map openMLDBInfoMap, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { + super(sqlCase, executor, executorMap, openMLDBInfoMap, isBatchRequest, isAsyn, executorType); spNames = new ArrayList<>(); } @Override public OpenMLDBResult execute(String version, SqlExecutor executor) { log.info("version:{} execute begin",version); - OpenMLDBResult fesqlResult = null; + OpenMLDBResult openMLDBResult = null; try { - if 
(sqlCase.getInputs().isEmpty() || - CollectionUtils.isEmpty(sqlCase.getInputs().get(0).getRows())) { - log.error("fail to execute in request query sql executor: sql case inputs is empty"); - return null; - } - String sql = sqlCase.getSql(); - log.info("sql: {}", sql); - if (sql == null || sql.length() == 0) { - return null; + List steps = sqlCase.getSteps(); + if(CollectionUtils.isNotEmpty(steps)) { + for (SQLCase step : steps) { + String sql = step.getSql(); + if (MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + } else { + sql = SQLUtil.formatSql(sql, tableNames); + } + if(sql.toLowerCase().startsWith("select ")) { + openMLDBResult = SDKUtil.executeLongWindowDeploy(executor, sqlCase, sql, false); + openMLDBResult.setDbName(dbName); + spNames.add(sqlCase.getSpName()); + }else{ + openMLDBResult = SDKUtil.sql(executor, dbName, sql); + openMLDBResult.setDbName(dbName); + openMLDBResult.setSpName(spNames.get(0)); + } + + +// if (executorType == SQLCaseType.kRequest) { +// InputDesc request = sqlCase.getInputs().get(0); +// openMLDBResult = SDKUtil.sqlRequestMode(executor, dbName, true, sql, request); +// } else if (executorType == SQLCaseType.kLongWindow) { +// openMLDBResult = SDKUtil.executeLongWindowDeploy(executor, sqlCase, sql, false); +// spNames.add(sqlCase.getSpName()); +// } else { +// openMLDBResult = SDKUtil.sql(executor, dbName, sql); +// } + List strategyList = CheckerStrategy.build(executor, step, openMLDBResult, executorType); + for (Checker checker : strategyList) { + checker.check(); + } + } + }else { + if (sqlCase.getInputs().isEmpty() || + CollectionUtils.isEmpty(sqlCase.getInputs().get(0).getRows())) { + log.error("fail to execute in request query sql executor: sql case inputs is empty"); + return null; + } + String sql = sqlCase.getSql(); + log.info("sql: {}", sql); + if (sql == null || sql.length() == 0) { + return null; + } + openMLDBResult = 
SDKUtil.executeLongWindowDeploy(executor, sqlCase, this.isAsyn); + spNames.add(sqlCase.getSpName()); } - fesqlResult = SDKUtil.executeLongWindowDeploy(executor, sqlCase, this.isAsyn); - spNames.add(sqlCase.getSpName()); }catch (Exception e){ e.printStackTrace(); } log.info("version:{} execute end",version); - return fesqlResult; + return openMLDBResult; } // private OpenMLDBResult executeSingle(SqlExecutor executor, String sql, boolean isAsyn) throws SQLException { diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StepExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StepExecutor.java new file mode 100644 index 00000000000..88d109ab386 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/StepExecutor.java @@ -0,0 +1,133 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com._4paradigm.openmldb.java_sdk_test.executor; + +import com._4paradigm.openmldb.java_sdk_test.checker.Checker; +import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; +import com._4paradigm.openmldb.java_sdk_test.checker.DiffVersionChecker; +import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBConfig; +import com._4paradigm.openmldb.sdk.SqlExecutor; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.model.InputDesc; +import com._4paradigm.openmldb.test_common.model.SQLCase; +import com._4paradigm.openmldb.test_common.model.SQLCaseType; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.util.SDKUtil; +import com._4paradigm.openmldb.test_common.util.SQLUtil; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.MapUtils; + +import java.util.List; +import java.util.Map; + +/** + * @author zhaowei + * @date 2020/6/15 11:29 AM + */ +@Slf4j +public class StepExecutor extends BaseSQLExecutor { + + protected List spNames; + + public StepExecutor(SqlExecutor executor, SQLCase sqlCase, SQLCaseType executorType) { + super(executor, sqlCase, executorType); + } + public StepExecutor(SQLCase fesqlCase, SqlExecutor executor, Map executorMap, Map openMLDBInfoMap, SQLCaseType executorType) { + super(fesqlCase, executor, executorMap, openMLDBInfoMap, executorType); + } + + @Override + public boolean verify() { + if (null != sqlCase.getMode() && sqlCase.getMode().contains("hybridse-only")) { + log.info("skip case in batch mode: {}", sqlCase.getDesc()); + return false; + } + if (null != sqlCase.getMode() && sqlCase.getMode().contains("batch-unsupport")) { + log.info("skip case in batch mode: {}", sqlCase.getDesc()); + return false; + } + if (null != sqlCase.getMode() && sqlCase.getMode().contains("rtidb-batch-unsupport")) { + log.info("skip 
case in rtidb batch mode: {}", sqlCase.getDesc()); + return false; + } + if (null != sqlCase.getMode() && sqlCase.getMode().contains("rtidb-unsupport")) { + log.info("skip case in rtidb mode: {}", sqlCase.getDesc()); + return false; + } + if (null != sqlCase.getMode() && sqlCase.getMode().contains("performance-sensitive-unsupport")) { + log.info("skip case in rtidb mode: {}", sqlCase.getDesc()); + return false; + } + if (null != sqlCase.getMode() && !OpenMLDBGlobalVar.tableStorageMode.equals("memory") && sqlCase.getMode().contains("disk-unsupport")) { + log.info("skip case in disk mode: {}", sqlCase.getDesc()); + return false; + } + if (OpenMLDBConfig.isCluster() && null != sqlCase.getMode() && sqlCase.getMode().contains("cluster-unsupport")) { + log.info("skip case in cluster mode: {}", sqlCase.getDesc()); + return false; + } + return true; + } + + @Override + public void prepare(String version,SqlExecutor executor){ + log.info("version:{} prepare begin",version); + boolean dbOk = executor.createDB(dbName); + log.info("version:{},create db:{},{}", version, dbName, dbOk); + SDKUtil.useDB(executor,dbName); + OpenMLDBResult res = SDKUtil.createAndInsert(executor, dbName, sqlCase.getInputs(), false); + if (!res.isOk()) { + throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . 
version:"+version); + } + log.info("version:{} prepare end",version); + } + + @Override + public OpenMLDBResult execute(String version, SqlExecutor executor){ + log.info("version:{} execute begin",version); + OpenMLDBResult openMLDBResult = null; + try { + List steps = sqlCase.getSteps(); + for (SQLCase step : steps) { + String sql = step.getSql(); + if (MapUtils.isNotEmpty(openMLDBInfoMap)) { + sql = SQLUtil.formatSql(sql, tableNames, openMLDBInfoMap.get(version)); + } else { + sql = SQLUtil.formatSql(sql, tableNames); + } + if (executorType == SQLCaseType.kRequest) { + InputDesc request = sqlCase.getInputs().get(0); + openMLDBResult = SDKUtil.sqlRequestMode(executor, dbName, true, sql, request); + } else if (executorType == SQLCaseType.kLongWindow) { + openMLDBResult = SDKUtil.executeLongWindowDeploy(executor, sqlCase, sql, false); + spNames.add(sqlCase.getSpName()); + } else { + openMLDBResult = SDKUtil.sql(executor, dbName, sql); + } + List strategyList = CheckerStrategy.build(executor, step, openMLDBResult, executorType); + for (Checker checker : strategyList) { + checker.check(); + } + } + }catch (Exception e){ + e.printStackTrace(); + } + log.info("version:{} execute end",version); + return openMLDBResult; + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 88a3b1460e8..eea335f61de 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.6.0 - 0.6.0 + 0.4.0 + 0.4.0 diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java index 90671962859..0f896f391d9 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/PreAggTable.java @@ -9,5 +9,6 @@ public class PreAggTable implements Serializable { private String name; private String type; + private int count = -1; private List> rows; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java index aa6133f6b21..6957a284074 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/model/SQLCase.java @@ -60,6 +60,7 @@ public class SQLCase implements Serializable{ private List tearDown; private List excludes; private String only; + private List steps; public static String formatSql(String sql, int idx, String name) { return sql.replaceAll("\\{" + idx + "\\}", name); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index 3cc512613e3..bb6d5f71510 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -54,7 +54,11 @@ public static OpenMLDBResult sqlList(SqlExecutor executor, String dbName, List commonColumnIndices) 
throws SQLException { + List commonColumnIndices) { OpenMLDBResult fesqlResult = null; if (sql.toLowerCase().startsWith("select")) { fesqlResult = selectBatchRequestModeWithPreparedStatement( From 2256fa0ce85bd6d4482e5ff2650269d7b76fb7a7 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 18 Aug 2022 20:22:48 +0800 Subject: [PATCH 131/172] support upgrade test --- .../openmldb-test-java/openmldb-test-common/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index eea335f61de..88a3b1460e8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -15,8 +15,8 @@ 8 8 - 0.4.0 - 0.4.0 + 0.6.0 + 0.6.0 From 94bc7411be5e71eea6eb14dde8a991ec7be07e85 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 18 Aug 2022 20:24:58 +0800 Subject: [PATCH 132/172] support upgrade test --- .../openmldb-devops-test/test_suite/test_upgrade.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml index ce0721c8e8c..d1434e108d8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml @@ -3,8 +3,8 @@ - - + + From 22abf644526b43a217da8e3589e46afc994fb9bd Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 18 Aug 2022 20:52:08 +0800 Subject: [PATCH 133/172] support upgrade test --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties 
b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 3e01e67f3cf..2dcea3f48de 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -29,5 +29,7 @@ tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmld 0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz 0.4.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.4.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz +0.5.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +0.5.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz From 33f6095bc58348f94810f1a3f5eff23593934a3e Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sun, 21 Aug 2022 08:41:32 +0800 Subject: [PATCH 134/172] support upgrade test --- .../openmldb/test_common/openmldb/OpenMLDBDevops.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index 0db729b3f7c..80fabe2d4b7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -97,7 +97,7 @@ public void upgradeNs(String binPath,String confPath){ cpBin(nsPath, binPath); cpConf(nsPath, confPath); modifyNsConf(nsPath, openMLDBInfo.getNsEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); - 
operateNs(i,"restart"); + operateNs(i-1,"restart"); Tool.sleep(20*1000); log.info("第{}个ns升级结束",i); } @@ -112,9 +112,9 @@ public void upgradeTablet(String binPath, String confPath){ cpBin(tabletPath, binPath); cpConf(tabletPath, confPath); modifyTabletConf(tabletPath, openMLDBInfo.getTabletEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); - operateTablet(i,"stop"); + operateTablet(i-1,"stop"); Tool.sleep(10*1000); - operateTablet(i,"start"); + operateTablet(i-1,"start"); Tool.sleep(20*1000); log.info("第{}个tablet升级结束",i); } @@ -129,7 +129,7 @@ public void upgradeApiServer(String binPath,String confPath){ cpBin(apiServerPath, binPath); cpConf(apiServerPath, confPath); modifyApiServerConf(apiServerPath, openMLDBInfo.getApiServerEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); - operateApiServer(i,"restart"); + operateApiServer(i-1,"restart"); Tool.sleep(20*1000); log.info("第{}个ns升级结束",i); } @@ -143,7 +143,7 @@ public void upgradeTaskManager(OpenMLDBDeploy openMLDBDeploy){ backDirectory(taskManagerPath); operateTaskManager(i,"stop"); ExecutorUtil.run("rm -rf "+taskManagerPath); - String ipPort = openMLDBInfo.getTaskManagerEndpoints().get(i); + String ipPort = openMLDBInfo.getTaskManagerEndpoints().get(i-1); String[] ss = ipPort.split(":"); String ip = ss[0]; int port = Integer.parseInt(ss[1]); From b3d1dfc309b62310ebbade04be2a75fbd00b70db Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sun, 21 Aug 2022 10:09:11 +0800 Subject: [PATCH 135/172] support upgrade test --- .../openmldb/devops_test/upgrade_test/UpgradeCluster.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java index 5f91478765b..3913d16aea9 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java @@ -98,9 +98,9 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ } openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); - openMLDBPath = upgradeDirectoryName+"/bin/openmldb"; - newBinPath = upgradeDirectoryName+"/bin/"; - confPath = upgradeDirectoryName+"/conf"; + openMLDBPath = upgradePath+"/"+upgradeDirectoryName+"/bin/openmldb"; + newBinPath = upgradePath+"/"+upgradeDirectoryName+"/bin/"; + confPath = upgradePath+"/"+upgradeDirectoryName+"/conf"; } @Test public void testUpgrade(){ From 2b6c9e0c8468806630864625804f51b230133513 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sun, 21 Aug 2022 20:51:53 +0800 Subject: [PATCH 136/172] support upgrade test --- .../openmldb-sdk-test/shell/stop-openmldb.sh | 3 ++- .../test_common/common/Condition.java | 1 + .../test_common/common/ConditionResult.java | 11 ++++++++++ .../test_common/openmldb/NsClient.java | 14 +++++++++++++ .../test_common/openmldb/OpenMLDBDevops.java | 10 ++++++---- .../openmldb/test_common/util/WaitUtil.java | 20 +++++++++++++++++++ 6 files changed, 54 insertions(+), 5 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/ConditionResult.java diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh index 996aa02cc30..af22cc3d7c3 100755 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/shell/stop-openmldb.sh @@ -54,4 +54,5 @@ rm -rf openmldb-ns-1/bin/openmldb rm -rf openmldb-ns-2/bin/openmldb rm -rf openmldb-tablet-1/bin/openmldb rm -rf openmldb-tablet-2/bin/openmldb -rm -rf openmldb-tablet-3/bin/openmldb \ No newline at end of file +rm -rf openmldb-tablet-3/bin/openmldb + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java index a755976d457..356330ae85c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/Condition.java @@ -3,6 +3,7 @@ /** * Created by zhangguanglin on 2020/1/16. */ +@FunctionalInterface public interface Condition { Boolean execute(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/ConditionResult.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/ConditionResult.java new file mode 100644 index 00000000000..811840b6a1a --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/ConditionResult.java @@ -0,0 +1,11 @@ +package com._4paradigm.openmldb.test_common.common; + +import org.apache.commons.lang3.tuple.Pair; + +/** + * Created by zhangguanglin on 2020/1/16. 
+ */ +@FunctionalInterface +public interface ConditionResult { + Pair execute(); +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 906194377a0..4e1345f7886 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -7,6 +7,7 @@ import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.testng.Assert; import java.util.*; @@ -58,6 +59,19 @@ public void checkOPStatusDone(String dbName,String tableName){ Assert.assertTrue(b,"check op done failed."); } public List showTable(String dbName,String tableName){ + String command = StringUtils.isNotEmpty(tableName) ?"showtable "+tableName:"showtable"; + String nsCommand = genNsCommand(dbName,command); + Tool.sleep(3*1000); + List result = WaitUtil.waitCondition(() -> { + List lines = CommandUtil.run(nsCommand); + if (lines.size() <= 2) { + return Pair.of(false, lines); + } + return Pair.of(true, lines); + }); + return result; + } + public List showTableHaveTable(String dbName,String tableName){ String command = StringUtils.isNotEmpty(tableName) ?"showtable "+tableName:"showtable"; List lines = runNs(dbName,command); return lines; diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java 
index 80fabe2d4b7..dc990b065dd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -92,12 +92,13 @@ public void upgradeNs(String binPath,String confPath){ int nsNum = openMLDBInfo.getNsNum(); for(int i=1;i<=nsNum;i++) { log.info("开始升级第{}个ns",i); + operateNs(i-1,"stop"); String nsPath = basePath + "/openmldb-ns-"+i; backUp(nsPath); cpBin(nsPath, binPath); cpConf(nsPath, confPath); modifyNsConf(nsPath, openMLDBInfo.getNsEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); - operateNs(i-1,"restart"); + operateNs(i-1,"start"); Tool.sleep(20*1000); log.info("第{}个ns升级结束",i); } @@ -107,12 +108,12 @@ public void upgradeTablet(String binPath, String confPath){ int tabletNum = openMLDBInfo.getTabletNum(); for(int i=1;i<=tabletNum;i++) { log.info("开始升级第{}个tablet",i); + operateTablet(i-1,"stop"); String tabletPath = basePath + "/openmldb-tablet-"+i; backUp(tabletPath); cpBin(tabletPath, binPath); cpConf(tabletPath, confPath); modifyTabletConf(tabletPath, openMLDBInfo.getTabletEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); - operateTablet(i-1,"stop"); Tool.sleep(10*1000); operateTablet(i-1,"start"); Tool.sleep(20*1000); @@ -124,12 +125,13 @@ public void upgradeApiServer(String binPath,String confPath){ int apiServerNum = openMLDBInfo.getApiServerEndpoints().size(); for(int i=1;i<=apiServerNum;i++) { log.info("开始升级第{}个apiserver",i); + operateApiServer(i-1,"stop"); String apiServerPath = basePath + "/openmldb-apiserver-"+i; backUp(apiServerPath); cpBin(apiServerPath, binPath); cpConf(apiServerPath, confPath); modifyApiServerConf(apiServerPath, openMLDBInfo.getApiServerEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); - operateApiServer(i-1,"restart"); + operateApiServer(i-1,"start"); Tool.sleep(20*1000); 
log.info("第{}个ns升级结束",i); } @@ -139,9 +141,9 @@ public void upgradeTaskManager(OpenMLDBDeploy openMLDBDeploy){ int taskManagerNum = openMLDBInfo.getTaskManagerEndpoints().size(); for(int i=1;i<=taskManagerNum;i++) { log.info("开始升级第{}个taskmanager",i); + operateTaskManager(i,"stop"); String taskManagerPath = basePath + "/openmldb-task_manager-"+i; backDirectory(taskManagerPath); - operateTaskManager(i,"stop"); ExecutorUtil.run("rm -rf "+taskManagerPath); String ipPort = openMLDBInfo.getTaskManagerEndpoints().get(i-1); String[] ss = ipPort.split(":"); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java index a8c97406356..134e1794411 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/WaitUtil.java @@ -2,8 +2,10 @@ import com._4paradigm.openmldb.test_common.common.Condition; +import com._4paradigm.openmldb.test_common.common.ConditionResult; import lombok.extern.log4j.Log4j; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.tuple.Pair; @Slf4j @@ -11,6 +13,9 @@ public class WaitUtil { public static boolean waitCondition(Condition condition) { return waitCondition(condition,10,1200); } + public static T waitCondition(ConditionResult condition) { + return waitCondition(condition,10,1200); + } public static boolean waitCondition(Condition condition,Condition fail) { return waitCondition(condition,fail,10,1200); } @@ -38,6 +43,21 @@ private static boolean waitCondition(Condition condition, int interval, int time log.info("wait timeout!"); return false; } + private static T waitCondition(ConditionResult condition, int 
interval, int timeout) { + int count = 1; + while (timeout > 0){ + log.info("retry count:{}",count); + Pair execute = condition.execute(); + if (execute.getLeft()){ + return execute.getRight(); + }else { + timeout -= interval; + Tool.sleep(interval*1000); + } + count++; + } + throw new IllegalStateException("wait result timeout!"); + } /** * * @param condition 等待的条件 From 0afbbfbedb37a4fe425dce0bf22f4672e0954f4d Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 22 Aug 2022 14:21:13 +0800 Subject: [PATCH 137/172] support upgrade test --- .../src/main/resources/deploy.properties | 8 +- .../devops_test/tmp/TestSDKClient.java | 6 +- .../upgrade_test/UpgradeCluster.java | 1 + .../upgrade_test/UpgradeClusterByCLI.java | 128 ++++++++++++++++++ .../test_suite/test_upgrade.xml | 2 +- .../executor/RestfulCliExecutor.java | 11 +- .../executor/CommandExecutor.java | 14 +- .../executor/LongWindowExecutor.java | 6 +- .../standalone/v030/DMLTest.java | 8 +- ...Facade.java => OpenMLDBCommandFacade.java} | 18 +-- .../command/OpenMLDBCommandUtil.java | 9 +- .../test_common/command/chain/DMLHandler.java | 10 +- .../command/chain/SqlChainManager.java | 4 +- .../test_common/openmldb/CliClient.java | 77 +++++++++++ .../test_common/openmldb/NsClient.java | 6 +- .../test_common/openmldb/SDKClient.java | 1 + 16 files changed, 256 insertions(+), 53 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java rename test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/{OpenMLDBComamndFacade.java => OpenMLDBCommandFacade.java} (75%) create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties 
b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 2dcea3f48de..20cf20259dc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -25,11 +25,13 @@ tmp_mac_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz 0.4.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.0-linux.tar.gz -0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.0-linux.tar.gz -0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz 0.4.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.4.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz +0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.0-linux.tar.gz +0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz 0.5.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.5.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz - +0.5.3=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3-linux.tar.gz +0.5.3_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +0.5.3_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v053.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java index f867f227131..4c0e84912ba 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestSDKClient.java @@ -1,10 +1,8 @@ package com._4paradigm.openmldb.devops_test.tmp; import com._4paradigm.openmldb.devops_test.common.ClusterTest; -import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; -import com._4paradigm.openmldb.test_common.openmldb.NsClient; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFacade; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com._4paradigm.openmldb.test_common.openmldb.SDKClient; import org.testng.annotations.Test; public class TestSDKClient extends ClusterTest { @@ -17,7 +15,7 @@ public void testComponents(){ // NsClient nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); // boolean flag = nsClient.checkOPStatusDone("test_devops4",null); - OpenMLDBComamndFacade.sql(OpenMLDBGlobalVar.mainInfo,"test_devops","select * from test_ssd;"); + OpenMLDBCommandFacade.sql(OpenMLDBGlobalVar.mainInfo,"test_devops","select * from test_ssd;"); } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java index 3913d16aea9..704b21141cd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java @@ -45,6 +45,7 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); int dataCount = 100; + sdkClient.createAndUseDB(dbName); String memoryTableDDL = "create table test_memory(\n" + "c1 
string,\n" + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java new file mode 100644 index 00000000000..c456b563660 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java @@ -0,0 +1,128 @@ +package com._4paradigm.openmldb.devops_test.upgrade_test; + +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.devops_test.util.CheckUtil; +import com._4paradigm.openmldb.test_common.openmldb.*; +import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; +import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; +import lombok.extern.slf4j.Slf4j; +import org.testng.Assert; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Optional; +import org.testng.annotations.Parameters; +import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Slf4j +public class UpgradeClusterByCLI extends ClusterTest { + private String dbName; + private String memoryTableName; + private String ssdTableName; + private String hddTableName; + private CliClient cliClient; + private NsClient nsClient; + private OpenMLDBDevops openMLDBDevops; + private String openMLDBPath; + private String newBinPath; + private String confPath; + private String upgradePath; + private OpenMLDBDeploy openMLDBDeploy; + @BeforeClass + @Parameters("upgradeVersion") + public void beforeClass(@Optional("0.6.0") String upgradeVersion){ + dbName = "test_upgrade"; + memoryTableName = "test_memory"; + 
ssdTableName = "test_ssd"; + hddTableName = "test_hdd"; + cliClient = CliClient.of(OpenMLDBGlobalVar.mainInfo,dbName); + nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); + openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); + + int dataCount = 100; + + cliClient.createAndUseDB(dbName); + String memoryTableDDL = "create table test_memory(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; + String ssdTableDDL = "create table test_ssd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; + String hddTableDDL = "create table test_hdd(\n" + + "c1 string,\n" + + "c2 smallint,\n" + + "c3 int,\n" + + "c4 bigint,\n" + + "c5 float,\n" + + "c6 double,\n" + + "c7 timestamp,\n" + + "c8 date,\n" + + "c9 bool,\n" + + "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; + List> dataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + dataList.add(list); + } + cliClient.execute(Lists.newArrayList(memoryTableDDL)); + cliClient.insertList(memoryTableName,dataList); + if(version.compareTo("0.5.0")>=0) { + cliClient.execute(Lists.newArrayList(ssdTableDDL, hddTableDDL)); + cliClient.insertList(ssdTableName, dataList); + cliClient.insertList(hddTableName, dataList); + } + upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; + File file = new File(upgradePath); + if(!file.exists()){ + file.mkdirs(); + } + openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); + String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); + openMLDBPath 
= upgradePath+"/"+upgradeDirectoryName+"/bin/openmldb"; + newBinPath = upgradePath+"/"+upgradeDirectoryName+"/bin/"; + confPath = upgradePath+"/"+upgradeDirectoryName+"/conf"; + } + @Test + public void testUpgrade(){ + Map> map1 = nsClient.getTableOffset(dbName); + log.info("升级前offset:"+map1); + openMLDBDevops.upgradeNs(newBinPath,confPath); + openMLDBDevops.upgradeTablet(newBinPath,confPath); + openMLDBDevops.upgradeApiServer(newBinPath,confPath); + openMLDBDevops.upgradeTaskManager(openMLDBDeploy); + Map> map2 = nsClient.getTableOffset(dbName); + log.info("升级后offset:"+map2); + Assert.assertEquals(map1,map2); +// CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); +// if(version.compareTo("0.5.0")>=0) { +// CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); +// } + } + +// @AfterClass + public void afterClass(){ + String command = "rm -rf "+upgradePath; + ExecutorUtil.run(command); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml index d1434e108d8..95775fffb3d 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java index d946b5b699b..feb76c966ce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/executor/RestfulCliExecutor.java @@ -20,10 +20,9 @@ import com._4paradigm.openmldb.http_test.common.RestfulGlobalVar; import com._4paradigm.openmldb.http_test.config.FedbRestfulConfig; import com._4paradigm.openmldb.java_sdk_test.checker.ResultChecker; -import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFacade; import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandUtil; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.common.Checker; import com._4paradigm.openmldb.test_common.model.ExpectDesc; import com._4paradigm.openmldb.test_common.model.InputDesc; @@ -91,7 +90,7 @@ public void prepare() { return sql; }) .collect(Collectors.toList()); - OpenMLDBComamndFacade.sqls(RestfulGlobalVar.mainInfo,FedbRestfulConfig.DB_NAME,sqls); + OpenMLDBCommandFacade.sqls(RestfulGlobalVar.mainInfo,FedbRestfulConfig.DB_NAME,sqls); } logger.info("prepare end"); } @@ -125,7 +124,7 @@ public void tearDown() { return sql; }) .collect(Collectors.toList()); - fesqlResult = OpenMLDBComamndFacade.sqls(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, sqls); + fesqlResult = OpenMLDBCommandFacade.sqls(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, sqls); } } @@ -137,7 +136,7 @@ public void tearDown() { for (InputDesc table : tables) { if(table.isDrop()) { String drop = "drop table " + table.getName() + ";"; - OpenMLDBComamndFacade.sql(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, drop); + OpenMLDBCommandFacade.sql(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, drop); } } } @@ -151,7 +150,7 @@ protected void afterAction(){ List sqls = afterAction.getSqls().stream() .map(sql -> SQLUtil.formatSql(sql,tableNames, RestfulGlobalVar.mainInfo)) 
.collect(Collectors.toList()); - fesqlResult = OpenMLDBComamndFacade.sqls(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, sqls); + fesqlResult = OpenMLDBCommandFacade.sqls(RestfulGlobalVar.mainInfo, FedbRestfulConfig.DB_NAME, sqls); } ExpectDesc expect = afterAction.getExpect(); if(expect!=null){ diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java index 2f7c57c08e5..6922576e0e5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/CommandExecutor.java @@ -19,7 +19,7 @@ import com._4paradigm.openmldb.java_sdk_test.checker.Checker; import com._4paradigm.openmldb.java_sdk_test.checker.CheckerStrategy; import com._4paradigm.openmldb.java_sdk_test.checker.DiffVersionChecker; -import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFacade; import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandUtil; import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; @@ -99,8 +99,8 @@ public void prepare(){ protected void prepare(String version, OpenMLDBInfo openMLDBInfo){ log.info("version:{} prepare begin",version); - OpenMLDBResult fesqlResult = OpenMLDBCommandUtil.createDB(openMLDBInfo,dbName); - log.info("version:{},create db:{},{}", version, dbName, fesqlResult.isOk()); + OpenMLDBResult openMLDBResult = OpenMLDBCommandUtil.createDB(openMLDBInfo,dbName); + log.info("version:{},create db:{},{}", version, dbName, 
openMLDBResult.isOk()); OpenMLDBResult res = OpenMLDBCommandUtil.createAndInsert(openMLDBInfo, dbName, sqlCase.getInputs()); if (!res.isOk()) { throw new RuntimeException("fail to run BatchSQLExecutor: prepare fail . version:"+version); @@ -123,7 +123,7 @@ public void execute() { protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ log.info("version:{} execute begin",version); - OpenMLDBResult fesqlResult = null; + OpenMLDBResult openMLDBResult = null; List sqls = sqlCase.getSqls(); if (sqls != null && sqls.size() > 0) { for (String sql : sqls) { @@ -133,7 +133,7 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ }else { sql = SQLUtil.formatSql(sql, tableNames); } - fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); + openMLDBResult = OpenMLDBCommandFacade.sql(openMLDBInfo, dbName, sql); } } String sql = sqlCase.getSql(); @@ -144,10 +144,10 @@ protected OpenMLDBResult execute(String version, OpenMLDBInfo openMLDBInfo){ }else { sql = SQLUtil.formatSql(sql, tableNames); } - fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo, dbName, sql); + openMLDBResult = OpenMLDBCommandFacade.sql(openMLDBInfo, dbName, sql); } log.info("version:{} execute end",version); - return fesqlResult; + return openMLDBResult; } @Override diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java index 01dc3df4f0b..41d1c0ffcfd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/executor/LongWindowExecutor.java @@ -41,8 +41,8 @@ public class LongWindowExecutor 
extends StoredProcedureSQLExecutor { // private List spNames; - public LongWindowExecutor(SqlExecutor executor, SQLCase fesqlCase, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { - super(executor, fesqlCase, isBatchRequest, isAsyn, executorType); + public LongWindowExecutor(SqlExecutor executor, SQLCase sqlCase, boolean isBatchRequest, boolean isAsyn, SQLCaseType executorType) { + super(executor, sqlCase, isBatchRequest, isAsyn, executorType); spNames = new ArrayList<>(); } @@ -74,8 +74,6 @@ public OpenMLDBResult execute(String version, SqlExecutor executor) { openMLDBResult.setDbName(dbName); openMLDBResult.setSpName(spNames.get(0)); } - - // if (executorType == SQLCaseType.kRequest) { // InputDesc request = sqlCase.getInputs().get(0); // openMLDBResult = SDKUtil.sqlRequestMode(executor, dbName, true, sql, request); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java index cef60cde89e..553d51172fe 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/standalone/v030/DMLTest.java @@ -17,7 +17,7 @@ package com._4paradigm.openmldb.java_sdk_test.standalone.v030; -import com._4paradigm.openmldb.test_common.command.OpenMLDBComamndFacade; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFacade; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.java_sdk_test.common.StandaloneTest; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; @@ -67,7 +67,7 @@ public void testInsertMulti1000(){ " c8 date not null,\n" + " c9 bool not 
null,\n" + " index(key=(c1), ts=c5));"; - OpenMLDBComamndFacade.sql(OpenMLDBGlobalVar.mainInfo, OpenMLDBGlobalVar.dbName,createSql); + OpenMLDBCommandFacade.sql(OpenMLDBGlobalVar.mainInfo, OpenMLDBGlobalVar.dbName,createSql); StringBuilder sb = new StringBuilder("insert into auto_multi_insert_1000 values "); int total = 1000; for(int i=0;i sqls) { - OpenMLDBResult fesqlResult = null; + OpenMLDBResult openMLDBResult = null; for(String sql:sqls){ - fesqlResult = sql(openMLDBInfo,dbName,sql); + openMLDBResult = sql(openMLDBInfo,dbName,sql); } - return fesqlResult; + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java index 2282876cfc4..dbf652462ea 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java @@ -19,7 +19,6 @@ import com._4paradigm.openmldb.test_common.common.LogProxy; import com._4paradigm.openmldb.test_common.model.InputDesc; import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.util.SDKUtil; import com._4paradigm.openmldb.test_common.util.SQLUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; @@ -36,13 +35,13 @@ public class OpenMLDBCommandUtil { public static OpenMLDBResult createDB(OpenMLDBInfo openMLDBInfo, String dbName) { String sql = String.format("create database %s ;",dbName); - OpenMLDBResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); + OpenMLDBResult fesqlResult = 
OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); return fesqlResult; } public static OpenMLDBResult desc(OpenMLDBInfo openMLDBInfo, String dbName, String tableName) { String sql = String.format("desc %s ;",tableName); - OpenMLDBResult fesqlResult = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,sql); + OpenMLDBResult fesqlResult = OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); return fesqlResult; } @@ -72,7 +71,7 @@ public static OpenMLDBResult createAndInsert(OpenMLDBInfo openMLDBInfo, String d createSql = SQLCase.formatSql(createSql, i, tableName); createSql = SQLUtil.formatSql(createSql, openMLDBInfo); if (!createSql.isEmpty()) { - OpenMLDBResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,createSql); + OpenMLDBResult res = OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,createSql); if (!res.isOk()) { logger.error("fail to create table"); // reportLog.error("fail to create table"); @@ -84,7 +83,7 @@ public static OpenMLDBResult createAndInsert(OpenMLDBInfo openMLDBInfo, String d for (String insertSql : inserts) { insertSql = SQLCase.formatSql(insertSql, i, input.getName()); if (!insertSql.isEmpty()) { - OpenMLDBResult res = OpenMLDBComamndFacade.sql(openMLDBInfo,dbName,insertSql); + OpenMLDBResult res = OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,insertSql); if (!res.isOk()) { logger.error("fail to insert table"); // reportLog.error("fail to insert table"); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DMLHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DMLHandler.java index 5f402063d1f..6cff029319c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DMLHandler.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DMLHandler.java @@ -32,12 +32,12 @@ public boolean preHandle(String sql) { @Override public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); - fesqlResult.setMsg(Joiner.on("\n").join(result)); - fesqlResult.setOk(CommandResultUtil.success(result)); - fesqlResult.setDbName(dbName); - return fesqlResult; + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(CommandResultUtil.success(result)); + openMLDBResult.setDbName(dbName); + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java index 50aa40d8ebe..09958192844 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java @@ -48,7 +48,7 @@ public static SqlChainManager of() { return ClassHolder.holder; } public OpenMLDBResult sql(OpenMLDBInfo openMLDBInfo, String dbName, String sql){ - OpenMLDBResult fesqlResult = sqlHandler.doHandle(openMLDBInfo, dbName, sql); - return fesqlResult; + OpenMLDBResult openMLDBResult = sqlHandler.doHandle(openMLDBInfo, dbName, sql); + return openMLDBResult; } } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java new file mode 100644 index 00000000000..a1e5c743713 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java @@ -0,0 +1,77 @@ +package com._4paradigm.openmldb.test_common.openmldb; + +import com._4paradigm.openmldb.jdbc.SQLResultSet; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.CommandUtil; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFacade; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.*; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.testng.Assert; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.*; + +@Slf4j +public class CliClient { + private OpenMLDBInfo openMLDBInfo; + private String dbName; + + private CliClient(OpenMLDBInfo openMLDBInfo,String dbName){ + this.openMLDBInfo = openMLDBInfo; + this.dbName = dbName; + } + public static CliClient of(OpenMLDBInfo openMLDBInfo,String dbName){ + return new CliClient(openMLDBInfo,dbName); + } + public void createAndUseDB(String dbName){ + List sqlList = new ArrayList<>(); + if (!dbIsExist(dbName)) { + sqlList.add(String.format("create database %s;", dbName)); + } + sqlList.add(String.format("use %s;", dbName)); + OpenMLDBCommandFacade.sqls(openMLDBInfo, dbName, sqlList); + } + + public boolean dbIsExist(String dbName){ + String sql = "show databases;"; + try { + 
OpenMLDBResult openMLDBResult = OpenMLDBCommandFacade.sql(openMLDBInfo, dbName, sql); + List> rows = openMLDBResult.getResult(); + for(List row:rows){ + if(row.get(0).equals(dbName)){ + return true; + } + } + return false; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + public OpenMLDBResult execute(String sql) { + OpenMLDBResult openMLDBResult = OpenMLDBCommandFacade.sql(openMLDBInfo, dbName, sql); + openMLDBResult.setSql(sql);; + return openMLDBResult; + } + public OpenMLDBResult execute(List sqlList) { + OpenMLDBResult openMLDBResult = null; + for(String sql:sqlList){ + openMLDBResult = execute(sql); + } + return openMLDBResult; + } + public void insert(String tableName,List list){ + List> dataList = new ArrayList<>(); + dataList.add(list); + insertList(tableName,dataList); + } + public void insertList(String tableName,List> dataList){ + String sql = SQLUtil.genInsertSQL(tableName,dataList); + OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 4e1345f7886..9ec61fc310b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -58,7 +58,7 @@ public void checkOPStatusDone(String dbName,String tableName){ }); Assert.assertTrue(b,"check op done failed."); } - public List showTable(String dbName,String tableName){ + public List showTableHaveTable(String dbName,String tableName){ String command = StringUtils.isNotEmpty(tableName) ?"showtable "+tableName:"showtable"; String nsCommand = genNsCommand(dbName,command); 
Tool.sleep(3*1000); @@ -71,7 +71,7 @@ public List showTable(String dbName,String tableName){ }); return result; } - public List showTableHaveTable(String dbName,String tableName){ + public List showTable(String dbName,String tableName){ String command = StringUtils.isNotEmpty(tableName) ?"showtable "+tableName:"showtable"; List lines = runNs(dbName,command); return lines; @@ -189,7 +189,7 @@ public Map> getTableEndPoint(String dbName,String tableName } public Map> getTableOffset(String dbName){ - List lines = showTable(dbName,null); + List lines = showTableHaveTable(dbName,null); Map> offsets = new HashMap<>(); for(int i=2;i list){ List> dataList = new ArrayList<>(); + dataList.add(list); insertList(tableName,dataList); } public void insertList(String tableName,List> dataList){ From e188d355bdef6f8515a9ebcbaab7d67ee72ed1e3 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 22 Aug 2022 14:43:58 +0800 Subject: [PATCH 138/172] support upgrade test --- .../openmldb-deploy/src/main/resources/deploy.properties | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 20cf20259dc..ea7bce0d4fd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -31,7 +31,7 @@ tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmld 0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz 0.5.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.5.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz -0.5.3=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.3-linux.tar.gz 
+0.5.3=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.5.3-linux.tar.gz 0.5.3_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz -0.5.3_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v053.tgz +0.5.3_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v053.tgz From 0e2f5ee42c1755bfefa98d25777c57e62b756a59 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 22 Aug 2022 14:53:52 +0800 Subject: [PATCH 139/172] support upgrade test --- .../openmldb-deploy/test-suite/test_deploy.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index f512a758c26..7a98bf98efd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -3,7 +3,7 @@ - + From 6e81fe2813a4e61500f831055fd0d86d35f86733 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 25 Aug 2022 10:58:04 +0800 Subject: [PATCH 140/172] modify deploy --- .../openmldb-deploy/src/main/resources/deploy.properties | 4 ++-- .../openmldb-deploy/test-suite/test_deploy_tmp2.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index ea7bce0d4fd..a9b47218121 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -16,9 +16,9 @@ single_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldb 
standalone=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-linux.tar.gz -tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-snapshot-linux.tar.gz +tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz -tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz +tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v060.tgz tmp_mac=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-darwin.tar.gz tmp_mac_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml index d67cb1e5448..c3104b2b7b6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml @@ -2,7 +2,7 @@ - + From 7373a57e35c9d22cb7202cdde158ad7bf89276d3 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 25 Aug 2022 14:30:32 +0800 Subject: [PATCH 141/172] modify deploy --- .../openmldb-deploy/src/main/resources/deploy.properties | 3 +-- .../openmldb-deploy/test-suite/test_deploy.xml | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index a9b47218121..e864c48336e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -28,10 +28,9 @@ tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmld 
0.4.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.4.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz 0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.0-linux.tar.gz -0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz 0.5.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.5.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz 0.5.3=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.5.3-linux.tar.gz 0.5.3_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.5.3_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v053.tgz - +0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index 7a98bf98efd..fff9340a4ee 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -2,7 +2,7 @@ - + From 240ef37f796e3d7515ac36e1ab8207c6579d5206 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 25 Aug 2022 14:34:59 +0800 Subject: [PATCH 142/172] modify deploy --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index e864c48336e..b19d4394612 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ 
-27,7 +27,7 @@ tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmld 0.4.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.0-linux.tar.gz 0.4.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.4.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz -0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.0-linux.tar.gz +0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.5.0-linux.tar.gz 0.5.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.5.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz 0.5.3=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.5.3-linux.tar.gz From 994b7383343459a3d20737aaadf7c20b1b4e58d3 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 25 Aug 2022 15:42:39 +0800 Subject: [PATCH 143/172] modify deploy --- .../openmldb-deploy/test-suite/test_deploy.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml index fff9340a4ee..7a98bf98efd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy.xml @@ -2,7 +2,7 @@ - + From 088b04ca7d1becb81992203e26ed4fa3d83bc8eb Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sat, 27 Aug 2022 22:02:25 +0800 Subject: [PATCH 144/172] modify case path --- .../cluster/sql_test/BatchRequestTest.java | 6 +++--- .../cluster/sql_test/DDLTest.java | 20 +++++++++---------- .../cluster/sql_test/DMLTest.java | 10 +++++----- .../cluster/sql_test/ExpressTest.java | 8 ++++---- .../cluster/sql_test/FunctionTest.java | 8 ++++---- .../cluster/sql_test/LastJoinTest.java | 8 ++++---- .../cluster/sql_test/MultiDBTest.java | 8 ++++---- 
.../cluster/sql_test/SelectTest.java | 8 ++++---- .../openmldb/test_common/common/BaseTest.java | 3 +++ 9 files changed, 41 insertions(+), 38 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/BatchRequestTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/BatchRequestTest.java index f3203c4dbdd..1c889a5f5c9 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/BatchRequestTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/BatchRequestTest.java @@ -29,19 +29,19 @@ public class BatchRequestTest extends OpenMLDBTest { @Story("BatchRequest") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/test_batch_request.yaml") + @Yaml(filePaths = "integration_test/test_batch_request.yaml") public void testBatchRequest(SQLCase testCase) { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatchRequest).run(); } @Story("SPBatchRequest") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/test_batch_request.yaml") + @Yaml(filePaths = "integration_test/test_batch_request.yaml") public void testSPBatchRequest(SQLCase testCase) { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatchRequestWithSp).run(); } @Story("SPBatchRequestAsyn") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/test_batch_request.yaml") + @Yaml(filePaths = "integration_test/test_batch_request.yaml") public void testSPBatchRequestAsyn(SQLCase testCase) { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatchRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java index 52ebeca22bc..3138f5629e2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DDLTest.java @@ -34,13 +34,13 @@ @Feature("DDL") public class DDLTest extends OpenMLDBTest { @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/ddl/test_create.yaml") + @Yaml(filePaths = "integration_test/ddl/test_create.yaml") @Story("create") public void testCreate(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } - @Yaml(filePaths = "function/ddl/test_create.yaml") + @Yaml(filePaths = "integration_test/ddl/test_create.yaml") @Story("create") @Test(dataProvider = "getCase",enabled = false) public void testCreateByCli(SQLCase testCase){ @@ -48,53 +48,53 @@ public void testCreateByCli(SQLCase testCase){ } @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/ddl/test_ttl.yaml") + @Yaml(filePaths = "integration_test/ddl/test_ttl.yaml") @Story("ttl") public void testTTL(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/ddl/test_ttl.yaml") + @Yaml(filePaths = "integration_test/ddl/test_ttl.yaml") @Story("ttl") public void testTTLByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/ddl/test_create_index.yaml") + @Yaml(filePaths = "integration_test/ddl/test_create_index.yaml") @Story("create_index") public void testCreateIndex(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase",enabled 
= false) - @Yaml(filePaths = "function/ddl/test_create_index.yaml") + @Yaml(filePaths = "integration_test/ddl/test_create_index.yaml") @Story("create_index") public void testCreateIndexByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/ddl/test_options.yaml") + @Yaml(filePaths = "integration_test/ddl/test_options.yaml") @Story("options") public void testOptions(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/ddl/test_options.yaml") + @Yaml(filePaths = "integration_test/ddl/test_options.yaml") @Story("options") public void testOptionsByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") + @Yaml(filePaths = "integration_test/ddl/test_create_no_index.yaml") @Story("create_no_index") public void testCreateNoIndex(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/ddl/test_create_no_index.yaml") + @Yaml(filePaths = "integration_test/ddl/test_create_no_index.yaml") @Story("create_no_index") public void testCreateNoIndexByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java index bc297e3ae64..3f030154580 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/DMLTest.java @@ -36,35 +36,35 @@ public class DMLTest extends OpenMLDBTest { @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/dml/test_insert.yaml"}) + @Yaml(filePaths = {"integration_test/dml/test_insert.yaml"}) @Story("insert") public void testInsert(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = {"function/dml/test_insert.yaml"}) + @Yaml(filePaths = {"integration_test/dml/test_insert.yaml"}) @Story("insert") public void testInsertByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); } @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/dml/test_insert_prepared.yaml") + @Yaml(filePaths = "integration_test/dml/test_insert_prepared.yaml") @Story("insert-prepared") public void testInsertWithPrepared(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kInsertPrepared).run(); } @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/dml/multi_insert.yaml") + @Yaml(filePaths = "integration_test/dml/multi_insert.yaml") @Story("multi-insert") public void testMultiInsert(SQLCase testCase){ ExecutorFactory.build(executor,testCase, SQLCaseType.kDDL).run(); } @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/dml/multi_insert.yaml") + @Yaml(filePaths = "integration_test/dml/multi_insert.yaml") @Story("multi-insert") public void testMultiInsertByCli(SQLCase testCase){ ExecutorFactory.build(testCase, SQLCaseType.kClusterCLI).run(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ExpressTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ExpressTest.java 
index 0b4f5a1ebf8..1326a447492 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ExpressTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/ExpressTest.java @@ -37,7 +37,7 @@ public class ExpressTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase",enabled = false) @Yaml(filePaths = { - "function/expression/" + "integration_test/expression/" }) public void testExpress(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); @@ -45,7 +45,7 @@ public void testExpress(SQLCase testCase) throws Exception { @Story("request") @Test(dataProvider = "getCase") @Yaml(filePaths = { - "function/expression/" + "integration_test/expression/" }) public void testExpressRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); @@ -53,7 +53,7 @@ public void testExpressRequestMode(SQLCase testCase) throws Exception { @Story("requestWithSp") @Test(dataProvider = "getCase") @Yaml(filePaths = { - "function/expression/" + "integration_test/expression/" }) public void testExpressRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); @@ -61,7 +61,7 @@ public void testExpressRequestModeWithSp(SQLCase testCase) throws Exception { @Story("requestWithSpAysn") @Test(dataProvider = "getCase") @Yaml(filePaths = { - "function/expression/" + "integration_test/expression/" }) public void testExpressRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java index 1fff9b1547a..14124a60205 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/FunctionTest.java @@ -36,25 +36,25 @@ public class FunctionTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = "function/function/") + @Yaml(filePaths = "integration_test/function/") public void testFunction(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/function/") + @Yaml(filePaths = "integration_test/function/") public void testFunctionRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/function/") + @Yaml(filePaths = "integration_test/function/") public void testFunctionRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/function/") + @Yaml(filePaths = "integration_test/function/") public void testFunctionRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LastJoinTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LastJoinTest.java index 128e21c2e0c..cea123ff9a6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LastJoinTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/LastJoinTest.java @@ -36,25 +36,25 @@ public class LastJoinTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = {"function/join/"}) + @Yaml(filePaths = {"integration_test/join/"}) public void testLastJoin(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/join/"}) + @Yaml(filePaths = {"integration_test/join/"}) public void testLastJoinRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/join/"}) + @Yaml(filePaths = {"integration_test/join/"}) public void testLastJoinRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kRequestWithSp).run(); } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/join/"}) + @Yaml(filePaths = {"integration_test/join/"}) public void testLastJoinRequestModeWithSpAsync(SQLCase testCase) throws Exception { ExecutorFactory.build(executor,testCase, SQLCaseType.kRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/MultiDBTest.java 
b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/MultiDBTest.java index 4dcfd0df47a..680c62832d6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/MultiDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/MultiDBTest.java @@ -37,26 +37,26 @@ public class MultiDBTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = {"function/multiple_databases/"}) + @Yaml(filePaths = {"integration_test/multiple_databases/"}) @Step("{testCase.desc}") public void testMultiDB(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/multiple_databases/"}) + @Yaml(filePaths = {"integration_test/multiple_databases/"}) public void testMultiDBRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/multiple_databases/"}) + @Yaml(filePaths = {"integration_test/multiple_databases/"}) public void testMultiDBRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/multiple_databases/"}) + @Yaml(filePaths = {"integration_test/multiple_databases/"}) public void testMultiDBRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } diff --git 
a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/SelectTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/SelectTest.java index 9930e689ad6..474e44f386f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/SelectTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/sql_test/SelectTest.java @@ -37,26 +37,26 @@ public class SelectTest extends OpenMLDBTest { @Story("batch") @Test(dataProvider = "getCase",enabled = false) - @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + @Yaml(filePaths = {"integration_test/select/","query/const_query.yaml"}) @Step("{testCase.desc}") public void testSelect(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kBatch).run(); } @Story("request") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + @Yaml(filePaths = {"integration_test/select/","query/const_query.yaml"}) public void testSelectRequestMode(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequest).run(); } @Story("requestWithSp") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + @Yaml(filePaths = {"integration_test/select/","query/const_query.yaml"}) public void testSelectRequestModeWithSp(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSp).run(); } @Story("requestWithSpAysn") @Test(dataProvider = "getCase") - @Yaml(filePaths = {"function/select/","query/const_query.yaml"}) + @Yaml(filePaths = {"integration_test/select/","query/const_query.yaml"}) public void 
testSelectRequestModeWithSpAysn(SQLCase testCase) throws Exception { ExecutorFactory.build(executor, testCase, SQLCaseType.kRequestWithSpAsync).run(); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java index a0f9e5a9e9f..bbf084faebb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/common/BaseTest.java @@ -66,6 +66,9 @@ public void BeforeMethod(Method method, Object[] testData) { testData[0], "fail to run openmldb test with null SQLCase: check yaml case"); if (testData[0] instanceof SQLCase) { SQLCase sqlCase = (SQLCase) testData[0]; + System.out.println("AAAAAA"); + log.info(sqlCase.getDesc()); + System.out.println(sqlCase.getDesc()); Assert.assertNotEquals(CaseFile.FAIL_SQL_CASE, sqlCase.getDesc(), "fail to run openmldb test with FAIL DATA PROVIDER SQLCase: check yaml case"); testName.set(String.format("[%d]%s.%s", testNum, method.getName(), CaseNameFormat(sqlCase))); From ec5de3aa65368e99c895a6f41ad28aff56abafa9 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Sun, 28 Aug 2022 20:40:02 +0800 Subject: [PATCH 145/172] update case --- cases/integration_test/window/window_attributes.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/cases/integration_test/window/window_attributes.yaml b/cases/integration_test/window/window_attributes.yaml index 53ebc8fcde7..c77844b7b00 100644 --- a/cases/integration_test/window/window_attributes.yaml +++ b/cases/integration_test/window/window_attributes.yaml @@ -486,6 +486,7 @@ cases: - id: 9 desc: | ROWS Window with exclude current_time and exclude current_row + mode: disk-unsupport inputs: - 
name: t1 columns: From 100f7fe34eee1a8bd6b753ee40fab031dd93352d Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 29 Aug 2022 16:55:29 +0800 Subject: [PATCH 146/172] support upgrade test --- .../long_window/test_long_window.yaml | 12 ++--- .../src/main/resources/deploy.properties | 4 +- .../devops_test/common/ClusterTest.java | 53 ++++++++++--------- .../openmldb/devops_test/util/CheckUtil.java | 18 ++++++- .../high_availability/TestCluster.java | 30 +++++------ .../openmldb/devops_test/tmp/TestCommand.java | 13 ++++- .../upgrade_test/UpgradeCluster.java | 4 +- .../upgrade_test/UpgradeClusterByCLI.java | 34 ++++++++---- .../upgrade_test/UpgradeSingleton.java | 4 +- .../upgrade_test/UpgradeStandalone.java | 6 +-- .../test_suite/test_upgrade.xml | 4 +- .../openmldb-sdk-test/test_suite/test_tmp.xml | 21 ++++---- .../openmldb-test-common/pom.xml | 2 +- .../command/OpenMLDBCommandUtil.java | 14 ++--- .../test_common/command/chain/DDLHandler.java | 10 ++-- .../command/chain/DescHandler.java | 12 ++--- .../command/chain/QueryHandler.java | 16 +++--- .../command/chain/ShowDeploymentHandler.java | 12 ++--- .../command/chain/ShowDeploymentsHandler.java | 14 ++--- .../command/chain/ShowTableStatusHandler.java | 47 ++++++++++++++++ .../command/chain/SqlChainManager.java | 2 + .../test_common/openmldb/CliClient.java | 8 +++ .../test_common/util/CommandResultUtil.java | 21 ++++++++ 23 files changed, 241 insertions(+), 120 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowTableStatusHandler.java diff --git a/cases/integration_test/long_window/test_long_window.yaml b/cases/integration_test/long_window/test_long_window.yaml index 3a4c3788a4b..75f6f6193a5 100644 --- a/cases/integration_test/long_window/test_long_window.yaml +++ b/cases/integration_test/long_window/test_long_window.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: ["delete 组合索引"] +debugs: [] version: 0.5.0 cases: - @@ -64,7 +64,7 @@ cases: name: pre_{db_name}_{sp_name}_w1_count_c4 type: bigint rows: - - ["aa",1577664000000,1577836799999,2,1,null] + - ["aa",1577664000000,1577836799999,1,1,null] - ["aa",1577836800000,1578009599999,2,2,null] - id: 2 @@ -348,8 +348,8 @@ cases: name: pre_{db_name}_{sp_name}_w1_sum_c4 type: bigint rows: - - ["aa",1590738991000,1590738992999,2,61,null] - - ["aa",1590738993000,1590738994999,2,65,null] + - ["aa",1590738990000,1590738991999,1,30,null] + - ["aa",1590738992000,1590738993999,2,63,null] - sql: delete from {0} where c1='aa'; expect: preAgg: @@ -385,8 +385,8 @@ cases: name: pre_{db_name}_{sp_name}_w1_sum_c4 type: bigint rows: - - ["aa|20",1590738991000,1590738992999,2,61,null] - - ["aa|20",1590738993000,1590738994999,2,65,null] + - ["aa|20",1590738990000,1590738991999,1,30,null] + - ["aa|20",1590738992000,1590738993999,2,63,null] - sql: delete from {0} where c1='aa' and c3=20; expect: preAgg: diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index b19d4394612..8012d74a6c2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -20,9 +20,9 @@ tmp2=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.ta tmp2_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz tmp2_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v060.tgz -tmp_mac=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.5.2-darwin.tar.gz +tmp_mac=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-darwin.tar.gz tmp_mac_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 
-tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz +tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v060.tgz 0.4.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-0.4.0-linux.tar.gz 0.4.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index f74ab99a5dd..fe90cb83111 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -59,19 +59,20 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setSystemTableReplicaNum(1); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(1, 1); }else{ -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// .deployType(OpenMLDBDeployType.CLUSTER) -// .openMLDBDirectoryName("openmldb-0.5.2-darwin") -// .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") -// .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") -// .zk_cluster("127.0.0.1:30000") -// .zk_root_path("/openmldb") -// .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30005")) -// .tabletEndpoints(Lists.newArrayList("127.0.0.1:30001", "127.0.0.1:30002", "127.0.0.1:30003")) -// .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30006")) -// .build(); -// OpenMLDBGlobalVar.env = "cluster"; + OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() + .deployType(OpenMLDBDeployType.CLUSTER) + 
.openMLDBDirectoryName("openmldb-0.6.0-darwin") + .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") + .openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") + .zk_cluster("127.0.0.1:30003") + .zk_root_path("/openmldb") + .nsNum(2).tabletNum(3) + .nsEndpoints(Lists.newArrayList("127.0.0.1:30008", "127.0.0.1:30009")) + .tabletEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30006", "127.0.0.1:30007")) + .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30010")) + .taskManagerEndpoints(Lists.newArrayList("127.0.0.1:30011")) + .build(); + OpenMLDBGlobalVar.env = "cluster"; // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() // .openMLDBDirectoryName("openmldb-0.5.2-linux") @@ -87,19 +88,19 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi // .build(); // OpenMLDBGlobalVar.env = "cluster"; - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/single") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30008") - .zk_root_path("/openmldb") - .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) - .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) - .build(); - OpenMLDBGlobalVar.env = "cluster"; +// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.CLUSTER) +// .basePath("/home/zhaowei01/openmldb-auto-test/single") +// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") +// .zk_cluster("172.24.4.55:30008") +// .zk_root_path("/openmldb") +// .nsNum(1).tabletNum(1) +// .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) +// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) +// 
.apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) +// .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) +// .build(); +// OpenMLDBGlobalVar.env = "cluster"; } String caseEnv = System.getProperty("caseEnv"); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java index 0a767d20e6b..4b3b198d852 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java @@ -1,5 +1,6 @@ package com._4paradigm.openmldb.devops_test.util; +import com._4paradigm.openmldb.test_common.openmldb.CliClient; import com._4paradigm.openmldb.test_common.openmldb.NsClient; import com._4paradigm.openmldb.test_common.openmldb.SDKClient; import org.apache.commons.collections4.CollectionUtils; @@ -11,7 +12,7 @@ import java.util.List; public class CheckUtil { - public static void addDataCheck(SDKClient sdkClient, NsClient nsClient, String dbName, List tableNames, int originalCount, int addCount){ + public static void addDataCheckByOffset(SDKClient sdkClient, NsClient nsClient, String dbName, List tableNames, int originalCount, int addCount){ List> addDataList = new ArrayList<>(); for(int i=0;i tableNames, int originalCount, int addCount){ + List> addDataList = new ArrayList<>(); + for(int i=0;i list = Lists.newArrayList(c1 + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); + addDataList.add(list); + } + String msg = "table add data check count failed."; + for(String tableName:tableNames){ + if (CollectionUtils.isNotEmpty(addDataList)) { + sdkClient.insertList(tableName,addDataList); + } + 
Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); + } + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index f1bd535dd3d..7b16271dea7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -5,8 +5,6 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.openmldb.*; import com._4paradigm.qa.openmldb_deploy.util.Tool; -import org.apache.commons.collections4.CollectionUtils; -import org.apache.commons.lang3.RandomStringUtils; import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; @@ -86,41 +84,41 @@ public void testMoreReplica(){ Assert.assertEquals(sdkClient.getTableRowCount(hddTable),dataCount,oneTabletStopMsg); // tablet start,数据可以回复,要看磁盘表和内存表。 openMLDBDevops.operateTablet(0,"start"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); //创建磁盘表和内存表,在重启tablet,数据可回复,内存表和磁盘表可以正常访问。 openMLDBDevops.operateTablet(0,"restart"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); //创建磁盘表和内存表,插入一些数据,然后make snapshot,在重启tablet,数据可回复。 
nsClient.makeSnapshot(dbName,memoryTable); nsClient.makeSnapshot(dbName,ssdTable); nsClient.makeSnapshot(dbName,hddTable); //tablet 依次restart,数据可回复,可以访问。 openMLDBDevops.operateTablet("restart"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); //1个ns stop,可以正常访问。 openMLDBDevops.operateNs(0,"stop"); resetClient(); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 1个ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); resetClient(); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk stop 在start后 可以访问 openMLDBDevops.operateZKOne("stop"); Tool.sleep(3000); openMLDBDevops.operateZKOne("start"); Tool.sleep(3000); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 单zk restart 后可以访问 openMLDBDevops.operateZKOne("restart"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + 
CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); // 2个tablet stop 可以访问 openMLDBDevops.operateTablet(0,"stop"); openMLDBDevops.operateTablet(1,"stop"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+30,0); //3个tablet stop,不能访问。 openMLDBDevops.operateTablet(2,"stop"); OpenMLDBResult openMLDBResult = sdkClient.execute(String.format("select * from %s",memoryTable)); @@ -202,24 +200,24 @@ public void testSingle(){ Assert.assertTrue(openMLDBResult.getMsg().contains("fail")); // tablet start,数据可以回复,要看磁盘表和内存表。 openMLDBDevops.operateTablet(0,"start"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount,10); //make snapshot,在重启tablet,数据可回复。 nsClient.makeSnapshot(dbName,memoryTable); nsClient.makeSnapshot(dbName,ssdTable); nsClient.makeSnapshot(dbName,hddTable); //重启tablet,数据可回复,内存表和磁盘表可以正常访问。 openMLDBDevops.operateTablet(0,"restart"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+10,10); //ns stop start 可以正常访问。 openMLDBDevops.operateNs(0,"stop"); // resetClient(); //ns start 可以访问。 openMLDBDevops.operateNs(0,"start"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); //ns restart 可以访问。 openMLDBDevops.operateNs(0,"restart"); // resetClient(); - 
CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,0); // stop tablet ns 后 在启动 ns tablet 可以访问 openMLDBDevops.operateTablet(0,"stop"); openMLDBDevops.operateNs(0,"stop"); @@ -227,7 +225,7 @@ public void testSingle(){ openMLDBDevops.operateNs(0,"start"); Tool.sleep(10*1000); openMLDBDevops.operateTablet(0,"start"); - CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); + CheckUtil.addDataCheckByOffset(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTable,ssdTable,hddTable),dataCount+20,10); } public void resetClient(){ OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java index 6542cfa361a..4e3a1b8f15c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java @@ -1,5 +1,10 @@ package com._4paradigm.openmldb.devops_test.tmp; +import com._4paradigm.openmldb.devops_test.common.ClusterTest; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFacade; +import com._4paradigm.openmldb.test_common.command.chain.SqlChainManager; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.test_tool.command_tool.common.CommandUtil; 
import com._4paradigm.test_tool.command_tool.common.ExecUtil; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; @@ -7,7 +12,7 @@ import java.util.List; -public class TestCommand { +public class TestCommand extends ClusterTest { @Test public void test1(){ List list = ExecutorUtil.run("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=ns_client --interactive=false --database=test_devops4 --cmd='showopstatus'"); @@ -35,4 +40,10 @@ public void test5(){ String str = ExecUtil.exeCommand("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb --zk_cluster=127.0.0.1:30000 --zk_root_path=/openmldb --role=sql_client --interactive=false --database=test_devops --cmd='select * from test_ssd;'"); System.out.println("str = " + str); } + @Test + public void test6(){ + OpenMLDBResult openMLDBResult = OpenMLDBCommandFacade.sql(OpenMLDBGlobalVar.mainInfo, "test1", "show table status;"); + List> result = openMLDBResult.getResult(); + result.forEach(l->System.out.println(l)); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java index 704b21141cd..c59d20f4643 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeCluster.java @@ -114,9 +114,9 @@ public void testUpgrade(){ Map> map2 = nsClient.getTableOffset(dbName); log.info("升级后offset:"+map2); Assert.assertEquals(map1,map2); - CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); + 
CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); if(version.compareTo("0.5.0")>=0) { - CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java index c456b563660..57613d2c20e 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java @@ -29,13 +29,16 @@ public class UpgradeClusterByCLI extends ClusterTest { private NsClient nsClient; private OpenMLDBDevops openMLDBDevops; private String openMLDBPath; + private SDKClient sdkClient; private String newBinPath; private String confPath; private String upgradePath; private OpenMLDBDeploy openMLDBDeploy; + private String upgradeVersion; @BeforeClass @Parameters("upgradeVersion") public void beforeClass(@Optional("0.6.0") String upgradeVersion){ + this.upgradeVersion = upgradeVersion; dbName = "test_upgrade"; memoryTableName = "test_memory"; ssdTableName = "test_ssd"; @@ -105,19 +108,32 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ } @Test public void testUpgrade(){ - Map> map1 = nsClient.getTableOffset(dbName); - log.info("升级前offset:"+map1); + Map> beforeMap; + if(version.compareTo("0.6.0")>=0){ + beforeMap = nsClient.getTableOffset(dbName); + }else{ + beforeMap = cliClient.showTableStatus(); + } + 
log.info("升级前offset:"+beforeMap); openMLDBDevops.upgradeNs(newBinPath,confPath); openMLDBDevops.upgradeTablet(newBinPath,confPath); openMLDBDevops.upgradeApiServer(newBinPath,confPath); openMLDBDevops.upgradeTaskManager(openMLDBDeploy); - Map> map2 = nsClient.getTableOffset(dbName); - log.info("升级后offset:"+map2); - Assert.assertEquals(map1,map2); -// CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); -// if(version.compareTo("0.5.0")>=0) { -// CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); -// } + Map> afterMap; + if(version.compareTo("0.6.0")>=0){ + afterMap = nsClient.getTableOffset(dbName); + }else{ + afterMap = cliClient.showTableStatus(); + } + log.info("升级后offset:"+afterMap); + Assert.assertEquals(beforeMap,afterMap); + sdkClient = SDKClient.of(executor); + sdkClient.useDB(dbName); + if(upgradeVersion.compareTo("0.6.0")>=0) { + CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + }else{ + CheckUtil.addDataCheckByCount(sdkClient, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + } } // @AfterClass diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java index 0e23d5535fd..84d4c589b15 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java @@ -116,9 +116,9 @@ public void testUpgrade(){ Map> map2 = nsClient.getTableOffset(dbName); log.info("升级后offset:"+map2); 
Assert.assertEquals(map1,map2); - CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); + CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); if(version.compareTo("0.5.0")>=0) { - CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java index eef23166e70..2ca68ca8636 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeStandalone.java @@ -10,7 +10,6 @@ import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; import lombok.extern.slf4j.Slf4j; -import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Optional; import org.testng.annotations.Parameters; @@ -20,7 +19,6 @@ import java.io.File; import java.util.ArrayList; import java.util.List; -import java.util.Map; @Slf4j public class UpgradeStandalone extends ClusterTest { @@ -112,9 +110,9 @@ public void testUpgrade(){ sdkClient.insertList(hddTableName, dataList); } - CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); + CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); 
if(version.compareTo("0.5.0")>=0) { - CheckUtil.addDataCheck(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml index 95775fffb3d..b18434882bd 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml @@ -3,10 +3,10 @@ - + - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml index 6eba18c1912..351aa6a4152 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_tmp.xml @@ -4,15 +4,18 @@ - - - - - - - - - + + + + + + + + + + + + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 88a3b1460e8..9d244ca1f5b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -16,7 +16,7 @@ 8 0.6.0 - 0.6.0 + 0.6.0-macos diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java index dbf652462ea..affcfd98c19 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandUtil.java @@ -35,14 +35,14 @@ public class OpenMLDBCommandUtil { public static OpenMLDBResult createDB(OpenMLDBInfo openMLDBInfo, String dbName) { String sql = String.format("create database %s ;",dbName); - OpenMLDBResult fesqlResult = OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); - return fesqlResult; + OpenMLDBResult openMLDBResult = OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); + return openMLDBResult; } public static OpenMLDBResult desc(OpenMLDBInfo openMLDBInfo, String dbName, String tableName) { String sql = String.format("desc %s ;",tableName); - OpenMLDBResult fesqlResult = OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); - return fesqlResult; + OpenMLDBResult openMLDBResult = OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); + return openMLDBResult; } public static OpenMLDBResult createAndInsert(OpenMLDBInfo openMLDBInfo, String defaultDBName, List inputs) { @@ -60,7 +60,7 @@ public static OpenMLDBResult createAndInsert(OpenMLDBInfo openMLDBInfo, String d } } } - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); if (inputs != null && inputs.size() > 0) { for (int i = 0; i < inputs.size(); i++) { InputDesc inputDesc = inputs.get(i); @@ -93,7 +93,7 @@ public static OpenMLDBResult createAndInsert(OpenMLDBInfo openMLDBInfo, String d } } } - fesqlResult.setOk(true); - return fesqlResult; + openMLDBResult.setOk(true); + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DDLHandler.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DDLHandler.java index ad0c446e0e1..ca4956ba492 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DDLHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DDLHandler.java @@ -34,16 +34,16 @@ public boolean preHandle(String sql) { @Override public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); - fesqlResult.setMsg(Joiner.on("\n").join(result)); - fesqlResult.setOk(CommandResultUtil.success(result)); - fesqlResult.setDbName(dbName); + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(CommandResultUtil.success(result)); + openMLDBResult.setDbName(dbName); if(sql.toLowerCase().startsWith("create index")){ // TODO 希望有更好的解决方案 Tool.sleep(10000); } - return fesqlResult; + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DescHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DescHandler.java index 080579f79d2..b3a1c438d04 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DescHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DescHandler.java @@ -33,15 +33,15 @@ public boolean preHandle(String sql) { @Override public OpenMLDBResult 
onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); - fesqlResult.setMsg(Joiner.on("\n").join(result)); - fesqlResult.setOk(ok); - fesqlResult.setDbName(dbName); + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(ok); + openMLDBResult.setDbName(dbName); if (ok) { - fesqlResult.setSchema(CommandResultUtil.parseSchema(result)); + openMLDBResult.setSchema(CommandResultUtil.parseSchema(result)); } - return fesqlResult; + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/QueryHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/QueryHandler.java index 6496e1621e9..cc6b4bf50f4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/QueryHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/QueryHandler.java @@ -36,12 +36,12 @@ public boolean preHandle(String sql) { @Override public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); - fesqlResult.setMsg(Joiner.on("\n").join(result)); - fesqlResult.setOk(ok); - fesqlResult.setDbName(dbName); + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(ok); + openMLDBResult.setDbName(dbName); if (ok) { 
int count = 0; List> rows = new ArrayList<>(); @@ -52,11 +52,11 @@ public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String List row = Arrays.asList(result.get(i).split("\\s+")); rows.add(row); } - fesqlResult.setColumnNames(columnNames); + openMLDBResult.setColumnNames(columnNames); } - fesqlResult.setCount(count); - fesqlResult.setResult(rows); + openMLDBResult.setCount(count); + openMLDBResult.setResult(rows); } - return fesqlResult; + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentHandler.java index 7740138a2df..090715c8e89 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentHandler.java @@ -34,15 +34,15 @@ public boolean preHandle(String sql) { @Override public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); - fesqlResult.setMsg(Joiner.on("\n").join(result)); - fesqlResult.setOk(ok); - fesqlResult.setDbName(dbName); + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(ok); + openMLDBResult.setDbName(dbName); if (ok && result.size()>9) { - fesqlResult.setDeployment(CommandResultUtil.parseDeployment(result)); + openMLDBResult.setDeployment(CommandResultUtil.parseDeployment(result)); } - return fesqlResult; + 
return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentsHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentsHandler.java index 1c6c481fe40..d2b589ffb46 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentsHandler.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowDeploymentsHandler.java @@ -35,17 +35,17 @@ public boolean preHandle(String sql) { @Override public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { - OpenMLDBResult fesqlResult = new OpenMLDBResult(); + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); boolean ok = CommandResultUtil.success(result); - fesqlResult.setMsg(Joiner.on("\n").join(result)); - fesqlResult.setOk(ok); - fesqlResult.setDbName(dbName); + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(ok); + openMLDBResult.setDbName(dbName); if (ok && result.size()>3) { - fesqlResult.setDeployments(CommandResultUtil.parseDeployments(result)); + openMLDBResult.setDeployments(CommandResultUtil.parseDeployments(result)); }else if(result.get(0).equals("Empty set")){ - fesqlResult.setDeployments(Lists.newArrayList()); + openMLDBResult.setDeployments(Lists.newArrayList()); } - return fesqlResult; + return openMLDBResult; } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowTableStatusHandler.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowTableStatusHandler.java new file mode 100644 index 00000000000..f09844af096 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/ShowTableStatusHandler.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com._4paradigm.openmldb.test_common.command.chain; + + + +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com.google.common.base.Joiner; + +import java.util.List; + +public class ShowTableStatusHandler extends AbstractSQLHandler{ + @Override + public boolean preHandle(String sql) { + return sql.toLowerCase().startsWith("show table status"); + } + + @Override + public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + boolean ok = CommandResultUtil.success(result); + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(ok); + openMLDBResult.setDbName(dbName); + 
if (ok) { + CommandResultUtil.parseResult(result,openMLDBResult); + } + return openMLDBResult; + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java index 09958192844..e5d7df4c3bb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java @@ -32,11 +32,13 @@ private AbstractSQLHandler initHandler(){ DescHandler descHandler = new DescHandler(); ShowDeploymentHandler showDeploymentHandler = new ShowDeploymentHandler(); ShowDeploymentsHandler showDeploymentsHandler = new ShowDeploymentsHandler(); + ShowTableStatusHandler showTableStatusHandler = new ShowTableStatusHandler(); queryHandler.setNextHandler(dmlHandler); dmlHandler.setNextHandler(ddlHandler); ddlHandler.setNextHandler(descHandler); descHandler.setNextHandler(showDeploymentHandler); showDeploymentHandler.setNextHandler(showDeploymentsHandler); + showDeploymentsHandler.setNextHandler(showTableStatusHandler); return queryHandler; } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java index a1e5c743713..32322dadaef 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java @@ -11,6 +11,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.testng.Assert; +import org.testng.collections.Lists; import java.sql.ResultSet; import java.sql.SQLException; @@ -74,4 +75,11 @@ public void insertList(String tableName,List> dataList){ String sql = SQLUtil.genInsertSQL(tableName,dataList); OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); } + public Map> showTableStatus(){ + OpenMLDBResult openMLDBResult = execute("show table status;"); + List> result = openMLDBResult.getResult(); + Map> map = new HashMap<>(); + result.forEach(l->map.put(String.valueOf(l.get(1)), Lists.newArrayList(Long.parseLong(String.valueOf(l.get(4)))))); + return map; + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java index 66c51c3e5e0..02ccfba5451 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java @@ -2,6 +2,7 @@ import com._4paradigm.openmldb.test_common.bean.OpenMLDBColumn; import com._4paradigm.openmldb.test_common.bean.OpenMLDBIndex; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.bean.OpenMLDBTable; import com._4paradigm.openmldb.test_common.model.OpenmldbDeployment; import com.google.common.base.Joiner; @@ -123,4 +124,24 @@ public static List parseDeployments(List lines){ } return deployments; } + // ---------- ---------------- 
--------------- -------------- ------ ------------------ ---------------- ----------- ------------------- --------- --------------------------------------------------------------- ---------------- ------------------- + // Table_id Table_name Database_name Storage_type Rows Memory_data_size Disk_data_size Partition Partition_unalive Replica Offline_path Offline_format Offline_deep_copy + // ---------- ---------------- --------------- -------------- ------ ------------------ ---------------- ----------- ------------------- --------- --------------------------------------------------------------- ---------------- ------------------- + // 27 auto_AITzyByZ default_db ssd 1 0 473414 2 0 3 NULL NULL NULL + // 19 auto_GlcndMiH default_db hdd 1 0 515239 2 0 3 NULL + public static void parseResult(List lines, OpenMLDBResult openMLDBResult){ + int count = 0; + List> rows = new ArrayList<>(); + if(CollectionUtils.isNotEmpty(lines)&&lines.size()>=2) { + List columnNames = Arrays.asList(lines.get(1).split("\\s+")); + for (int i = 3; i < lines.size() - 2; i++) { + count++; + List row = Arrays.asList(lines.get(i).split("\\s+")); + rows.add(row); + } + openMLDBResult.setColumnNames(columnNames); + } + openMLDBResult.setCount(count); + openMLDBResult.setResult(rows); + } } From 8010a65f99a1021336403b37369b5dc938108e03 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 29 Aug 2022 16:55:43 +0800 Subject: [PATCH 147/172] support upgrade test --- .../openmldb-test-java/openmldb-test-common/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml index 9d244ca1f5b..88a3b1460e8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/pom.xml @@ -16,7 +16,7 @@ 8 0.6.0 - 0.6.0-macos + 0.6.0 From b23365dace4829487e5cdc2d53593c1f79830063 Mon 
Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 29 Aug 2022 17:38:14 +0800 Subject: [PATCH 148/172] support upgrade test --- .../openmldb-deploy/src/main/resources/deploy.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 8012d74a6c2..01d50e54c3f 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -29,7 +29,7 @@ tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-b 0.4.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz 0.5.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.5.0-linux.tar.gz 0.5.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz -0.5.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-3.0.0-bin-openmldbspark-v052.tgz +0.5.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v050.tgz 0.5.3=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.5.3-linux.tar.gz 0.5.3_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.5.3_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v053.tgz From 294787a74341af7206361231e99f3c09ff0fa050 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 29 Aug 2022 17:53:39 +0800 Subject: [PATCH 149/172] support upgrade test --- .../openmldb/devops_test/tmp/TestCommand.java | 6 +++ .../command/chain/DefaultHandler.java | 51 +++++++++++++++++++ .../command/chain/SqlChainManager.java | 2 + 3 files changed, 59 insertions(+) create mode 100644 
test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DefaultHandler.java diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java index 4e3a1b8f15c..75f868091dc 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestCommand.java @@ -46,4 +46,10 @@ public void test6(){ List> result = openMLDBResult.getResult(); result.forEach(l->System.out.println(l)); } + @Test + public void test7(){ + OpenMLDBResult openMLDBResult = OpenMLDBCommandFacade.sql(OpenMLDBGlobalVar.mainInfo, "test1", "show databases;"); + List> result = openMLDBResult.getResult(); + result.forEach(l->System.out.println(l)); + } } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DefaultHandler.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DefaultHandler.java new file mode 100644 index 00000000000..688bdab10fd --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/DefaultHandler.java @@ -0,0 +1,51 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com._4paradigm.openmldb.test_common.command.chain; + + + +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; +import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; +import com._4paradigm.openmldb.test_common.util.CommandResultUtil; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com.google.common.base.Joiner; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class DefaultHandler extends AbstractSQLHandler{ + @Override + public boolean preHandle(String sql) { + return StringUtils.isNotEmpty(sql); + } + + @Override + public OpenMLDBResult onHandle(OpenMLDBInfo openMLDBInfo, String dbName, String sql) { + OpenMLDBResult openMLDBResult = new OpenMLDBResult(); + List result = OpenMLDBCommandFactory.runNoInteractive(openMLDBInfo,dbName,sql); + boolean ok = CommandResultUtil.success(result); + openMLDBResult.setMsg(Joiner.on("\n").join(result)); + openMLDBResult.setOk(ok); + openMLDBResult.setDbName(dbName); + if (ok) { + CommandResultUtil.parseResult(result,openMLDBResult); + } + return openMLDBResult; + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java index e5d7df4c3bb..a0599069d86 
100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/chain/SqlChainManager.java @@ -33,12 +33,14 @@ private AbstractSQLHandler initHandler(){ ShowDeploymentHandler showDeploymentHandler = new ShowDeploymentHandler(); ShowDeploymentsHandler showDeploymentsHandler = new ShowDeploymentsHandler(); ShowTableStatusHandler showTableStatusHandler = new ShowTableStatusHandler(); + DefaultHandler defaultHandler = new DefaultHandler(); queryHandler.setNextHandler(dmlHandler); dmlHandler.setNextHandler(ddlHandler); ddlHandler.setNextHandler(descHandler); descHandler.setNextHandler(showDeploymentHandler); showDeploymentHandler.setNextHandler(showDeploymentsHandler); showDeploymentsHandler.setNextHandler(showTableStatusHandler); + showTableStatusHandler.setNextHandler(defaultHandler); return queryHandler; } From 4055c33af8af0b43ccf59ecc9872661df73b350c Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 30 Aug 2022 09:32:18 +0800 Subject: [PATCH 150/172] support upgrade test --- .../openmldb/test_common/openmldb/OpenMLDBDevops.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index dc990b065dd..f85e90a45de 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -133,7 +133,7 @@ public void 
upgradeApiServer(String binPath,String confPath){ modifyApiServerConf(apiServerPath, openMLDBInfo.getApiServerEndpoints().get(i-1), openMLDBInfo.getZk_cluster()); operateApiServer(i-1,"start"); Tool.sleep(20*1000); - log.info("第{}个ns升级结束",i); + log.info("第{}个apiserver升级结束",i); } } public void upgradeTaskManager(OpenMLDBDeploy openMLDBDeploy){ @@ -141,7 +141,7 @@ public void upgradeTaskManager(OpenMLDBDeploy openMLDBDeploy){ int taskManagerNum = openMLDBInfo.getTaskManagerEndpoints().size(); for(int i=1;i<=taskManagerNum;i++) { log.info("开始升级第{}个taskmanager",i); - operateTaskManager(i,"stop"); + operateTaskManager(i-1,"stop"); String taskManagerPath = basePath + "/openmldb-task_manager-"+i; backDirectory(taskManagerPath); ExecutorUtil.run("rm -rf "+taskManagerPath); From 0fee57c3fd74c3353b85b8afd5533027755c2351 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 30 Aug 2022 16:14:28 +0800 Subject: [PATCH 151/172] support upgrade test --- .../qa/openmldb_deploy/bean/OpenMLDBInfo.java | 1 + .../common/OpenMLDBDeploy.java | 27 ++++++++++--------- .../src/main/resources/deploy.properties | 2 ++ .../test_common/openmldb/OpenMLDBDevops.java | 9 +++++-- 4 files changed, 25 insertions(+), 14 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java index d37f11100c2..22befda91c7 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java @@ -53,6 +53,7 @@ public class OpenMLDBInfo { private List apiServerNames = new ArrayList<>(); private List taskManagerEndpoints = new ArrayList<>(); private String runCommand; + private String sparkHome; 
public String getRunCommand(){ if(deployType==OpenMLDBDeployType.CLUSTER) { diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index d94f6b08784..333e1465087 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -40,6 +40,7 @@ public class OpenMLDBDeploy { private String version; private String openMLDBUrl; private String openMLDBDirectoryName; + private String sparkHome; private String openMLDBPath; private boolean useName; private boolean isCluster = true; @@ -103,17 +104,17 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ builder.taskManagerEndpoints(Lists.newArrayList()); builder.openMLDBPath(testPath+"/openmldb-ns-1/bin/openmldb"); builder.openMLDBDirectoryName(openMLDBDirectoryName); - OpenMLDBInfo fedbInfo = builder.build(); + OpenMLDBInfo openMLDBInfo = builder.build(); for(int i=1;i<=tablet;i++) { int tablet_port ; if(useName){ String tabletName = clusterName+"-tablet-"+i; tablet_port = deployTablet(testPath,null, i, zk_point,tabletName); - fedbInfo.getTabletNames().add(tabletName); + openMLDBInfo.getTabletNames().add(tabletName); }else { tablet_port = deployTablet(testPath, ip, i, zk_point,null); } - fedbInfo.getTabletEndpoints().add(ip+":"+tablet_port); + openMLDBInfo.getTabletEndpoints().add(ip+":"+tablet_port); Tool.sleep(SLEEP_TIME); } for(int i=1;i<=ns;i++){ @@ -121,11 +122,11 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ if(useName){ String nsName = clusterName+"-ns-"+i; ns_port = deployNS(testPath,null, i, zk_point,nsName); - 
fedbInfo.getNsNames().add(nsName); + openMLDBInfo.getNsNames().add(nsName); }else { ns_port = deployNS(testPath, ip, i, zk_point,null); } - fedbInfo.getNsEndpoints().add(ip+":"+ns_port); + openMLDBInfo.getNsEndpoints().add(ip+":"+ns_port); Tool.sleep(SLEEP_TIME); } @@ -134,21 +135,22 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ if(useName){ String apiserverName = clusterName+"-apiserver-"+i; apiserver_port = deployApiserver(testPath,null, i, zk_point,apiserverName); - fedbInfo.getApiServerNames().add(apiserverName); + openMLDBInfo.getApiServerNames().add(apiserverName); }else { apiserver_port = deployApiserver(testPath, ip, i, zk_point,null); } - fedbInfo.getApiServerEndpoints().add(ip+":"+apiserver_port); + openMLDBInfo.getApiServerEndpoints().add(ip+":"+apiserver_port); Tool.sleep(SLEEP_TIME); } if(version.equals("tmp")||version.compareTo("0.4.0")>=0) { for (int i = 1; i <= 1; i++) { int task_manager_port = deployTaskManager(testPath, ip, i, zk_point); - fedbInfo.getTaskManagerEndpoints().add(ip + ":" + task_manager_port); + openMLDBInfo.getTaskManagerEndpoints().add(ip + ":" + task_manager_port); + openMLDBInfo.setSparkHome(sparkHome); } } - log.info("openmldb-info:"+fedbInfo); - return fedbInfo; + log.info("openmldb-info:"+openMLDBInfo); + return openMLDBInfo; } public String downloadOpenMLDB(String testPath){ @@ -362,8 +364,9 @@ public String deploySpark(String testPath){ ExecutorUtil.run("wget -P "+testPath+" -q "+ OpenMLDBDeployConfig.getSparkUrl(version)); String tarName = ExecutorUtil.run("ls "+ testPath +" | grep spark").get(0); ExecutorUtil.run("tar -zxvf " + testPath + "/"+tarName+" -C "+testPath); - String sparkHome = ExecutorUtil.run("ls "+ testPath +" | grep spark | grep -v .tgz ").get(0); - String sparkPath = testPath+"/"+sparkHome; + String sparkDirectoryName = ExecutorUtil.run("ls "+ testPath +" | grep spark | grep -v .tgz ").get(0); + String sparkPath = testPath+"/"+sparkDirectoryName; + this.sparkHome = 
sparkPath; return sparkPath; }catch (Exception e){ e.printStackTrace(); diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index 01d50e54c3f..e1ef99fb326 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -34,3 +34,5 @@ tmp_mac_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-b 0.5.3_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz 0.5.3_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v053.tgz 0.6.0=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz +0.6.0_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz +0.6.0_spark_url=http://pkg.4paradigm.com:81/rtidb/test/spark-pkg/spark-3.0.0-bin-openmldbspark-v060.tgz diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index f85e90a45de..ac4a5d7431c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -79,8 +79,12 @@ public void operateTaskManager(int taskManagerIndex,String operator){ String command = String.format("sh %s/openmldb-task_manager-%d/bin/start.sh %s taskmanager",basePath,taskManagerIndex+1,operator); ExecutorUtil.run(command); Tool.sleep(5*1000); - String checkStatus = 
operator.equals("stop")?"offline":"online"; - sdkClient.checkComponentStatus(openMLDBInfo.getTaskManagerEndpoints().get(taskManagerIndex), checkStatus); + String taskManagerEndpoint = openMLDBInfo.getTaskManagerEndpoints().get(taskManagerIndex); + if(operator.equals("stop")){ + sdkClient.checkComponentNotExist(taskManagerEndpoint); + }else { + sdkClient.checkComponentStatus(taskManagerEndpoint, "online"); + } } public void operateZKOne(String operator){ String command = String.format("sh %s/zookeeper-3.4.14/bin/zkServer.sh %s",basePath,operator); @@ -145,6 +149,7 @@ public void upgradeTaskManager(OpenMLDBDeploy openMLDBDeploy){ String taskManagerPath = basePath + "/openmldb-task_manager-"+i; backDirectory(taskManagerPath); ExecutorUtil.run("rm -rf "+taskManagerPath); + ExecutorUtil.run("rm -rf "+openMLDBInfo.getSparkHome()); String ipPort = openMLDBInfo.getTaskManagerEndpoints().get(i-1); String[] ss = ipPort.split(":"); String ip = ss[0]; From f8e81f04b1ede709beea785cae05639d2a6bcae6 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Tue, 30 Aug 2022 18:01:41 +0800 Subject: [PATCH 152/172] support upgrade test --- .../upgrade_test/UpgradeClusterByCLI.java | 5 ++--- .../test_common/openmldb/CliClient.java | 18 ++++++------------ .../test_common/util/CommandResultUtil.java | 8 ++++---- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java index 57613d2c20e..a9b9b0eee82 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java @@ -46,10 +46,9 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ cliClient = CliClient.of(OpenMLDBGlobalVar.mainInfo,dbName); nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); - + cliClient.setGlobalOnline(); int dataCount = 100; - - cliClient.createAndUseDB(dbName); + cliClient.create(dbName); String memoryTableDDL = "create table test_memory(\n" + "c1 string,\n" + "c2 smallint,\n" + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java index 32322dadaef..f1590b7ca30 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/CliClient.java @@ -1,21 +1,12 @@ package com._4paradigm.openmldb.test_common.openmldb; -import com._4paradigm.openmldb.jdbc.SQLResultSet; import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; -import com._4paradigm.openmldb.test_common.command.CommandUtil; import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFacade; -import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; import com._4paradigm.openmldb.test_common.util.*; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; -import org.testng.Assert; import org.testng.collections.Lists; -import java.sql.ResultSet; -import 
java.sql.SQLException; -import java.sql.Statement; import java.util.*; @Slf4j @@ -30,12 +21,11 @@ private CliClient(OpenMLDBInfo openMLDBInfo,String dbName){ public static CliClient of(OpenMLDBInfo openMLDBInfo,String dbName){ return new CliClient(openMLDBInfo,dbName); } - public void createAndUseDB(String dbName){ + public void create(String dbName){ List sqlList = new ArrayList<>(); if (!dbIsExist(dbName)) { sqlList.add(String.format("create database %s;", dbName)); } - sqlList.add(String.format("use %s;", dbName)); OpenMLDBCommandFacade.sqls(openMLDBInfo, dbName, sqlList); } @@ -73,7 +63,11 @@ public void insert(String tableName,List list){ } public void insertList(String tableName,List> dataList){ String sql = SQLUtil.genInsertSQL(tableName,dataList); - OpenMLDBCommandFacade.sql(openMLDBInfo,dbName,sql); + execute(sql); + } + public void setGlobalOnline(){ + String sql = "set @@global.execute_mode='online';"; + execute(sql); } public Map> showTableStatus(){ OpenMLDBResult openMLDBResult = execute("show table status;"); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java index 02ccfba5451..7d00df85773 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/CommandResultUtil.java @@ -130,9 +130,9 @@ public static List parseDeployments(List lines){ // 27 auto_AITzyByZ default_db ssd 1 0 473414 2 0 3 NULL NULL NULL // 19 auto_GlcndMiH default_db hdd 1 0 515239 2 0 3 NULL public static void parseResult(List lines, OpenMLDBResult openMLDBResult){ - int count = 0; - List> rows = new ArrayList<>(); 
if(CollectionUtils.isNotEmpty(lines)&&lines.size()>=2) { + int count = 0; + List> rows = new ArrayList<>(); List columnNames = Arrays.asList(lines.get(1).split("\\s+")); for (int i = 3; i < lines.size() - 2; i++) { count++; @@ -140,8 +140,8 @@ public static void parseResult(List lines, OpenMLDBResult openMLDBResult rows.add(row); } openMLDBResult.setColumnNames(columnNames); + openMLDBResult.setCount(count); + openMLDBResult.setResult(rows); } - openMLDBResult.setCount(count); - openMLDBResult.setResult(rows); } } From 5d5c0f6119c6293575e96676560789933e584762 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 31 Aug 2022 11:51:32 +0800 Subject: [PATCH 153/172] support upgrade test --- .../openmldb/test_common/openmldb/OpenMLDBDevops.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index ac4a5d7431c..f723b20d39c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -189,9 +189,10 @@ public static void backDirectory(String path){ path = path.substring(0,path.length()-1); } String directoryName = path.substring(path.lastIndexOf("/")+1); + String parentName = path.substring(0,path.lastIndexOf("/")); String command = "cp -rf "+path +" "+path+"-back"; ExecutorUtil.run(command); - command = "ls "+path+" | grep "+directoryName+"-back"; + command = "ls "+parentName+" | grep "+directoryName+"-back"; List result = ExecutorUtil.run(command); Assert.assertEquals(result.get(0),directoryName+"-back"); } From 
40adb4aa2256738d77c5db4b086db2ba1ee875c4 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 31 Aug 2022 14:37:20 +0800 Subject: [PATCH 154/172] support upgrade test --- .../devops_test/upgrade_test/UpgradeClusterByCLI.java | 4 +++- .../openmldb/test_common/openmldb/OpenMLDBDevops.java | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java index a9b9b0eee82..db97eff2c11 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java @@ -35,6 +35,7 @@ public class UpgradeClusterByCLI extends ClusterTest { private String upgradePath; private OpenMLDBDeploy openMLDBDeploy; private String upgradeVersion; + private String upgradeDirectoryName; @BeforeClass @Parameters("upgradeVersion") public void beforeClass(@Optional("0.6.0") String upgradeVersion){ @@ -100,7 +101,7 @@ public void beforeClass(@Optional("0.6.0") String upgradeVersion){ file.mkdirs(); } openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); - String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); + upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); openMLDBPath = upgradePath+"/"+upgradeDirectoryName+"/bin/openmldb"; newBinPath = upgradePath+"/"+upgradeDirectoryName+"/bin/"; confPath = upgradePath+"/"+upgradeDirectoryName+"/conf"; @@ -117,6 +118,7 @@ public void testUpgrade(){ openMLDBDevops.upgradeNs(newBinPath,confPath); openMLDBDevops.upgradeTablet(newBinPath,confPath); 
openMLDBDevops.upgradeApiServer(newBinPath,confPath); + ExecutorUtil.run("cp -r " + upgradePath+"/"+upgradeDirectoryName + " " + OpenMLDBGlobalVar.mainInfo.getBasePath()); openMLDBDevops.upgradeTaskManager(openMLDBDeploy); Map> afterMap; if(version.compareTo("0.6.0")>=0){ diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java index f723b20d39c..1c58bc3b0ce 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/OpenMLDBDevops.java @@ -150,6 +150,7 @@ public void upgradeTaskManager(OpenMLDBDeploy openMLDBDeploy){ backDirectory(taskManagerPath); ExecutorUtil.run("rm -rf "+taskManagerPath); ExecutorUtil.run("rm -rf "+openMLDBInfo.getSparkHome()); + ExecutorUtil.run("rm -rf "+basePath + "/spark-*.tgz"); String ipPort = openMLDBInfo.getTaskManagerEndpoints().get(i-1); String[] ss = ipPort.split(":"); String ip = ss[0]; From ae585be9cf10326f8d2725b3c4e83e293a8280f5 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 31 Aug 2022 16:21:50 +0800 Subject: [PATCH 155/172] support upgrade test --- .github/workflows/devops-test.yml | 263 ++++++++++++++++++ .../openmldb/devops_test/util/CheckUtil.java | 3 + .../upgrade_test/UpgradeClusterByCLI.java | 12 +- .../test_suite/test_upgrade.xml | 2 +- .../test_common/openmldb/NsClient.java | 18 ++ 5 files changed, 295 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/devops-test.yml diff --git a/.github/workflows/devops-test.yml b/.github/workflows/devops-test.yml new file mode 100644 index 00000000000..883dd19eaa3 --- /dev/null +++ 
b/.github/workflows/devops-test.yml @@ -0,0 +1,263 @@ +name: DEVOPS-TEST + +on: + workflow_dispatch: + inputs: + JAVA_SDK_VERSION: + description: 'java sdk version' + required: true + default: '0.4.2' + OPENMLDB_SERVER_VERSION: + description: 'openmldb server version' + required: true + default: '0.4.2' + PYTHON_SDK_VERSION: + description: 'python sdk version' + required: true + default: '0.4.2' + BATCH_VERSION: + description: 'batch version' + required: true + default: '0.4.2-allinone' + DIFF_VERSIONS: + description: 'diff versions' + required: false + default: '0.4.0' + EXEC_TEST_TYPE: + description: 'Which tests need to be executed? The options are all, python, java, batch, cli, standalone-cli and apiserver' + required: true + default: 'all' + +env: + GIT_SUBMODULE_STRATEGY: recursive + HYBRIDSE_SOURCE: + +jobs: +# java-sdk-test-standalone-0: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:0.4.1 +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: modify-properties +# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d standalone -l "0" +# - name: TEST Results +# if: always() +# uses: EnricoMi/publish-unit-test-result-action@v1 +# with: +# files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml +# comment_mode: "create new" +# deduplicate_classes_by_file_name: true +# check_name: Java SDK Test Standalone0 PKG Report +# comment_title: Java SDK Test Standalone0 PKG Report +# +# java-sdk-test-standalone-1: +# if: ${{ 
github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:0.4.1 +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: modify-properties +# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d standalone -l "1,2,3,4,5" +# - name: TEST Results +# if: always() +# uses: EnricoMi/publish-unit-test-result-action@v1 +# with: +# files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml +# check_name: Java SDK Test Standalone1 PKG Report +# comment_title: Java SDK Test Standalone1 PKG Report + + java-sdk-test-cluster-0: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: modify-properties + run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d cluster -l "0" + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml + comment_mode: "create new" + check_name: Java SDK Test Cluster0 PKG Report + comment_title: Java SDK Test 
Cluster0 PKG Report + + java-sdk-test-cluster-1: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: modify-properties + run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d cluster -l "1,2,3,4,5" + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml + check_name: Java SDK Test Cluster1 PKG Report + comment_title: Java SDK Test Cluster1 PKG Report + +# standalone-cli-test-0: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'standalone-cli' || github.event.inputs.EXEC_TEST_TYPE == 'cli' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:0.4.1 +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: modify-properties +# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_standalone.xml -d standalone -l "0" +# - name: TEST Results +# if: always() +# uses: EnricoMi/publish-unit-test-result-action@v1 +# with: +# files: 
test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml +# check_name: Standalone CLI0 Test PKG Report +# comment_title: Standalone CLI0 Test PKG Report +# +# standalone-cli-test-1: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'standalone-cli' || github.event.inputs.EXEC_TEST_TYPE == 'cli' }} +# runs-on: ubuntu-latest +# container: +# image: ghcr.io/4paradigm/hybridsql:0.4.1 +# env: +# OS: linux +# steps: +# - uses: actions/checkout@v2 +# - name: modify-properties +# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} +# - name: test +# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_standalone.xml -d standalone -l "1,2,3,4,5" +# - name: TEST Results +# if: always() +# uses: EnricoMi/publish-unit-test-result-action@v1 +# with: +# files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml +# check_name: Standalone CLI1 Test PKG Report +# comment_title: Standalone CLI1 Test PKG Report + + python-sdk-test-standalone-0: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: modify-properties + run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b PKG -d standalone -l "0" + - name: upload test results + 
if: always() + uses: actions/upload-artifact@v2 + with: + name: python-sdk-standalone-0-pkg-${{ github.sha }} + path: | + python/report/allure-results +# - name: allure-report +# uses: simple-elf/allure-report-action@master +# if: always() +# id: allure-report +# with: +# allure_results: python/report/allure-results +# gh_pages: gh-pages +# allure_report: allure-report +# allure_history: allure-history +# - name: Deploy report to Github Pages +# if: always() +# uses: peaceiris/actions-gh-pages@v2 +# env: +# PERSONAL_TOKEN: ${{ secrets.CR_PAT_ZW }} +# PUBLISH_BRANCH: gh-pages +# PUBLISH_DIR: allure-history + + python-sdk-test-standalone-1: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: modify-properties + run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b PKG -d standalone -l "1,2,3,4,5" + - name: upload test results + if: always() + uses: actions/upload-artifact@v2 + with: + name: python-sdk-standalone-1-pkg-${{ github.sha }} + path: | + python/report/allure-results + + apiserver-test: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'apiserver' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: modify-properties + run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ 
github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-apiserver-test.sh -b PKG -c test_all.xml -d standalone -l "0" + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-http-test/target/surefire-reports/TEST-*.xml + check_name: APIServer PKG Report + comment_title: APIServer PKG Report + + batch-test: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'batch' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: modify-properties + run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-batch-test.sh -b PKG + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/batch-test/openmldb-batch-test/target/surefire-reports/TEST-*.xml + check_name: Batch Test PKG Report + comment_title: Batch Test PKG Report diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java index 4b3b198d852..776d2e8a6d5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java @@ -3,6 +3,7 @@ 
import com._4paradigm.openmldb.test_common.openmldb.CliClient; import com._4paradigm.openmldb.test_common.openmldb.NsClient; import com._4paradigm.openmldb.test_common.openmldb.SDKClient; +import com._4paradigm.qa.openmldb_deploy.util.Tool; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.RandomStringUtils; import org.testng.Assert; @@ -23,8 +24,10 @@ public static void addDataCheckByOffset(SDKClient sdkClient, NsClient nsClient, for(String tableName:tableNames){ if (CollectionUtils.isNotEmpty(addDataList)) { sdkClient.insertList(tableName,addDataList); + Tool.sleep(10*1000); } Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); + Assert.assertEquals(nsClient.getTableRowCount(dbName,tableName),originalCount+addCount,msg); } nsClient.checkTableOffSet(dbName,null); } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java index db97eff2c11..f04b5e5f0a4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeClusterByCLI.java @@ -131,9 +131,17 @@ public void testUpgrade(){ sdkClient = SDKClient.of(executor); sdkClient.useDB(dbName); if(upgradeVersion.compareTo("0.6.0")>=0) { - CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + if(version.compareTo("0.5.0")>=0) { + CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName, ssdTableName, hddTableName), 100, 10); + }else{ + 
CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); + } }else{ - CheckUtil.addDataCheckByCount(sdkClient, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); + if(version.compareTo("0.5.0")>=0) { + CheckUtil.addDataCheckByCount(sdkClient, Lists.newArrayList(memoryTableName, ssdTableName, hddTableName), 100, 10); + }else{ + CheckUtil.addDataCheckByCount(sdkClient, Lists.newArrayList(memoryTableName), 100, 10); + } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml index b18434882bd..d4703f3f3c1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade.xml @@ -3,7 +3,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 9ec61fc310b..573606335c0 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -1,5 +1,6 @@ package com._4paradigm.openmldb.test_common.openmldb; +import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.command.CommandUtil; import com._4paradigm.openmldb.test_common.util.NsResultUtil; import com._4paradigm.openmldb.test_common.util.Tool; @@ -76,6 +77,23 @@ public List showTable(String dbName,String tableName){ List lines = runNs(dbName,command); return lines; } + public long 
getTableRowCount(String dbName,String tableName){ + List lines = showTableHaveTable(dbName,tableName); + long count = 0; + for(int i=2;i lines = showTable(dbName,tableName); From 28da96c4db591c932eb6edc54ca0f19fd2d9bf9a Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 31 Aug 2022 17:08:06 +0800 Subject: [PATCH 156/172] support upgrade test --- .github/workflows/devops-test.yml | 2 +- .../openmldb/test_common/command/OpenMLDBCommandFactory.java | 4 ++-- .../_4paradigm/openmldb/test_common/openmldb/NsClient.java | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/devops-test.yml b/.github/workflows/devops-test.yml index 883dd19eaa3..32c8207f573 100644 --- a/.github/workflows/devops-test.yml +++ b/.github/workflows/devops-test.yml @@ -24,7 +24,7 @@ on: required: false default: '0.4.0' EXEC_TEST_TYPE: - description: 'Which tests need to be executed? The options are all, python, java, batch, cli, standalone-cli and apiserver' + description: 'Which tests need to be executed? 
The options are all, upgrade, node_failure, node_expansion' required: true default: 'all' diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java index cc1437e6f50..7f3a2aa8e24 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java @@ -33,9 +33,9 @@ private static String getNoInteractiveCommandByStandalone(String rtidbPath,Strin return line; } private static String getNoInteractiveCommandByCLuster(String rtidbPath,String zkEndPoint,String zkRootPath,String dbName,String command){ - String line = "%s --zk_cluster=%s --zk_root_path=%s --role=sql_client --interactive=false --database=%s --cmd='%s'"; + String line = "%s --zk_session_timeout=10000 --zk_cluster=%s --zk_root_path=%s --role=sql_client --interactive=false --database=%s --cmd='%s'"; if(command.contains("'")){ - line = "%s --zk_cluster=%s --zk_root_path=%s --role=sql_client --interactive=false --database=%s --cmd=\"%s\""; + line = "%s --zk_session_timeout=10000 --zk_cluster=%s --zk_root_path=%s --role=sql_client --interactive=false --database=%s --cmd=\"%s\""; } line = String.format(line,rtidbPath,zkEndPoint,zkRootPath,dbName,command); // logger.info("generate rtidb no interactive command:{}",line); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 
573606335c0..314824b51c4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -31,7 +31,7 @@ public static NsClient of(OpenMLDBInfo openMLDBInfo){ } public String genNsCommand(String openMLDBPath,String zkCluster,String zkRootPath,String dbName,String command){ String dbStr = StringUtils.isNotEmpty(dbName)?"--database="+dbName:""; - String line = "%s --zk_cluster=%s --zk_root_path=%s --role=ns_client --interactive=false %s --cmd='%s'"; + String line = "%s --zk_session_timeout=10000 --zk_cluster=%s --zk_root_path=%s --role=ns_client --interactive=false %s --cmd='%s'"; line = String.format(line,openMLDBPath,zkCluster,zkRootPath,dbStr,command); log.info("ns command:"+line); return line; From 846df2807d1f95cfeb02ceae5490ec3255044505 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 31 Aug 2022 18:11:20 +0800 Subject: [PATCH 157/172] add devops cicd --- .github/workflows/devops-test.yml | 242 ++---------------- .../src/main/resources/deploy.properties | 2 +- .../high_availability/TestCluster.java | 2 +- .../test_suite/test_cluster.xml | 2 + .../test_suite/test_single.xml | 2 + .../test_suite/test_deploy.xml | 13 - test/steps/modify_devops_config.sh | 61 +++++ test/steps/openmldb-node-failure-test.sh | 89 +++++++ 8 files changed, 180 insertions(+), 233 deletions(-) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml create mode 100755 test/steps/modify_devops_config.sh create mode 100755 test/steps/openmldb-node-failure-test.sh diff --git a/.github/workflows/devops-test.yml b/.github/workflows/devops-test.yml index 32c8207f573..7b3d413bb18 100644 --- a/.github/workflows/devops-test.yml +++ b/.github/workflows/devops-test.yml @@ -3,26 +3,10 @@ name: DEVOPS-TEST on: workflow_dispatch: 
inputs: - JAVA_SDK_VERSION: - description: 'java sdk version' + PRE_UPGRADE_VERSION: + description: 'version before upgrade' required: true - default: '0.4.2' - OPENMLDB_SERVER_VERSION: - description: 'openmldb server version' - required: true - default: '0.4.2' - PYTHON_SDK_VERSION: - description: 'python sdk version' - required: true - default: '0.4.2' - BATCH_VERSION: - description: 'batch version' - required: true - default: '0.4.2-allinone' - DIFF_VERSIONS: - description: 'diff versions' - required: false - default: '0.4.0' + default: '0.5.0' EXEC_TEST_TYPE: description: 'Which tests need to be executed? The options are all, upgrade, node_failure, node_expansion' required: true @@ -30,55 +14,11 @@ on: env: GIT_SUBMODULE_STRATEGY: recursive - HYBRIDSE_SOURCE: + HYBRIDSE_SOURCE: local jobs: -# java-sdk-test-standalone-0: -# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} -# runs-on: ubuntu-latest -# container: -# image: ghcr.io/4paradigm/hybridsql:0.4.1 -# env: -# OS: linux -# steps: -# - uses: actions/checkout@v2 -# - name: modify-properties -# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} -# - name: test -# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d standalone -l "0" -# - name: TEST Results -# if: always() -# uses: EnricoMi/publish-unit-test-result-action@v1 -# with: -# files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml -# comment_mode: "create new" -# deduplicate_classes_by_file_name: true -# check_name: Java SDK Test Standalone0 PKG Report -# comment_title: Java SDK Test Standalone0 PKG Report -# -# java-sdk-test-standalone-1: -# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || 
github.event.inputs.EXEC_TEST_TYPE == 'java' }} -# runs-on: ubuntu-latest -# container: -# image: ghcr.io/4paradigm/hybridsql:0.4.1 -# env: -# OS: linux -# steps: -# - uses: actions/checkout@v2 -# - name: modify-properties -# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} -# - name: test -# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d standalone -l "1,2,3,4,5" -# - name: TEST Results -# if: always() -# uses: EnricoMi/publish-unit-test-result-action@v1 -# with: -# files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml -# check_name: Java SDK Test Standalone1 PKG Report -# comment_title: Java SDK Test Standalone1 PKG Report - - java-sdk-test-cluster-0: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} + node-failure-test-cluster: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'node_failure' }} runs-on: ubuntu-latest container: image: ghcr.io/4paradigm/hybridsql:latest @@ -86,45 +26,28 @@ jobs: OS: linux steps: - uses: actions/checkout@v2 - - name: modify-properties - run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} + - name: build jsdk and package + run: | + make configure CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c 
test_all.xml -d cluster -l "0" + run: source /root/.bashrc && bash test/steps/openmldb-node-failure-test.sh -c test_cluster.xml -d cluster - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 with: - files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml - comment_mode: "create new" - check_name: Java SDK Test Cluster0 PKG Report - comment_title: Java SDK Test Cluster0 PKG Report + files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml + check_name: "node-failure-test-cluster Report" + comment_title: "node-failure-test-cluster Report" - java-sdk-test-cluster-1: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: modify-properties - run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d cluster -l "1,2,3,4,5" - - name: TEST Results - if: always() - uses: EnricoMi/publish-unit-test-result-action@v1 - with: - files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml - check_name: Java SDK Test Cluster1 PKG Report - comment_title: Java SDK Test Cluster1 PKG Report - -# standalone-cli-test-0: -# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'standalone-cli' || github.event.inputs.EXEC_TEST_TYPE == 'cli' }} +# upgrade-test-cluster: +# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }} # 
runs-on: ubuntu-latest # container: -# image: ghcr.io/4paradigm/hybridsql:0.4.1 +# image: ghcr.io/4paradigm/hybridsql:latest # env: # OS: linux # steps: @@ -132,132 +55,15 @@ jobs: # - name: modify-properties # run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} # - name: test -# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_standalone.xml -d standalone -l "0" +# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d cluster -l "0" # - name: TEST Results # if: always() # uses: EnricoMi/publish-unit-test-result-action@v1 # with: # files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml -# check_name: Standalone CLI0 Test PKG Report -# comment_title: Standalone CLI0 Test PKG Report -# -# standalone-cli-test-1: -# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'standalone-cli' || github.event.inputs.EXEC_TEST_TYPE == 'cli' }} -# runs-on: ubuntu-latest -# container: -# image: ghcr.io/4paradigm/hybridsql:0.4.1 -# env: -# OS: linux -# steps: -# - uses: actions/checkout@v2 -# - name: modify-properties -# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} -# - name: test -# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_standalone.xml -d standalone -l "1,2,3,4,5" -# - name: TEST Results -# if: always() -# uses: EnricoMi/publish-unit-test-result-action@v1 -# with: -# files: 
test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml -# check_name: Standalone CLI1 Test PKG Report -# comment_title: Standalone CLI1 Test PKG Report - - python-sdk-test-standalone-0: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: modify-properties - run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b PKG -d standalone -l "0" - - name: upload test results - if: always() - uses: actions/upload-artifact@v2 - with: - name: python-sdk-standalone-0-pkg-${{ github.sha }} - path: | - python/report/allure-results -# - name: allure-report -# uses: simple-elf/allure-report-action@master -# if: always() -# id: allure-report -# with: -# allure_results: python/report/allure-results -# gh_pages: gh-pages -# allure_report: allure-report -# allure_history: allure-history -# - name: Deploy report to Github Pages -# if: always() -# uses: peaceiris/actions-gh-pages@v2 -# env: -# PERSONAL_TOKEN: ${{ secrets.CR_PAT_ZW }} -# PUBLISH_BRANCH: gh-pages -# PUBLISH_DIR: allure-history +# comment_mode: "create new" +# check_name: Java SDK Test Cluster0 PKG Report +# comment_title: Java SDK Test Cluster0 PKG Report - python-sdk-test-standalone-1: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: modify-properties - run: sh 
test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b PKG -d standalone -l "1,2,3,4,5" - - name: upload test results - if: always() - uses: actions/upload-artifact@v2 - with: - name: python-sdk-standalone-1-pkg-${{ github.sha }} - path: | - python/report/allure-results - apiserver-test: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'apiserver' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: modify-properties - run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-apiserver-test.sh -b PKG -c test_all.xml -d standalone -l "0" - - name: TEST Results - if: always() - uses: EnricoMi/publish-unit-test-result-action@v1 - with: - files: test/integration-test/openmldb-test-java/openmldb-http-test/target/surefire-reports/TEST-*.xml - check_name: APIServer PKG Report - comment_title: APIServer PKG Report - batch-test: - if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'batch' }} - runs-on: ubuntu-latest - container: - image: ghcr.io/4paradigm/hybridsql:latest - env: - OS: linux - steps: - - uses: actions/checkout@v2 - - name: modify-properties - run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ 
github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} - - name: test - run: source /root/.bashrc && bash test/steps/openmldb-batch-test.sh -b PKG - - name: TEST Results - if: always() - uses: EnricoMi/publish-unit-test-result-action@v1 - with: - files: test/batch-test/openmldb-batch-test/target/surefire-reports/TEST-*.xml - check_name: Batch Test PKG Report - comment_title: Batch Test PKG Report diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties index e1ef99fb326..0cb6fa1d848 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties @@ -4,7 +4,7 @@ zk_url=https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4. 
main=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz 0.2.2=https://github.com/4paradigm/OpenMLDB/releases/download/0.2.2/openmldb-0.2.2-linux.tar.gz 0.2.3=https://github.com/4paradigm/OpenMLDB/releases/download/v0.2.3/openmldb-0.2.3-linux.tar.gz -spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.3.2/spark-3.0.0-bin-openmldbspark.tgz +spark_url=https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.6.1/spark-3.0.0-bin-openmldbspark.tgz tmp=http://pkg.4paradigm.com:81/rtidb/test/openmldb-pkg/openmldb-0.6.0-linux.tar.gz tmp_zk_url=http://pkg.4paradigm.com:81/rtidb/test/zookeeper-3.4.14.tar.gz diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java index 7b16271dea7..0e008864a8c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/high_availability/TestCluster.java @@ -20,7 +20,7 @@ public class TestCluster extends ClusterTest { private OpenMLDBDevops openMLDBDevops; @BeforeClass public void beforeClass(){ - dbName = "test_devops2"; + dbName = "test_devops"; sdkClient = SDKClient.of(executor); nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml index b313b52acbc..f8282b2883d 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_cluster.xml @@ -2,6 +2,8 @@ + + diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml index bbdc8aa901a..e665adb7951 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_single.xml @@ -2,6 +2,8 @@ + + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml b/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml deleted file mode 100644 index ebaa1e6417c..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/test_suite/test_deploy.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/test/steps/modify_devops_config.sh b/test/steps/modify_devops_config.sh new file mode 100755 index 00000000000..12c1b10d961 --- /dev/null +++ b/test/steps/modify_devops_config.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +CASE_XML=$1 +DEPLOY_MODE=$2 +OPENMLDB_SDK_VERSION=$3 +TEST_CASE_VERSION=$4 +OPENMLDB_SERVER_VERSION=$5 +JAVA_NATIVE_VERSION=$6 +TABLE_STORAGE_MODE=$7 +echo "deploy_mode:${DEPLOY_MODE}" +ROOT_DIR=$(pwd) +echo "test_sdk_version:$OPENMLDB_SDK_VERSION" +cd test/integration-test/openmldb-test-java/openmldb-devops-test || exit +# modify suite_xml +sed -i "s###" test_suite/"${CASE_XML}" +#sed -i "s###" test_suite/"${CASE_XML}" + +echo "devops test suite xml:" +cat test_suite/"${CASE_XML}" + +#cd test/integration-test/openmldb-test-java/openmldb-sdk-test || exit +## modify suite_xml +#sed -i "s###" test_suite/test_cluster.xml +##sed -i "s###" test_suite/test_cluster.xml +##if [[ "${BUILD_MODE}" == "SRC" ]]; then +## sed -i "s###" test_suite/"${CASE_XML}" +##fi +#echo "test suite xml:" +#cat test_suite/"${CASE_XML}" +# +#if [ -n "${TEST_CASE_VERSION}" ]; then +# echo -e "\nversion=${TEST_CASE_VERSION}" >> src/main/resources/run_case.properties +#fi +#if [ -n "${TABLE_STORAGE_MODE}" ]; then +# sed -i "s#table_storage_mode=.*#table_storage_mode=${TABLE_STORAGE_MODE}#" src/main/resources/run_case.properties +#fi +#echo "run_case config:" +#cat src/main/resources/run_case.properties +# modify pom +cd "${ROOT_DIR}" +cd test/integration-test/openmldb-test-java/openmldb-test-common || exit +sed -i "s#.*#${OPENMLDB_SDK_VERSION}#" pom.xml +sed -i "s#.*#${JAVA_NATIVE_VERSION}#" pom.xml +echo "pom xml:" +cat pom.xml +cd "${ROOT_DIR}" || exit diff --git a/test/steps/openmldb-node-failure-test.sh b/test/steps/openmldb-node-failure-test.sh new file mode 100755 index 00000000000..590c35cf3e4 --- /dev/null +++ b/test/steps/openmldb-node-failure-test.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# Copyright 2021 4Paradigm +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#bash openmldb-sdk-test-java.sh -b SRC -c test_all.xml -d cluster -l 0 +#-b SRC表示从源码进行编译,会从github上下载代码然后进行编译,PKG表示直接从github上下载压缩包部署 +#-c 执行的suite_xml,决定了跑哪些case +#-d 部署模式,有cluster和standalone两种,默认cluster +#-l 测试的case级别,有0,1,2,3,4,5六个级别,默认为0,也可以同时跑多个级别的case,例如:1,2,3,4,5 + +while getopts ":c:d:l:s:" opt +do + case $opt in + c) + echo "参数c的值:$OPTARG" + CASE_XML=$OPTARG + ;; + d) + echo "参数d的值:$OPTARG" + DEPLOY_MODE=$OPTARG + ;; + ?) echo "未知参数" + exit 1 + ;; + esac +done +if [[ "${CASE_XML}" == "" ]]; then + CASE_XML="test_all.xml" +fi +if [[ "${DEPLOY_MODE}" == "" ]]; then + DEPLOY_MODE="cluster" +fi + +echo "CASE_XML:${CASE_XML}" +echo "DEPLOY_MODE:${DEPLOY_MODE}" + +ROOT_DIR=$(pwd) +# 安装wget +yum install -y wget +yum install -y net-tools +ulimit -c unlimited +echo "ROOT_DIR:${ROOT_DIR}" + +# 从源码编译 +deployConfigPath="test/integration-test/openmldb-test-java/openmldb-deploy/src/main/resources/deploy.properties" +OPENMLDB_SERVER_VERSION="SRC" +SERVER_URL=$(more ${deployConfigPath} | grep "${OPENMLDB_SERVER_VERSION}") +echo "SERVER_URL:${SERVER_URL}" +if [[ "${SERVER_URL}" == "" ]]; then + echo -e "\n${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz\n" >> ${deployConfigPath} +else + sed -i "s#${OPENMLDB_SERVER_VERSION}=.*#${OPENMLDB_SERVER_VERSION}=${ROOT_DIR}/openmldb-linux.tar.gz#" ${deployConfigPath} +fi + +JAVA_SDK_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') +JAVA_NATIVE_VERSION=$(more java/pom.xml | grep ".*" | head -1 | sed 's#.*\(.*\).*#\1#') +sh test/steps/build-java-sdk.sh + +echo 
"JAVA_SDK_VERSION:${JAVA_SDK_VERSION}" +echo "JAVA_NATIVE_VERSION:${JAVA_NATIVE_VERSION}" +echo "deploy config:" +cat ${deployConfigPath} +# install command tool +cd test/test-tool/command-tool || exit +mvn clean install -Dmaven.test.skip=true +cd "${ROOT_DIR}" || exit +# modify config +sh test/steps/modify_devops_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" + +# install jar +cd test/integration-test/openmldb-test-java || exit +mvn clean install -Dmaven.test.skip=true +cd "${ROOT_DIR}" || exit +# run case +cd "${ROOT_DIR}"/test/integration-test/openmldb-test-java/openmldb-devops-test || exit +mvn clean test -e -U -Dsuite=test_suite/"${CASE_XML}" From 043cb4521faac0f7d4d8471b563af303cda36d09 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Wed, 31 Aug 2022 19:13:56 +0800 Subject: [PATCH 158/172] update OpenMLDInfo --- .github/workflows/devops-test.yml | 52 +++++++++++++ .../qa/openmldb_deploy/bean/OpenMLDBInfo.java | 3 +- .../common/OpenMLDBDeploy.java | 78 +++++++++++++------ .../devops_test/common/ClusterTest.java | 30 +++---- .../devops_test/tmp/TestClusterLinux.java | 28 ++++--- .../openmldb/devops_test/tmp/TestYaml.java | 35 +++++++++ .../test_suite/test_node_expansion.xml | 2 + .../openmldb/ecosystem/common/KafkaTest.java | 28 ++++--- .../http_test/common/ClusterTest.java | 27 ++++--- .../http_test/common/StandaloneTest.java | 27 ++++--- .../java_sdk_test/common/OpenMLDBTest.java | 28 ++++--- .../java_sdk_test/common/StandaloneTest.java | 26 ++++--- .../test_common/provider/YamlUtil.java | 19 +++-- 13 files changed, 272 insertions(+), 111 deletions(-) create mode 100644 test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestYaml.java diff --git a/.github/workflows/devops-test.yml b/.github/workflows/devops-test.yml index 7b3d413bb18..1b7a900d259 100644 --- a/.github/workflows/devops-test.yml +++ 
b/.github/workflows/devops-test.yml @@ -43,6 +43,58 @@ jobs: check_name: "node-failure-test-cluster Report" comment_title: "node-failure-test-cluster Report" + node-failure-test-single: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'node_failure' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: build jsdk and package + run: | + make configure CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-node-failure-test.sh -c test_single.xml -d single + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml + check_name: "node-failure-test-cluster Report" + comment_title: "node-failure-test-cluster Report" + + node-expansion-test-cluster: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'node_expansion' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: build jsdk and package + run: | + make configure CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al + - name: test + run: source /root/.bashrc && bash test/steps/openmldb-node-failure-test.sh -c test_node_expansion.xml -d cluster + - name: TEST Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml + check_name: 
"node-failure-test-cluster Report" + comment_title: "node-failure-test-cluster Report" + # upgrade-test-cluster: # if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }} # runs-on: ubuntu-latest diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java index 22befda91c7..bf63e7adb7c 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/bean/OpenMLDBInfo.java @@ -18,6 +18,7 @@ import lombok.Builder; import lombok.Data; +import lombok.NoArgsConstructor; import java.util.ArrayList; import java.util.List; @@ -27,7 +28,7 @@ * @date 2021/2/7 12:10 PM */ @Data -@Builder +//@Builder public class OpenMLDBInfo { private OpenMLDBDeployType deployType; private String host; diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java index 333e1465087..2ca68aa3d94 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/src/main/java/com/_4paradigm/qa/openmldb_deploy/common/OpenMLDBDeploy.java @@ -79,8 +79,9 @@ public OpenMLDBInfo deployCluster(int ns, int tablet){ return deployCluster(null,ns,tablet); } public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ - OpenMLDBInfo.OpenMLDBInfoBuilder builder = OpenMLDBInfo.builder(); - 
builder.deployType(OpenMLDBDeployType.CLUSTER); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); +// OpenMLDBInfo.OpenMLDBInfoBuilder builder = OpenMLDBInfo.builder(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); String testPath = DeployUtil.getTestPath(version); if(StringUtils.isNotEmpty(installPath)){ testPath = installPath+"/"+version; @@ -88,7 +89,10 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ if(StringUtils.isNotEmpty(clusterName)) { testPath = testPath + "/" + clusterName; } - builder.nsNum(ns).tabletNum(tablet).basePath(testPath); + openMLDBInfo.setNsNum(ns); + openMLDBInfo.setTabletNum(tablet); + openMLDBInfo.setBasePath(testPath); +// builder.nsNum(ns).tabletNum(tablet).basePath(testPath); String ip = LinuxUtil.hostnameI(); File file = new File(testPath); if(!file.exists()){ @@ -97,14 +101,25 @@ public OpenMLDBInfo deployCluster(String clusterName, int ns, int tablet){ int zkPort = deployZK(testPath); String openMLDBDirectoryName = downloadOpenMLDB(testPath); String zk_point = ip+":"+zkPort; - builder.zk_cluster(zk_point).zk_root_path("/openmldb"); - builder.nsEndpoints(Lists.newArrayList()).nsNames(Lists.newArrayList()); - builder.tabletEndpoints(Lists.newArrayList()).tabletNames(Lists.newArrayList()); - builder.apiServerEndpoints(Lists.newArrayList()).apiServerNames(Lists.newArrayList()); - builder.taskManagerEndpoints(Lists.newArrayList()); - builder.openMLDBPath(testPath+"/openmldb-ns-1/bin/openmldb"); - builder.openMLDBDirectoryName(openMLDBDirectoryName); - OpenMLDBInfo openMLDBInfo = builder.build(); + openMLDBInfo.setZk_cluster(zk_point); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList()); + openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList()); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList()); + 
openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList()); + openMLDBInfo.setOpenMLDBPath(testPath+"/openmldb-ns-1/bin/openmldb"); + openMLDBInfo.setOpenMLDBDirectoryName(openMLDBDirectoryName); +// builder.zk_cluster(zk_point).zk_root_path("/openmldb"); +// builder.nsEndpoints(Lists.newArrayList()).nsNames(Lists.newArrayList()); +// builder.tabletEndpoints(Lists.newArrayList()).tabletNames(Lists.newArrayList()); +// builder.apiServerEndpoints(Lists.newArrayList()).apiServerNames(Lists.newArrayList()); +// builder.taskManagerEndpoints(Lists.newArrayList()); +// builder.openMLDBPath(testPath+"/openmldb-ns-1/bin/openmldb"); +// builder.openMLDBDirectoryName(openMLDBDirectoryName); +// OpenMLDBInfo openMLDBInfo = builder.build(); for(int i=1;i<=tablet;i++) { int tablet_port ; if(useName){ @@ -450,19 +465,34 @@ public OpenMLDBInfo deployStandalone(String testPath, String ip){ boolean apiServerOk = LinuxUtil.checkPortIsUsed(apiServerPort,3000,30); if(nsOk&&tabletOk&&apiServerOk){ log.info(String.format("standalone 部署成功,nsPort:{},tabletPort:{},apiServerPort:{}",nsPort,tabletPort,apiServerPort)); - OpenMLDBInfo openMLDBInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.STANDALONE) - .openMLDBPath(testPath+"/openmldb-standalone/bin/openmldb") - .apiServerEndpoints(Lists.newArrayList()) - .basePath(testPath) - .nsEndpoints(Lists.newArrayList(nsEndpoint)) - .nsNum(1) - .host(ip) - .port(nsPort) - .tabletNum(1) - .tabletEndpoints(Lists.newArrayList(tabletEndpoint)) - .apiServerEndpoints(Lists.newArrayList(apiServerEndpoint)) - .build(); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.STANDALONE); + openMLDBInfo.setNsNum(1); + openMLDBInfo.setTabletNum(1); + openMLDBInfo.setBasePath(testPath); + openMLDBInfo.setHost(ip); + openMLDBInfo.setPort(nsPort); + openMLDBInfo.setNsEndpoints(Lists.newArrayList(nsEndpoint)); + 
openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList(tabletEndpoint)); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList(apiServerEndpoint)); + openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setOpenMLDBPath(testPath+"/openmldb-standalone/bin/openmldb"); + +// OpenMLDBInfo openMLDBInfo = OpenMLDBInfo.builder() +// .deployType(OpenMLDBDeployType.STANDALONE) +// .openMLDBPath(testPath+"/openmldb-standalone/bin/openmldb") +// .apiServerEndpoints(Lists.newArrayList()) +// .basePath(testPath) +// .nsEndpoints(Lists.newArrayList(nsEndpoint)) +// .nsNum(1) +// .host(ip) +// .port(nsPort) +// .tabletNum(1) +// .tabletEndpoints(Lists.newArrayList(tabletEndpoint)) +// .apiServerEndpoints(Lists.newArrayList(apiServerEndpoint)) +// .build(); return openMLDBInfo; } }catch (Exception e){ diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index fe90cb83111..9c8d4905573 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -59,19 +59,23 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setSystemTableReplicaNum(1); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(1, 1); }else{ - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .openMLDBDirectoryName("openmldb-0.6.0-darwin") - .basePath("/Users/zhaowei/openmldb-auto-test/tmp_mac") - 
.openMLDBPath("/Users/zhaowei/openmldb-auto-test/tmp_mac/openmldb-ns-1/bin/openmldb") - .zk_cluster("127.0.0.1:30003") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("127.0.0.1:30008", "127.0.0.1:30009")) - .tabletEndpoints(Lists.newArrayList("127.0.0.1:30004", "127.0.0.1:30006", "127.0.0.1:30007")) - .apiServerEndpoints(Lists.newArrayList("127.0.0.1:30010")) - .taskManagerEndpoints(Lists.newArrayList("127.0.0.1:30011")) - .build(); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); + openMLDBInfo.setNsNum(2); + openMLDBInfo.setTabletNum(3); + openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); + openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); + openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + + OpenMLDBGlobalVar.mainInfo = openMLDBInfo; OpenMLDBGlobalVar.env = "cluster"; // OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java index 0ef9fa8408d..86ffac5626d 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestClusterLinux.java @@ -25,17 +25,23 @@ public class TestClusterLinux { private SqlExecutor executor; @BeforeClass public void init() throws SQLException { - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30006")) - .build(); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); + openMLDBInfo.setNsNum(2); + openMLDBInfo.setTabletNum(3); + openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsNames(com.google.common.collect.Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletNames(com.google.common.collect.Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30006")); + 
openMLDBInfo.setApiServerNames(com.google.common.collect.Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:30007")); + openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + + OpenMLDBGlobalVar.mainInfo = openMLDBInfo; OpenMLDBGlobalVar.env = "cluster"; OpenMLDBClient openMLDBClient = new OpenMLDBClient(OpenMLDBGlobalVar.mainInfo.getZk_cluster(), OpenMLDBGlobalVar.mainInfo.getZk_root_path()); executor = openMLDBClient.getExecutor(); diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestYaml.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestYaml.java new file mode 100644 index 00000000000..e6065d00b96 --- /dev/null +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/tmp/TestYaml.java @@ -0,0 +1,35 @@ +package com._4paradigm.openmldb.devops_test.tmp; + +import com._4paradigm.openmldb.test_common.provider.YamlUtil; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; +import com.google.common.collect.Lists; +import org.testng.annotations.Test; + +public class TestYaml { + @Test + public void testWriteYaml(){ + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); + openMLDBInfo.setNsNum(2); + openMLDBInfo.setTabletNum(3); + openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", 
"172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); + openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); + openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + + YamlUtil.writeYamlFile(openMLDBInfo,"out/test.yaml"); + } + @Test + public void testLoadYaml(){ + OpenMLDBInfo openMLDBInfo = YamlUtil.getObject("out/test.yaml", OpenMLDBInfo.class); + System.out.println(openMLDBInfo); + } +} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml index d9c9d9e5edb..e69bdc129e8 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_node_expansion.xml @@ -2,6 +2,8 @@ + + diff --git a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java index 79609a1a50b..a224960b8d5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/main/java/com/_4paradigm/openmldb/ecosystem/common/KafkaTest.java @@ -56,17 +56,23 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(false); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - 
.deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) - .build(); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); + openMLDBInfo.setNsNum(2); + openMLDBInfo.setTabletNum(3); + openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); + openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); + openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + + OpenMLDBGlobalVar.mainInfo = openMLDBInfo; OpenMLDBGlobalVar.env = "cluster"; } diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java index 207a5c52617..b24d2de1652 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/ClusterTest.java @@ -53,17 +53,22 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers openMLDBDeploy.setCluster(false); RestfulGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); } else { - RestfulGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/fedb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/fedb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) - .build(); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); + openMLDBInfo.setNsNum(2); + openMLDBInfo.setTabletNum(3); + openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); + openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); + 
openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + RestfulGlobalVar.mainInfo = openMLDBInfo; OpenMLDBGlobalVar.env = "cluster"; } OpenMLDBClient openMLDBClient = new OpenMLDBClient(RestfulGlobalVar.mainInfo.getZk_cluster(),RestfulGlobalVar.mainInfo.getZk_root_path()); diff --git a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java index 5383e32164d..828130a17a5 100644 --- a/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-http-test/src/main/java/com/_4paradigm/openmldb/http_test/common/StandaloneTest.java @@ -16,6 +16,7 @@ package com._4paradigm.openmldb.http_test.common; +import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; @@ -43,17 +44,21 @@ public void beforeTest(@Optional("qa") String env, @Optional("main") String vers fedbDeploy.setOpenMLDBPath(fedbPath); RestfulGlobalVar.mainInfo = fedbDeploy.deployStandalone(); }else{ - RestfulGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.STANDALONE) - .basePath("/home/zhaowei01/fedb-auto-test/standalone") - .openMLDBPath("/home/zhaowei01/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") - .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10013")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10014")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10015")) - .host("172.24.4.55") - .port(10018) - .build(); + OpenMLDBInfo openMLDBInfo = new 
OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.STANDALONE); + openMLDBInfo.setHost("172.24.4.55"); + openMLDBInfo.setPort(30013); + openMLDBInfo.setNsNum(1); + openMLDBInfo.setTabletNum(1); + openMLDBInfo.setBasePath("/home/wangkaidong/fedb-auto-test/standalone"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30013")); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30014")); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30015")); + openMLDBInfo.setOpenMLDBPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb"); + + RestfulGlobalVar.mainInfo = openMLDBInfo; } } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java index bd7416a96d1..51423099afb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -56,17 +56,23 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setCluster(false); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); }else{ - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.CLUSTER) - .basePath("/home/zhaowei01/openmldb-auto-test/tmp") - .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:30000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) - 
.tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) - .build(); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); + openMLDBInfo.setNsNum(2); + openMLDBInfo.setTabletNum(3); + openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); + openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); + openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + + OpenMLDBGlobalVar.mainInfo = openMLDBInfo; OpenMLDBGlobalVar.env = "cluster"; } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java index 90424dda790..2ea77d26adf 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/StandaloneTest.java @@ -48,17 +48,21 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setOpenMLDBPath(openMLDBPath); 
OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployStandalone(); }else{ - OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() - .deployType(OpenMLDBDeployType.STANDALONE) - .basePath("/home/wangkaidong/fedb-auto-test/standalone") - .openMLDBPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb") - .nsNum(1).tabletNum(1) - .nsEndpoints(Lists.newArrayList("172.24.4.55:30013")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:30014")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30015")) - .host("172.24.4.55") - .port(30013) - .build(); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.STANDALONE); + openMLDBInfo.setHost("172.24.4.55"); + openMLDBInfo.setPort(30013); + openMLDBInfo.setNsNum(1); + openMLDBInfo.setTabletNum(1); + openMLDBInfo.setBasePath("/home/wangkaidong/fedb-auto-test/standalone"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30013")); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30014")); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30015")); + openMLDBInfo.setOpenMLDBPath("/home/wangkaidong/fedb-auto-test/standalone/openmldb-standalone/bin/openmldb"); + + OpenMLDBGlobalVar.mainInfo = openMLDBInfo; } String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/provider/YamlUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/provider/YamlUtil.java index 3fd2be4fca2..76ed8d366d6 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/provider/YamlUtil.java +++ 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/provider/YamlUtil.java @@ -22,6 +22,7 @@ import java.io.FileInputStream; import java.io.FileNotFoundException; +import java.io.PrintWriter; /** * @author zhaowei @@ -33,7 +34,7 @@ public class YamlUtil { public static final String FAIL_CASE= "FailCase"; - public static T getObject(String caseFile, Class clazz) throws FileNotFoundException { + public static T getObject(String caseFile, Class clazz) { try { Yaml yaml = new Yaml(); FileInputStream testDataStream = new FileInputStream(caseFile); @@ -43,14 +44,18 @@ public static T getObject(String caseFile, Class clazz) throws FileNotFou log.error("fail to load yaml: ", e); e.printStackTrace(); return null; -// FesqlDataProvider nullDataProvider = new FesqlDataProvider(); -// SQLCase failCase = new SQLCase(); -// failCase.setDesc(FAIL_SQL_CASE); -// nullDataProvider.setCases(Lists.newArrayList(failCase)); -// return nullDataProvider; } } - + public static void writeYamlFile(Object obj,String yamlPath) { + try { + Yaml yaml = new Yaml(); + PrintWriter out = new PrintWriter(yamlPath); + yaml.dump(obj,out); + } catch (Exception e) { + log.error("fail to write yaml: ", e); + e.printStackTrace(); + } + } } From 8fe93aab319374490eef7ef6075a261181013459 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 1 Sep 2022 12:03:24 +0800 Subject: [PATCH 159/172] add devops cicd --- .github/workflows/devops-test.yml | 133 ++++++++++++---- .../devops_test/common/ClusterTest.java | 36 +---- .../upgrade_test/UpgradeSingleton.java | 148 ------------------ .../test_suite/test_upgrade_single.xml | 2 +- .../java_sdk_test/common/OpenMLDBTest.java | 5 + test/steps/modify_devops_config.sh | 44 +++--- ...ailure-test.sh => openmldb-devops-test.sh} | 36 ++++- 7 files changed, 168 insertions(+), 236 deletions(-) delete mode 100644 
test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java rename test/steps/{openmldb-node-failure-test.sh => openmldb-devops-test.sh} (73%) diff --git a/.github/workflows/devops-test.yml b/.github/workflows/devops-test.yml index 1b7a900d259..55aa0461e90 100644 --- a/.github/workflows/devops-test.yml +++ b/.github/workflows/devops-test.yml @@ -6,7 +6,7 @@ on: PRE_UPGRADE_VERSION: description: 'version before upgrade' required: true - default: '0.5.0' + default: '' EXEC_TEST_TYPE: description: 'Which tests need to be executed? The options are all, upgrade, node_failure, node_expansion' required: true @@ -34,7 +34,7 @@ jobs: echo "openmldb-pkg:" ls -al - name: test - run: source /root/.bashrc && bash test/steps/openmldb-node-failure-test.sh -c test_cluster.xml -d cluster + run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -c test_cluster.xml -t node_failure - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 @@ -60,14 +60,14 @@ jobs: echo "openmldb-pkg:" ls -al - name: test - run: source /root/.bashrc && bash test/steps/openmldb-node-failure-test.sh -c test_single.xml -d single + run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -c test_single.xml -t node_failure - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 with: files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml - check_name: "node-failure-test-cluster Report" - comment_title: "node-failure-test-cluster Report" + check_name: "node-failure-test-single Report" + comment_title: "node-failure-test-single Report" node-expansion-test-cluster: if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'node_expansion' }} @@ -86,36 +86,111 @@ jobs: echo "openmldb-pkg:" ls -al - name: test - run: source /root/.bashrc && bash 
test/steps/openmldb-node-failure-test.sh -c test_node_expansion.xml -d cluster + run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -c test_node_expansion.xml -t node_expansion - name: TEST Results if: always() uses: EnricoMi/publish-unit-test-result-action@v1 with: files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml - check_name: "node-failure-test-cluster Report" - comment_title: "node-failure-test-cluster Report" + check_name: "node-expansion-test-cluster Report" + comment_title: "node-expansion-test-cluster Report" -# upgrade-test-cluster: -# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }} -# runs-on: ubuntu-latest -# container: -# image: ghcr.io/4paradigm/hybridsql:latest -# env: -# OS: linux -# steps: -# - uses: actions/checkout@v2 -# - name: modify-properties -# run: sh test/steps/modify-properties.sh ${{ github.event.inputs.JAVA_SDK_VERSION }} ${{ github.event.inputs.OPENMLDB_SERVER_VERSION }} ${{ github.event.inputs.PYTHON_SDK_VERSION }} ${{ github.event.inputs.BATCH_VERSION }} ${{ github.event.inputs.DIFF_VERSIONS }} -# - name: test -# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b PKG -c test_all.xml -d cluster -l "0" -# - name: TEST Results -# if: always() -# uses: EnricoMi/publish-unit-test-result-action@v1 -# with: -# files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml -# comment_mode: "create new" -# check_name: Java SDK Test Cluster0 PKG Report -# comment_title: Java SDK Test Cluster0 PKG Report + upgrade-test-cluster: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: build jsdk and package + run: | + make configure 
CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al + - name: test-memory + run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -v ${{ github.event.inputs.PRE_UPGRADE_VERSION }} -c test_upgrade.xml -t upgrade -s "memory" + - name: upgrade results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml + check_name: "upgrade-test-cluster Report" + comment_title: "upgrade-test-cluster Report" + - name: sdk results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml + check_name: "java-sdk-cluster-memory-0 Report" + comment_title: "java-sdk-cluster-memory-0 Report" + upgrade-test-single: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: build jsdk and package + run: | + make configure CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al + - name: test-memory + run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -v ${{ github.event.inputs.PRE_UPGRADE_VERSION }} -c test_upgrade_single.xml -t upgrade -s "memory" + - name: upgrade results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml + check_name: "upgrade-test-single Report" + comment_title: "upgrade-test-single Report" + - name: sdk results + if: 
always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml + check_name: "single-java-sdk-cluster-memory-0 Report" + comment_title: "single-java-sdk-cluster-memory-0 Report" + upgrade-test-cluster-SSD: + if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }} + runs-on: ubuntu-latest + container: + image: ghcr.io/4paradigm/hybridsql:latest + env: + OS: linux + steps: + - uses: actions/checkout@v2 + - name: build jsdk and package + run: | + make configure CMAKE_INSTALL_PREFIX=openmldb-linux + make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install + tar -zcvf openmldb-linux.tar.gz openmldb-linux + echo "openmldb-pkg:" + ls -al + - name: test-memory + run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -v ${{ github.event.inputs.PRE_UPGRADE_VERSION }} -c test_upgrade.xml -t upgrade -s "ssd" + - name: upgrade results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml + check_name: "upgrade-test-cluster ssd Report" + comment_title: "upgrade-test-cluster ssd Report" + - name: sdk results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml + check_name: "java-sdk-cluster-ssd-0 Report" + comment_title: "java-sdk-cluster-ssd-0 Report" diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java index 9c8d4905573..39da842080d 100644 --- 
a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/common/ClusterTest.java @@ -20,6 +20,7 @@ import com._4paradigm.openmldb.sdk.SqlExecutor; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; +import com._4paradigm.openmldb.test_common.provider.YamlUtil; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; @@ -30,6 +31,7 @@ import org.testng.annotations.Optional; import org.testng.annotations.Parameters; +import java.io.File; import java.sql.Statement; /** @@ -77,36 +79,12 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi OpenMLDBGlobalVar.mainInfo = openMLDBInfo; OpenMLDBGlobalVar.env = "cluster"; - -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// .openMLDBDirectoryName("openmldb-0.5.2-linux") -// .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/tmp") -// .openMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30000") -// .zk_root_path("/openmldb") -// .nsNum(2).tabletNum(3) -// .nsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")) -// .build(); -// OpenMLDBGlobalVar.env = "cluster"; - -// OpenMLDBGlobalVar.mainInfo = OpenMLDBInfo.builder() -// .deployType(OpenMLDBDeployType.CLUSTER) -// .basePath("/home/zhaowei01/openmldb-auto-test/single") -// 
.openMLDBPath("/home/zhaowei01/openmldb-auto-test/single/openmldb-ns-1/bin/openmldb") -// .zk_cluster("172.24.4.55:30008") -// .zk_root_path("/openmldb") -// .nsNum(1).tabletNum(1) -// .nsEndpoints(Lists.newArrayList("172.24.4.55:30010")) -// .tabletEndpoints(Lists.newArrayList("172.24.4.55:30009")) -// .apiServerEndpoints(Lists.newArrayList("172.24.4.55:30011")) -// .taskManagerEndpoints(Lists.newArrayList("172.24.4.55:30012")) -// .build(); -// OpenMLDBGlobalVar.env = "cluster"; - } + File outFile = new File("out"); + if(!outFile.exists()){ + outFile.mkdir(); + } + YamlUtil.writeYamlFile(OpenMLDBGlobalVar.mainInfo,"out/openmldb_info.yaml"); String caseEnv = System.getProperty("caseEnv"); if (!StringUtils.isEmpty(caseEnv)) { OpenMLDBGlobalVar.env = caseEnv; diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java deleted file mode 100644 index 84d4c589b15..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/test/java/com/_4paradigm/openmldb/devops_test/upgrade_test/UpgradeSingleton.java +++ /dev/null @@ -1,148 +0,0 @@ -package com._4paradigm.openmldb.devops_test.upgrade_test; - -import com._4paradigm.openmldb.devops_test.common.ClusterTest; -import com._4paradigm.openmldb.devops_test.util.CheckUtil; -import com._4paradigm.openmldb.test_common.openmldb.NsClient; -import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBDevops; -import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; -import com._4paradigm.openmldb.test_common.openmldb.SDKClient; -import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; -import com._4paradigm.qa.openmldb_deploy.util.DeployUtil; -import com._4paradigm.test_tool.command_tool.common.ExecutorUtil; -import lombok.extern.slf4j.Slf4j; 
-import org.testng.Assert; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Optional; -import org.testng.annotations.Parameters; -import org.testng.annotations.Test; -import org.testng.collections.Lists; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -@Slf4j -public class UpgradeSingleton extends ClusterTest { - private String dbName; - private String memoryTableName; - private String ssdTableName; - private String hddTableName; - private SDKClient sdkClient; - private NsClient nsClient; - private OpenMLDBDevops openMLDBDevops; - private String openMLDBPath; - private String newBinPath; - private String confPath; - private String upgradePath; - private OpenMLDBDeploy openMLDBDeploy; - @BeforeClass - @Parameters("upgradeVersion") - public void beforeClass(@Optional("0.6.0") String upgradeVersion){ - dbName = "test_upgrade"; - memoryTableName = "test_memory"; - ssdTableName = "test_ssd"; - hddTableName = "test_hdd"; - sdkClient = SDKClient.of(executor); - nsClient = NsClient.of(OpenMLDBGlobalVar.mainInfo); - openMLDBDevops = OpenMLDBDevops.of(OpenMLDBGlobalVar.mainInfo,dbName); - - int dataCount = 100; - sdkClient.createAndUseDB(dbName); - String memoryTableDDL = "create table test_memory(\n" + - "c1 string,\n" + - "c2 smallint,\n" + - "c3 int,\n" + - "c4 bigint,\n" + - "c5 float,\n" + - "c6 double,\n" + - "c7 timestamp,\n" + - "c8 date,\n" + - "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3);"; - String ssdTableDDL = "create table test_ssd(\n" + - "c1 string,\n" + - "c2 smallint,\n" + - "c3 int,\n" + - "c4 bigint,\n" + - "c5 float,\n" + - "c6 double,\n" + - "c7 timestamp,\n" + - "c8 date,\n" + - "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"SSD\");"; - String hddTableDDL = "create table test_hdd(\n" + - "c1 string,\n" + - "c2 smallint,\n" + - "c3 int,\n" + - "c4 bigint,\n" + - "c5 float,\n" + - "c6 double,\n" + - 
"c7 timestamp,\n" + - "c8 date,\n" + - "c9 bool,\n" + - "index(key=(c1),ts=c7))options(partitionnum=2,replicanum=3,storage_mode=\"HDD\");"; - List> dataList = new ArrayList<>(); - for(int i=0;i list = Lists.newArrayList("aa" + i, 1, 2, 3, 1.1, 2.1, 1590738989000L, "2020-05-01", true); - dataList.add(list); - } - sdkClient.execute(Lists.newArrayList(memoryTableDDL)); - sdkClient.insertList(memoryTableName,dataList); - if(version.compareTo("0.5.0")>=0) { - sdkClient.execute(Lists.newArrayList(ssdTableDDL, hddTableDDL)); - sdkClient.insertList(ssdTableName, dataList); - sdkClient.insertList(hddTableName, dataList); - } - upgradePath = DeployUtil.getTestPath(version)+"/upgrade_"+upgradeVersion; - File file = new File(upgradePath); - if(!file.exists()){ - file.mkdirs(); - } - openMLDBDeploy = new OpenMLDBDeploy(upgradeVersion); - String upgradeDirectoryName = openMLDBDeploy.downloadOpenMLDB(upgradePath); - openMLDBPath = upgradeDirectoryName+"/bin/openmldb"; - newBinPath = upgradeDirectoryName+"/bin/"; - confPath = upgradeDirectoryName+"/conf"; - } - @Test - public void testUpgrade(){ - Map> map1 = nsClient.getTableOffset(dbName); - log.info("升级前offset:"+map1); - openMLDBDevops.upgradeNs(newBinPath,confPath); - openMLDBDevops.upgradeTablet(newBinPath,confPath); - openMLDBDevops.upgradeApiServer(newBinPath,confPath); - openMLDBDevops.upgradeTaskManager(openMLDBDeploy); - Map> map2 = nsClient.getTableOffset(dbName); - log.info("升级后offset:"+map2); - Assert.assertEquals(map1,map2); - CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(memoryTableName), 100, 10); - if(version.compareTo("0.5.0")>=0) { - CheckUtil.addDataCheckByOffset(sdkClient, nsClient, dbName, Lists.newArrayList(ssdTableName, hddTableName), 100, 10); - } - } - -// public void upgradeNs(){ -// Map> map1 = nsClient.getTableOffset(dbName); -// log.info("升级前offset:"+map1); -// openMLDBDevops.upgradeNs(openMLDBPath,confPath); -// Map> map2 = nsClient.getTableOffset(dbName); -// 
log.info("升级后offset:"+map2); -// Assert.assertEquals(map1,map2); -// } -// public void upgradeTablet(){ -// Map> map1 = nsClient.getTableOffset(dbName); -// log.info("升级前offset:"+map1); -// openMLDBDevops.upgradeTablet(openMLDBPath,confPath); -// Map> map2 = nsClient.getTableOffset(dbName); -// log.info("升级后offset:"+map2); -// Assert.assertEquals(map1,map2); -// CheckUtil.addDataCheck(sdkClient,nsClient,dbName,Lists.newArrayList(memoryTableName,ssdTableName,hddTableName),100,10); -// } - -// @AfterClass - public void afterClass(){ - String command = "rm -rf "+upgradePath; - ExecutorUtil.run(command); - } -} diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml index dd51a0562ab..05388c38578 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/test_suite/test_upgrade_single.xml @@ -6,7 +6,7 @@ - + diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java index 51423099afb..30c47cf5ba4 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -21,6 +21,7 @@ import com._4paradigm.openmldb.test_common.common.BaseTest; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBGlobalVar; import com._4paradigm.openmldb.test_common.openmldb.OpenMLDBClient; +import com._4paradigm.openmldb.test_common.provider.YamlUtil; import 
com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; @@ -31,6 +32,8 @@ import org.testng.annotations.Optional; import org.testng.annotations.Parameters; +import java.io.File; +import java.lang.reflect.Field; import java.sql.Statement; /** @@ -55,6 +58,8 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi openMLDBDeploy.setOpenMLDBPath(openMLDBPath); openMLDBDeploy.setCluster(false); OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3); + }else if(env.equalsIgnoreCase("deploy")){ + OpenMLDBGlobalVar.mainInfo = YamlUtil.getObject("out/openmldb_info.yaml",OpenMLDBInfo.class); }else{ OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); diff --git a/test/steps/modify_devops_config.sh b/test/steps/modify_devops_config.sh index 12c1b10d961..8f4b5e3dd94 100755 --- a/test/steps/modify_devops_config.sh +++ b/test/steps/modify_devops_config.sh @@ -16,7 +16,7 @@ CASE_XML=$1 -DEPLOY_MODE=$2 +PRE_UPGRADE_VERSION=$2 OPENMLDB_SDK_VERSION=$3 TEST_CASE_VERSION=$4 OPENMLDB_SERVER_VERSION=$5 @@ -27,30 +27,32 @@ ROOT_DIR=$(pwd) echo "test_sdk_version:$OPENMLDB_SDK_VERSION" cd test/integration-test/openmldb-test-java/openmldb-devops-test || exit # modify suite_xml -sed -i "s###" test_suite/"${CASE_XML}" -#sed -i "s###" test_suite/"${CASE_XML}" +if [[ "${PRE_UPGRADE_VERSION}" == "" ]]; then + sed -i "s###" test_suite/"${CASE_XML}" +else + sed -i "s###" test_suite/"${CASE_XML}" + sed -i "s###" test_suite/"${CASE_XML}" +fi echo "devops test suite xml:" cat test_suite/"${CASE_XML}" -#cd test/integration-test/openmldb-test-java/openmldb-sdk-test || exit -## modify suite_xml -#sed -i "s###" test_suite/test_cluster.xml -##sed -i "s###" test_suite/test_cluster.xml -##if [[ "${BUILD_MODE}" == "SRC" ]]; then -## sed -i "s###" test_suite/"${CASE_XML}" -##fi -#echo "test suite 
xml:" -#cat test_suite/"${CASE_XML}" -# -#if [ -n "${TEST_CASE_VERSION}" ]; then -# echo -e "\nversion=${TEST_CASE_VERSION}" >> src/main/resources/run_case.properties -#fi -#if [ -n "${TABLE_STORAGE_MODE}" ]; then -# sed -i "s#table_storage_mode=.*#table_storage_mode=${TABLE_STORAGE_MODE}#" src/main/resources/run_case.properties -#fi -#echo "run_case config:" -#cat src/main/resources/run_case.properties +cd test/integration-test/openmldb-test-java/openmldb-sdk-test || exit +# modify suite_xml +sed -i "s###" test_suite/test_cluster.xml +sed -i "s###" test_suite/test_cluster.xml + +echo "test suite xml:" +cat test_suite/test_cluster.xml + +if [ -n "${TEST_CASE_VERSION}" ]; then + echo -e "\nversion=${TEST_CASE_VERSION}" >> src/main/resources/run_case.properties +fi +if [ -n "${TABLE_STORAGE_MODE}" ]; then + sed -i "s#table_storage_mode=.*#table_storage_mode=${TABLE_STORAGE_MODE}#" src/main/resources/run_case.properties +fi +echo "run_case config:" +cat src/main/resources/run_case.properties # modify pom cd "${ROOT_DIR}" cd test/integration-test/openmldb-test-java/openmldb-test-common || exit diff --git a/test/steps/openmldb-node-failure-test.sh b/test/steps/openmldb-devops-test.sh similarity index 73% rename from test/steps/openmldb-node-failure-test.sh rename to test/steps/openmldb-devops-test.sh index 590c35cf3e4..d473cbdedb5 100755 --- a/test/steps/openmldb-node-failure-test.sh +++ b/test/steps/openmldb-devops-test.sh @@ -21,16 +21,23 @@ #-d 部署模式,有cluster和standalone两种,默认cluster #-l 测试的case级别,有0,1,2,3,4,5六个级别,默认为0,也可以同时跑多个级别的case,例如:1,2,3,4,5 -while getopts ":c:d:l:s:" opt +while getopts ":c:t:s:v:" opt do case $opt in c) echo "参数c的值:$OPTARG" CASE_XML=$OPTARG ;; - d) - echo "参数d的值:$OPTARG" - DEPLOY_MODE=$OPTARG + t) + echo "参数t的值:$OPTARG" + TEST_TYPE=$OPTARG + ;; + s) echo "参数s的值:$OPTARG" + TABLE_STORAGE_MODE=$OPTARG + ;; + ;; + v) echo "参数v的值:$OPTARG" + PRE_UPGRADE_VERSION=$OPTARG ;; ?) 
echo "未知参数" exit 1 @@ -40,12 +47,13 @@ done if [[ "${CASE_XML}" == "" ]]; then CASE_XML="test_all.xml" fi -if [[ "${DEPLOY_MODE}" == "" ]]; then - DEPLOY_MODE="cluster" +if [[ "${TEST_TYPE}" == "" ]]; then + TEST_TYPE="upgrade" fi echo "CASE_XML:${CASE_XML}" -echo "DEPLOY_MODE:${DEPLOY_MODE}" +echo "TEST_TYPE:${TEST_TYPE}" +echo "TABLE_STORAGE_MODE:${TABLE_STORAGE_MODE}" ROOT_DIR=$(pwd) # 安装wget @@ -78,7 +86,7 @@ cd test/test-tool/command-tool || exit mvn clean install -Dmaven.test.skip=true cd "${ROOT_DIR}" || exit # modify config -sh test/steps/modify_devops_config.sh "${CASE_XML}" "${DEPLOY_MODE}" "${JAVA_SDK_VERSION}" "" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" +sh test/steps/modify_devops_config.sh "${CASE_XML}" "${PRE_UPGRADE_VERSION}" "${JAVA_SDK_VERSION}" "" "${OPENMLDB_SERVER_VERSION}" "${JAVA_NATIVE_VERSION}" "${TABLE_STORAGE_MODE}" # install jar cd test/integration-test/openmldb-test-java || exit @@ -87,3 +95,15 @@ cd "${ROOT_DIR}" || exit # run case cd "${ROOT_DIR}"/test/integration-test/openmldb-test-java/openmldb-devops-test || exit mvn clean test -e -U -Dsuite=test_suite/"${CASE_XML}" + +if [[ "${TABLE_STORAGE_MODE}" == "memory" ]]; then + SDK_CASE_XML="test_cluster.xml" +else + SDK_CASE_XML="test_cluster_disk.xml" +fi +echo "SDK_CASE_XML:${SDK_CASE_XML}" +if [[ "${TEST_TYPE}" == "upgrade" ]]; then + # run case + cd "${ROOT_DIR}"/test/integration-test/openmldb-test-java/openmldb-sdk-test || exit + mvn clean test -e -U -DsuiteXmlFile=test_suite/"${SDK_CASE_XML}" -DcaseLevel="0" +fi From 3f1496eb3500b68a736021cb29e9c350f85fff40 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 1 Sep 2022 14:31:27 +0800 Subject: [PATCH 160/172] support upgrade test --- .../_4paradigm/openmldb/test_common/command/CommandUtil.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/CommandUtil.java 
b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/CommandUtil.java index 708c64c8f8a..37ab3479935 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/CommandUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/CommandUtil.java @@ -19,7 +19,7 @@ public static List run(String command, int time, int count){ List result; do{ result = ExecutorUtil.run(command); - if((result.size()==0)||(result.size()==1&&result.get(0).equals("zk client init failed"))){ + if((result.size()==0)||(result.contains("zk client init failed"))){ num++; Tool.sleep(time); logger.info("command retry:"+num); From 3a4f57e153f785242db7de933e676a5e1657e6d2 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 1 Sep 2022 15:49:01 +0800 Subject: [PATCH 161/172] support upgrade test --- .../_4paradigm/openmldb/devops_test/util/CheckUtil.java | 3 +-- .../test_common/command/OpenMLDBCommandFactory.java | 4 ++-- .../_4paradigm/openmldb/test_common/openmldb/NsClient.java | 7 +++---- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java index 776d2e8a6d5..357be47cfcb 100644 --- a/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-devops-test/src/main/java/com/_4paradigm/openmldb/devops_test/util/CheckUtil.java @@ -1,6 +1,5 @@ package com._4paradigm.openmldb.devops_test.util; -import com._4paradigm.openmldb.test_common.openmldb.CliClient; import 
com._4paradigm.openmldb.test_common.openmldb.NsClient; import com._4paradigm.openmldb.test_common.openmldb.SDKClient; import com._4paradigm.qa.openmldb_deploy.util.Tool; @@ -27,7 +26,7 @@ public static void addDataCheckByOffset(SDKClient sdkClient, NsClient nsClient, Tool.sleep(10*1000); } Assert.assertEquals(sdkClient.getTableRowCount(tableName),originalCount+addCount,msg); - Assert.assertEquals(nsClient.getTableRowCount(dbName,tableName),originalCount+addCount,msg); + Assert.assertEquals(nsClient.getTableCount(dbName,tableName),originalCount+addCount,msg); } nsClient.checkTableOffSet(dbName,null); } diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java index 7f3a2aa8e24..cc1437e6f50 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/command/OpenMLDBCommandFactory.java @@ -33,9 +33,9 @@ private static String getNoInteractiveCommandByStandalone(String rtidbPath,Strin return line; } private static String getNoInteractiveCommandByCLuster(String rtidbPath,String zkEndPoint,String zkRootPath,String dbName,String command){ - String line = "%s --zk_session_timeout=10000 --zk_cluster=%s --zk_root_path=%s --role=sql_client --interactive=false --database=%s --cmd='%s'"; + String line = "%s --zk_cluster=%s --zk_root_path=%s --role=sql_client --interactive=false --database=%s --cmd='%s'"; if(command.contains("'")){ - line = "%s --zk_session_timeout=10000 --zk_cluster=%s --zk_root_path=%s --role=sql_client --interactive=false --database=%s --cmd=\"%s\""; + line = "%s --zk_cluster=%s --zk_root_path=%s 
--role=sql_client --interactive=false --database=%s --cmd=\"%s\""; } line = String.format(line,rtidbPath,zkEndPoint,zkRootPath,dbName,command); // logger.info("generate rtidb no interactive command:{}",line); diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java index 314824b51c4..4b2b8db60e1 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/openmldb/NsClient.java @@ -1,6 +1,5 @@ package com._4paradigm.openmldb.test_common.openmldb; -import com._4paradigm.openmldb.test_common.bean.OpenMLDBResult; import com._4paradigm.openmldb.test_common.command.CommandUtil; import com._4paradigm.openmldb.test_common.util.NsResultUtil; import com._4paradigm.openmldb.test_common.util.Tool; @@ -31,7 +30,7 @@ public static NsClient of(OpenMLDBInfo openMLDBInfo){ } public String genNsCommand(String openMLDBPath,String zkCluster,String zkRootPath,String dbName,String command){ String dbStr = StringUtils.isNotEmpty(dbName)?"--database="+dbName:""; - String line = "%s --zk_session_timeout=10000 --zk_cluster=%s --zk_root_path=%s --role=ns_client --interactive=false %s --cmd='%s'"; + String line = "%s --zk_cluster=%s --zk_root_path=%s --role=ns_client --interactive=false %s --cmd='%s'"; line = String.format(line,openMLDBPath,zkCluster,zkRootPath,dbStr,command); log.info("ns command:"+line); return line; @@ -62,7 +61,7 @@ public void checkOPStatusDone(String dbName,String tableName){ public List showTableHaveTable(String dbName,String tableName){ String command = StringUtils.isNotEmpty(tableName) ?"showtable "+tableName:"showtable"; String nsCommand = 
genNsCommand(dbName,command); - Tool.sleep(3*1000); + Tool.sleep(10*1000); List result = WaitUtil.waitCondition(() -> { List lines = CommandUtil.run(nsCommand); if (lines.size() <= 2) { @@ -77,7 +76,7 @@ public List showTable(String dbName,String tableName){ List lines = runNs(dbName,command); return lines; } - public long getTableRowCount(String dbName,String tableName){ + public long getTableCount(String dbName, String tableName){ List lines = showTableHaveTable(dbName,tableName); long count = 0; for(int i=2;i Date: Thu, 1 Sep 2022 18:02:55 +0800 Subject: [PATCH 162/172] support upgrade test --- .../_4paradigm/test_tool/command_tool/common/LocalExecutor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LocalExecutor.java b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LocalExecutor.java index 03a2bbfcdd6..062a5f62118 100644 --- a/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LocalExecutor.java +++ b/test/test-tool/command-tool/src/main/java/com/_4paradigm/test_tool/command_tool/common/LocalExecutor.java @@ -14,7 +14,7 @@ public class LocalExecutor implements CommandExecutor { public LocalExecutor(){ starts.add("wget"); starts.add("tar"); - contains.add("--role=ns_client"); +// contains.add("--role=ns_client"); } public boolean isUseExec(String command){ for(String start:starts){ From eef690d99fe3d5d10d4636c2ec37235d9b38b2d2 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 1 Sep 2022 18:53:22 +0800 Subject: [PATCH 163/172] add devops cicd --- test/steps/openmldb-devops-test.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/steps/openmldb-devops-test.sh b/test/steps/openmldb-devops-test.sh index d473cbdedb5..8535f52f268 100755 --- a/test/steps/openmldb-devops-test.sh +++ b/test/steps/openmldb-devops-test.sh @@ -32,11 +32,12 @@ do echo 
"参数t的值:$OPTARG" TEST_TYPE=$OPTARG ;; - s) echo "参数s的值:$OPTARG" + s) + echo "参数s的值:$OPTARG" TABLE_STORAGE_MODE=$OPTARG ;; - ;; - v) echo "参数v的值:$OPTARG" + v) + echo "参数v的值:$OPTARG" PRE_UPGRADE_VERSION=$OPTARG ;; ?) echo "未知参数" From 27d33cc3b9e3c889b1380e629e523ad46aaf5f4b Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 1 Sep 2022 18:55:12 +0800 Subject: [PATCH 164/172] add devops cicd --- test/steps/modify_java_sdk_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/steps/modify_java_sdk_config.sh b/test/steps/modify_java_sdk_config.sh index dfbc670e058..45f3c77d7d4 100755 --- a/test/steps/modify_java_sdk_config.sh +++ b/test/steps/modify_java_sdk_config.sh @@ -43,7 +43,7 @@ fi echo "run_case config:" cat src/main/resources/run_case.properties # modify pom -cd "${ROOT_DIR}" +cd "${ROOT_DIR}" || exit cd test/integration-test/openmldb-test-java/openmldb-test-common || exit sed -i "s#.*#${OPENMLDB_SDK_VERSION}#" pom.xml sed -i "s#.*#${JAVA_NATIVE_VERSION}#" pom.xml From 9f24a506bf584adbe8dd8256380bd7d40f2b866d Mon Sep 17 00:00:00 2001 From: zhaowei Date: Thu, 1 Sep 2022 20:16:21 +0800 Subject: [PATCH 165/172] add devops cicd --- test/steps/modify_devops_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/steps/modify_devops_config.sh b/test/steps/modify_devops_config.sh index 8f4b5e3dd94..015246143c7 100755 --- a/test/steps/modify_devops_config.sh +++ b/test/steps/modify_devops_config.sh @@ -54,7 +54,7 @@ fi echo "run_case config:" cat src/main/resources/run_case.properties # modify pom -cd "${ROOT_DIR}" +cd "${ROOT_DIR}" || exit cd test/integration-test/openmldb-test-java/openmldb-test-common || exit sed -i "s#.*#${OPENMLDB_SDK_VERSION}#" pom.xml sed -i "s#.*#${JAVA_NATIVE_VERSION}#" pom.xml From a124e17938601bd52619c69d8adecd5752ed061f Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 2 Sep 2022 14:35:12 +0800 Subject: [PATCH 166/172] delete a test --- .github/workflows/devops-test.yml | 2 +- 
.../java_sdk_test/temp/TestCommand.java | 45 ------------------- test/steps/openmldb-devops-test.sh | 12 ++--- 3 files changed, 7 insertions(+), 52 deletions(-) delete mode 100644 test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java diff --git a/.github/workflows/devops-test.yml b/.github/workflows/devops-test.yml index 55aa0461e90..d139c0f8bdc 100644 --- a/.github/workflows/devops-test.yml +++ b/.github/workflows/devops-test.yml @@ -5,7 +5,7 @@ on: inputs: PRE_UPGRADE_VERSION: description: 'version before upgrade' - required: true + required: false default: '' EXEC_TEST_TYPE: description: 'Which tests need to be executed? The options are all, upgrade, node_failure, node_expansion' diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java deleted file mode 100644 index 4f423db36be..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/temp/TestCommand.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2021 4Paradigm - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com._4paradigm.openmldb.java_sdk_test.temp; - - -import com._4paradigm.openmldb.test_common.command.OpenMLDBCommandFactory; -import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; -import com.google.common.collect.Lists; -import org.testng.annotations.Test; - -import java.util.List; - -public class TestCommand { - @Test - public void test1(){ - OpenMLDBInfo fedbInfo = OpenMLDBInfo.builder() - .basePath("/home/zhaowei01/fedb-auto-test/0.1.5") - .openMLDBPath("/home/zhaowei01/fedb-auto-test/0.1.5/openmldb-ns-1/bin/openmldb") - .zk_cluster("172.24.4.55:10000") - .zk_root_path("/openmldb") - .nsNum(2).tabletNum(3) - .nsEndpoints(Lists.newArrayList("172.24.4.55:10001", "172.24.4.55:10002")) - .tabletEndpoints(Lists.newArrayList("172.24.4.55:10003", "172.24.4.55:10004", "172.24.4.55:10005")) - .apiServerEndpoints(Lists.newArrayList("172.24.4.55:10006")) - .build(); - // String command = OpenmlDBCommandFactory.getNoInteractiveCommand(fedbInfo, "test_zw", "desc t3"); - // System.out.println("command = " + command); - List test_zw = OpenMLDBCommandFactory.runNoInteractive(fedbInfo, "test_zw", "desc t4;"); - System.out.println("======="); - test_zw.forEach(System.out::println); - } -} diff --git a/test/steps/openmldb-devops-test.sh b/test/steps/openmldb-devops-test.sh index 8535f52f268..b1c52093919 100755 --- a/test/steps/openmldb-devops-test.sh +++ b/test/steps/openmldb-devops-test.sh @@ -97,13 +97,13 @@ cd "${ROOT_DIR}" || exit cd "${ROOT_DIR}"/test/integration-test/openmldb-test-java/openmldb-devops-test || exit mvn clean test -e -U -Dsuite=test_suite/"${CASE_XML}" -if [[ "${TABLE_STORAGE_MODE}" == "memory" ]]; then - SDK_CASE_XML="test_cluster.xml" -else - SDK_CASE_XML="test_cluster_disk.xml" -fi -echo "SDK_CASE_XML:${SDK_CASE_XML}" if [[ "${TEST_TYPE}" == "upgrade" ]]; then + if [[ "${TABLE_STORAGE_MODE}" == "memory" ]]; then + SDK_CASE_XML="test_cluster.xml" + else + SDK_CASE_XML="test_cluster_disk.xml" + fi + echo "SDK_CASE_XML:${SDK_CASE_XML}" # 
run case cd "${ROOT_DIR}"/test/integration-test/openmldb-test-java/openmldb-sdk-test || exit mvn clean test -e -U -DsuiteXmlFile=test_suite/"${SDK_CASE_XML}" -DcaseLevel="0" From d87bf5cf1187e506a3421d13a2a9265d2f5a6ff7 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Fri, 2 Sep 2022 15:45:26 +0800 Subject: [PATCH 167/172] delete a test --- .../auto_gen_case/AutoGenCaseTest.java | 31 ++++++++++++------- test/steps/build-java-sdk.sh | 3 +- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java index d056c980a84..09b551f6f59 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/auto_gen_case/AutoGenCaseTest.java @@ -25,8 +25,10 @@ import com._4paradigm.openmldb.test_common.model.SQLCase; import com._4paradigm.openmldb.test_common.model.SQLCaseType; import com._4paradigm.openmldb.test_common.provider.Yaml; +import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBDeployType; import com._4paradigm.qa.openmldb_deploy.bean.OpenMLDBInfo; import com._4paradigm.qa.openmldb_deploy.common.OpenMLDBDeploy; +import com.google.common.collect.Lists; import io.qameta.allure.Feature; import io.qameta.allure.Story; import lombok.extern.slf4j.Slf4j; @@ -62,17 +64,24 @@ public void beforeClass(){ }else{ //测试调试用 String verion = "2.2.2"; - OpenMLDBInfo fedbInfo = OpenMLDBInfo.builder() - .basePath("/home/zhaowei01/fedb-auto-test/2.2.2") - .openMLDBPath("/home/zhaowei01/fedb-auto-test/2.2.2/fedb-ns-1/bin/fedb") - .zk_cluster("172.24.4.55:10006") - .zk_root_path("/fedb") - 
.nsNum(2).tabletNum(3) - .nsEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:10007", "172.24.4.55:10008")) - .tabletEndpoints(com.google.common.collect.Lists.newArrayList("172.24.4.55:10009", "172.24.4.55:10010", "172.24.4.55:10011")) - .build(); - executorMap.put(verion, new OpenMLDBClient(fedbInfo.getZk_cluster(),fedbInfo.getZk_root_path()).getExecutor()); - fedbInfoMap.put(verion, fedbInfo); + OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); + openMLDBInfo.setNsNum(2); + openMLDBInfo.setTabletNum(3); + openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); + openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_root_path("/openmldb"); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsNames(Lists.newArrayList()); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletNames(Lists.newArrayList()); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); + openMLDBInfo.setApiServerNames(Lists.newArrayList()); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); + openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + + executorMap.put(verion, new OpenMLDBClient(openMLDBInfo.getZk_cluster(),openMLDBInfo.getZk_root_path()).getExecutor()); + fedbInfoMap.put(verion, openMLDBInfo); fedbInfoMap.put("mainVersion", OpenMLDBGlobalVar.mainInfo); } } diff --git a/test/steps/build-java-sdk.sh b/test/steps/build-java-sdk.sh index 2fff60f3e67..dee889d37b0 100755 --- a/test/steps/build-java-sdk.sh +++ b/test/steps/build-java-sdk.sh @@ -16,5 +16,6 @@ ROOT_DIR=$(pwd) cd java || exit -mvn clean install -Dmaven.test.skip=true -Dgpg.skip +#mvn clean install -Dmaven.test.skip=true -Dgpg.skip +mvn clean install -DskipTests=true 
-Dscalatest.skip=true -Dwagon.skip=true -Dmaven.test.skip=true -Dgpg.skip cd "${ROOT_DIR}" || exit From 389963e12f02804719a6e0fabaf5b12409b10874 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 5 Sep 2022 10:56:16 +0800 Subject: [PATCH 168/172] modify deploy --- .../openmldb-deploy/test-suite/test_deploy_tmp2.xml | 2 +- test/steps/build-java-sdk.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml index c3104b2b7b6..629590aeb19 100644 --- a/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml +++ b/test/integration-test/openmldb-test-java/openmldb-deploy/test-suite/test_deploy_tmp2.xml @@ -2,7 +2,7 @@ - + diff --git a/test/steps/build-java-sdk.sh b/test/steps/build-java-sdk.sh index dee889d37b0..7195ee4b2f1 100755 --- a/test/steps/build-java-sdk.sh +++ b/test/steps/build-java-sdk.sh @@ -16,6 +16,6 @@ ROOT_DIR=$(pwd) cd java || exit -#mvn clean install -Dmaven.test.skip=true -Dgpg.skip -mvn clean install -DskipTests=true -Dscalatest.skip=true -Dwagon.skip=true -Dmaven.test.skip=true -Dgpg.skip +mvn clean install -Dmaven.test.skip=true -Dgpg.skip +#mvn clean install -DskipTests=true -Dscalatest.skip=true -Dwagon.skip=true -Dmaven.test.skip=true -Dgpg.skip cd "${ROOT_DIR}" || exit From e6c73bf9f03bf1f18e47d9a7ea5559283ae74a6b Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 5 Sep 2022 13:38:33 +0800 Subject: [PATCH 169/172] modify option checker --- .../java_sdk_test/checker/OptionsChecker.java | 4 +-- .../java_sdk_test/common/OpenMLDBTest.java | 25 ++++++++++++---- .../cluster/v050/LongWindowTest.java | 29 ------------------- 3 files changed, 22 insertions(+), 36 deletions(-) delete mode 100644 
test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java index 1531989a47b..c0f2bf74ea2 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/checker/OptionsChecker.java @@ -58,7 +58,7 @@ public void check() throws Exception { Object partitionNum = JsonPath.read(resultData, "$.table.partition_num"); Object replicaNum = JsonPath.read(resultData, "$.table.replica_num"); Map options = expect.getOptions(); - Assert.assertEquals(options.get("partitionNum"),partitionNum,"partitionNum不一致"); - Assert.assertEquals(options.get("replicaNum"),replicaNum,"replicaNum不一致"); + Assert.assertEquals(partitionNum,options.get("partitionNum"),"partitionNum不一致,resultData:"+resultData); + Assert.assertEquals(replicaNum,options.get("replicaNum"),"replicaNum不一致,resultData:"+resultData); } } diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java index 30c47cf5ba4..1812abefd73 100644 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java +++ b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/main/java/com/_4paradigm/openmldb/java_sdk_test/common/OpenMLDBTest.java @@ -62,19 +62,34 @@ 
public void beforeTest(@Optional("qa") String env,@Optional("main") String versi OpenMLDBGlobalVar.mainInfo = YamlUtil.getObject("out/openmldb_info.yaml",OpenMLDBInfo.class); }else{ OpenMLDBInfo openMLDBInfo = new OpenMLDBInfo(); +// openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); +// openMLDBInfo.setNsNum(2); +// openMLDBInfo.setTabletNum(3); +// openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); +// openMLDBInfo.setZk_cluster("172.24.4.55:30000"); +// openMLDBInfo.setZk_root_path("/openmldb"); +// openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); +// openMLDBInfo.setNsNames(Lists.newArrayList()); +// openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); +// openMLDBInfo.setTabletNames(Lists.newArrayList()); +// openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); +// openMLDBInfo.setApiServerNames(Lists.newArrayList()); +// openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); +// openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); + openMLDBInfo.setDeployType(OpenMLDBDeployType.CLUSTER); openMLDBInfo.setNsNum(2); openMLDBInfo.setTabletNum(3); openMLDBInfo.setBasePath("/home/zhaowei01/openmldb-auto-test/tmp"); - openMLDBInfo.setZk_cluster("172.24.4.55:30000"); + openMLDBInfo.setZk_cluster("172.24.4.55:30008"); openMLDBInfo.setZk_root_path("/openmldb"); - openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30004", "172.24.4.55:30005")); + openMLDBInfo.setNsEndpoints(Lists.newArrayList("172.24.4.55:30012", "172.24.4.55:30013")); openMLDBInfo.setNsNames(Lists.newArrayList()); - openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30001", "172.24.4.55:30002", "172.24.4.55:30003")); + openMLDBInfo.setTabletEndpoints(Lists.newArrayList("172.24.4.55:30009", "172.24.4.55:30010", "172.24.4.55:30011")); 
openMLDBInfo.setTabletNames(Lists.newArrayList()); - openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30006")); + openMLDBInfo.setApiServerEndpoints(Lists.newArrayList("172.24.4.55:30014")); openMLDBInfo.setApiServerNames(Lists.newArrayList()); - openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30007")); + openMLDBInfo.setTaskManagerEndpoints(Lists.newArrayList("172.24.4.55:30015")); openMLDBInfo.setOpenMLDBPath("/home/zhaowei01/openmldb-auto-test/tmp/openmldb-ns-1/bin/openmldb"); OpenMLDBGlobalVar.mainInfo = openMLDBInfo; diff --git a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java b/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java deleted file mode 100644 index 7f6426141cc..00000000000 --- a/test/integration-test/openmldb-test-java/openmldb-sdk-test/src/test/java/com/_4paradigm/openmldb/java_sdk_test/cluster/v050/LongWindowTest.java +++ /dev/null @@ -1,29 +0,0 @@ -package com._4paradigm.openmldb.java_sdk_test.cluster.v050; - -import com._4paradigm.openmldb.java_sdk_test.common.OpenMLDBTest; -import com._4paradigm.openmldb.java_sdk_test.executor.ExecutorFactory; -import com._4paradigm.openmldb.test_common.model.SQLCase; -import com._4paradigm.openmldb.test_common.model.SQLCaseType; -import com._4paradigm.openmldb.test_common.provider.Yaml; -import io.qameta.allure.Story; -import lombok.extern.slf4j.Slf4j; -import org.testng.annotations.Test; - -@Slf4j -public class LongWindowTest extends OpenMLDBTest { - - - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/long_window/long_window.yaml") - @Story("Out-In") - public void testLongWindow1(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kBatch).run(); - } - - @Test(dataProvider = "getCase") - @Yaml(filePaths = "function/long_window/long_window.yaml") - 
@Story("Out-In") - public void testLongWindow2(SQLCase testCase){ - ExecutorFactory.build(executor,testCase, SQLCaseType.kRequest).run(); - } -} From afdb7679deab282d0e7d5f23b1c39a32e012955b Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 5 Sep 2022 19:41:50 +0800 Subject: [PATCH 170/172] modify case --- .../ddl/test_create_no_index.yaml | 1 - cases/integration_test/ddl/test_options.yaml | 67 +++++++++++++------ .../openmldb/test_common/util/SDKUtil.java | 2 +- 3 files changed, 47 insertions(+), 23 deletions(-) diff --git a/cases/integration_test/ddl/test_create_no_index.yaml b/cases/integration_test/ddl/test_create_no_index.yaml index f29afdf4717..603d53498b3 100644 --- a/cases/integration_test/ddl/test_create_no_index.yaml +++ b/cases/integration_test/ddl/test_create_no_index.yaml @@ -240,7 +240,6 @@ cases: - id: 11 desc: 不指定索引创建表,然后增加索引 - tags: ["TODO","还不支持增加索引时指定新的ts列"] inputs: - create: | diff --git a/cases/integration_test/ddl/test_options.yaml b/cases/integration_test/ddl/test_options.yaml index 1c8ed43ad7d..d35fb6bec31 100644 --- a/cases/integration_test/ddl/test_options.yaml +++ b/cases/integration_test/ddl/test_options.yaml @@ -56,7 +56,7 @@ cases: name: t3 success: true options: - partitionNum: 8 + partitionNum: 1 replicaNum: 1 - id: 3 @@ -107,11 +107,14 @@ cases: distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ]),('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] ); expect: - success: false + name: t3 + success: true + options: + partitionNum: 2 + replicaNum: 3 - id: 6 desc: partitionnum=0,指定distribution - tags: ["TODO","bug修复后验证"] mode: standalone-unsupport inputs: - name: t3 @@ -123,11 +126,7 @@ cases: distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] ); expect: - name: t3 - success: true - options: - partitionNum: 1 - replicaNum: 3 + success: false - id: 7 desc: partitionnum=10 @@ -195,21 +194,21 @@ cases: success: false - id: 11 - desc: distribution没有指定follower + desc: 
distribution小于replicanum inputs: - name: t3 sql: | create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) options ( partitionnum = 1, - replicanum = 1, + replicanum = 2, distribution = [ ('{tb_endpoint_0}')] ); expect: success: false - id: 12 - desc: distribution的个数和replicanum对不上 + desc: distribution大于replicanum mode: standalone-unsupport inputs: - name: t3 @@ -322,7 +321,11 @@ cases: distribution = [ ('{tb_endpoint_0}', [])] ); expect: - success: false + name: t3 + success: true + options: + partitionNum: 1 + replicaNum: 1 - id: 20 desc: distribution指定的tablet不存在 @@ -351,11 +354,7 @@ cases: distribution = [ ('{tb_endpoint_0}',[])] ); expect: - name: t3 - success: true - options: - partitionNum: 4 - replicaNum: 1 + success: false - id: 22 desc: test-case @@ -383,7 +382,6 @@ cases: replicaNum: 3 - id: 23 - tags: ["TODO","bug修复后验证"] desc: partitionnum=0,没有指定distribution mode: standalone-unsupport inputs: @@ -399,7 +397,6 @@ cases: - id: 24 desc: 没有partitionnum和replicanum,指定distribution - tags: ["TODO","bug修复后验证"] mode: standalone-unsupport inputs: - name: t3 @@ -414,8 +411,36 @@ cases: options: partitionNum: 1 replicaNum: 3 - - + - + id: 25 + desc: distribution多于partitionnum + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 1, + replicanum = 3, + distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ]),('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])] + ); + expect: + success: false + - + id: 26 + desc: distribution小于partitionnum + mode: standalone-unsupport + inputs: + - name: t3 + sql: | + create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m)) + options ( + partitionnum = 3, + replicanum = 3, + distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ]),('{tb_endpoint_0}', [ 
'{tb_endpoint_1}','{tb_endpoint_2}' ])] + ); + expect: + success: false diff --git a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java index bb6d5f71510..10600ac4e7b 100644 --- a/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java +++ b/test/integration-test/openmldb-test-java/openmldb-test-common/src/main/java/com/_4paradigm/openmldb/test_common/util/SDKUtil.java @@ -276,7 +276,7 @@ public static OpenMLDBResult createIndex(SqlExecutor executor, String sql) { try { createOk = executor.getStatement().execute(sql); openMLDBResult.setOk(true); - Thread.sleep(10000); + Tool.sleep(20*1000); } catch (Exception e) { e.printStackTrace(); openMLDBResult.setOk(false); From 87b5a84eeeae42ef3de051f3a070649be2f14f37 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 5 Sep 2022 20:15:12 +0800 Subject: [PATCH 171/172] modify case --- cases/integration_test/ddl/test_create.yaml | 5 ++--- cases/integration_test/ddl/test_create_index.yaml | 13 +++---------- cases/integration_test/ddl/test_ttl.yaml | 5 ----- 3 files changed, 5 insertions(+), 18 deletions(-) diff --git a/cases/integration_test/ddl/test_create.yaml b/cases/integration_test/ddl/test_create.yaml index 7319230b3ac..6342b2d6987 100644 --- a/cases/integration_test/ddl/test_create.yaml +++ b/cases/integration_test/ddl/test_create.yaml @@ -13,7 +13,7 @@ # limitations under the License. 
db: test_zw -debugs: [] +debugs: ["create col with __prefix"] version: 0.5.0 cases: - @@ -389,13 +389,12 @@ cases: - id: 36 desc: create col with __prefix sqlDialect: ["HybridSQL"] - tags: ["TODO", "@chenjing create with __prefix"] sql: | create table {auto} ( __c1 string, __c3 int, __ts bigint, index(key=__c1, ts=__ts)); expect: - columns: ["__c1 string","__c3 int", "__ts bigint"] + success: true - id: 37 desc: create with replica num diff --git a/cases/integration_test/ddl/test_create_index.yaml b/cases/integration_test/ddl/test_create_index.yaml index 5549a5db039..a4040aaddef 100644 --- a/cases/integration_test/ddl/test_create_index.yaml +++ b/cases/integration_test/ddl/test_create_index.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: [] +debugs: ["指定ttl_type=absandlat,部分数据过期","key和ts相同"] version: 0.5.0 cases: - @@ -462,7 +462,6 @@ cases: - id: 22 desc: 指定ttl_type=latest,部分数据过期 - tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -484,7 +483,6 @@ cases: - id: 23 desc: 指定ttl_type=absandlat,部分数据过期 - tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -500,8 +498,8 @@ cases: columns: ["id int","c1 string","c2 int","c3 timestamp","c4 timestamp"] order: id rows: - - [2,"aa", 1, 1590738990000,1590738990000] - - [3,"aa", 1, 1590738990000,1590738990000] + - [2,"aa", 1, 1590738990000,1590738991000] + - [3,"aa", 1, 1590738990000,1590738992000] - id: 24 desc: 指定ttl_type=absorlat,部分数据过期 @@ -521,7 +519,6 @@ cases: - id: 25 desc: 指定ttl_type=absandlat,部分数据过期-边界 - tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -588,7 +585,6 @@ cases: - id: 28 desc: 指定ttl_type=absorlat,部分数据过期-边界2 - tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ 
-634,7 +630,6 @@ cases: - id: 30 desc: 先创建索引,在插入数据,测试过期-latest - tags: ["TODO","latest的过期数据比预期多了一条,@denglong"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -704,7 +699,6 @@ cases: - id: 33 desc: key和ts相同 - tags: ["TODO","key和ts相同,认为是相同的索引要添加失败,目前添加成功,@denglong"] inputs: - columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"] @@ -713,7 +707,6 @@ cases: - [aa,1,1590738990000,1590738989000] sqls: - CREATE INDEX index1 ON {0} (c1) OPTIONS (ts=c4, ttl=100m, ttl_type=absolute); - - desc {0}; expect: success: false - diff --git a/cases/integration_test/ddl/test_ttl.yaml b/cases/integration_test/ddl/test_ttl.yaml index ba2456856c1..510b7e9a408 100644 --- a/cases/integration_test/ddl/test_ttl.yaml +++ b/cases/integration_test/ddl/test_ttl.yaml @@ -221,7 +221,6 @@ cases: - id: 24 desc: 指定ttl_type=absolute,部分数据过期 - tags: ["TODO","边界case,待离线和c++支持后,在使用"] inputs: - columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -237,7 +236,6 @@ cases: - id: 25 desc: 指定ttl_type=absandlat,部分数据过期-边界 - tags: ["TODO","边界case,待离线和c++支持后,在使用"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -257,7 +255,6 @@ cases: - id: 26 desc: 指定ttl_type=absandlat,部分数据过期-边界2 - tags: ["TODO","边界case,待离线和c++支持后,在使用"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -278,7 +275,6 @@ cases: - id: 27 desc: 指定ttl_type=absorlat,部分数据过期-边界 - tags: ["TODO","边界case,待离线和c++支持后,在使用"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] @@ -296,7 +292,6 @@ cases: - id: 28 desc: 指定ttl_type=absorlat,部分数据过期-边界2 - tags: ["TODO","边界case,待离线和c++支持后,在使用"] inputs: - columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"] From 4facd290c3f9ce4538e053e6e451aef567c71413 Mon Sep 17 00:00:00 2001 From: zhaowei Date: Mon, 5 Sep 2022 20:15:50 +0800 Subject: [PATCH 172/172] modify case --- cases/integration_test/ddl/test_create.yaml | 2 +- 
cases/integration_test/ddl/test_create_index.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cases/integration_test/ddl/test_create.yaml b/cases/integration_test/ddl/test_create.yaml index 6342b2d6987..c877221404e 100644 --- a/cases/integration_test/ddl/test_create.yaml +++ b/cases/integration_test/ddl/test_create.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: ["create col with __prefix"] +debugs: [] version: 0.5.0 cases: - diff --git a/cases/integration_test/ddl/test_create_index.yaml b/cases/integration_test/ddl/test_create_index.yaml index a4040aaddef..6d4ce9e14cd 100644 --- a/cases/integration_test/ddl/test_create_index.yaml +++ b/cases/integration_test/ddl/test_create_index.yaml @@ -13,7 +13,7 @@ # limitations under the License. db: test_zw -debugs: ["指定ttl_type=absandlat,部分数据过期","key和ts相同"] +debugs: [] version: 0.5.0 cases: -