Skip to content

Commit

Permalink
temp
Browse files Browse the repository at this point in the history
Signed-off-by: Peng Huo <[email protected]>
  • Loading branch information
penghuo committed Oct 25, 2024
1 parent fe21979 commit 438176b
Show file tree
Hide file tree
Showing 32 changed files with 501 additions and 47 deletions.
9 changes: 9 additions & 0 deletions async-query-core/src/main/antlr/SqlBaseLexer.g4
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,7 @@ BUCKETS: 'BUCKETS';
BY: 'BY';
BYTE: 'BYTE';
CACHE: 'CACHE';
CALL: 'CALL';
CALLED: 'CALLED';
CASCADE: 'CASCADE';
CASE: 'CASE';
Expand Down Expand Up @@ -255,12 +256,14 @@ BINARY_HEX: 'X';
HOUR: 'HOUR';
HOURS: 'HOURS';
IDENTIFIER_KW: 'IDENTIFIER';
IDENTITY: 'IDENTITY';
IF: 'IF';
IGNORE: 'IGNORE';
IMMEDIATE: 'IMMEDIATE';
IMPORT: 'IMPORT';
IN: 'IN';
INCLUDE: 'INCLUDE';
INCREMENT: 'INCREMENT';
INDEX: 'INDEX';
INDEXES: 'INDEXES';
INNER: 'INNER';
Expand All @@ -276,13 +279,15 @@ INTO: 'INTO';
INVOKER: 'INVOKER';
IS: 'IS';
ITEMS: 'ITEMS';
ITERATE: 'ITERATE';
JOIN: 'JOIN';
KEYS: 'KEYS';
LANGUAGE: 'LANGUAGE';
LAST: 'LAST';
LATERAL: 'LATERAL';
LAZY: 'LAZY';
LEADING: 'LEADING';
LEAVE: 'LEAVE';
LEFT: 'LEFT';
LIKE: 'LIKE';
ILIKE: 'ILIKE';
Expand All @@ -296,6 +301,7 @@ LOCK: 'LOCK';
LOCKS: 'LOCKS';
LOGICAL: 'LOGICAL';
LONG: 'LONG';
LOOP: 'LOOP';
MACRO: 'MACRO';
MAP: 'MAP' {incComplexTypeLevelCounter();};
MATCHED: 'MATCHED';
Expand Down Expand Up @@ -362,6 +368,7 @@ REFERENCES: 'REFERENCES';
REFRESH: 'REFRESH';
RENAME: 'RENAME';
REPAIR: 'REPAIR';
REPEAT: 'REPEAT';
REPEATABLE: 'REPEATABLE';
REPLACE: 'REPLACE';
RESET: 'RESET';
Expand Down Expand Up @@ -451,6 +458,7 @@ UNKNOWN: 'UNKNOWN';
UNLOCK: 'UNLOCK';
UNPIVOT: 'UNPIVOT';
UNSET: 'UNSET';
UNTIL: 'UNTIL';
UPDATE: 'UPDATE';
USE: 'USE';
USER: 'USER';
Expand Down Expand Up @@ -501,6 +509,7 @@ TILDE: '~';
AMPERSAND: '&';
PIPE: '|';
CONCAT_PIPE: '||';
OPERATOR_PIPE: '|>';
HAT: '^';
COLON: ':';
DOUBLE_COLON: '::';
Expand Down
87 changes: 85 additions & 2 deletions async-query-core/src/main/antlr/SqlBaseParser.g4
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,12 @@ compoundStatement
| setStatementWithOptionalVarKeyword
| beginEndCompoundBlock
| ifElseStatement
| caseStatement
| whileStatement
| repeatStatement
| leaveStatement
| iterateStatement
| loopStatement
;

setStatementWithOptionalVarKeyword
Expand All @@ -83,6 +88,29 @@ ifElseStatement
(ELSE elseBody=compoundBody)? END IF
;

// SQL-scripting post-test loop: runs compoundBody at least once, repeating until
// booleanExpression is true. Optional labels allow LEAVE/ITERATE to target it.
repeatStatement
: beginLabel? REPEAT compoundBody UNTIL booleanExpression END REPEAT endLabel?
;

// Exits the labeled loop or compound block named by multipartIdentifier.
leaveStatement
: LEAVE multipartIdentifier
;

// Skips to the next iteration of the labeled loop named by multipartIdentifier.
iterateStatement
: ITERATE multipartIdentifier
;

// SQL-scripting CASE statement (distinct from the CASE expression).
// Searched form: each WHEN carries its own boolean condition.
// Simple form: caseVariable is compared against each WHEN expression.
caseStatement
: CASE (WHEN conditions+=booleanExpression THEN conditionalBodies+=compoundBody)+
(ELSE elseBody=compoundBody)? END CASE #searchedCaseStatement
| CASE caseVariable=expression (WHEN conditionExpressions+=expression THEN conditionalBodies+=compoundBody)+
(ELSE elseBody=compoundBody)? END CASE #simpleCaseStatement
;

// Unconditional loop; terminates only via LEAVE targeting its label.
loopStatement
: beginLabel? LOOP compoundBody END LOOP endLabel?
;

// Entry point for parsing one statement; tolerates trailing semicolons before EOF.
singleStatement
: (statement|setResetStatement) SEMICOLON* EOF
;
Expand Down Expand Up @@ -125,7 +153,7 @@ statement
| ctes? dmlStatementNoWith #dmlStatement
| USE identifierReference #use
| USE namespace identifierReference #useNamespace
| SET CATALOG (errorCapturingIdentifier | stringLit) #setCatalog
| SET CATALOG catalogIdentifierReference #setCatalog
| CREATE namespace (IF errorCapturingNot EXISTS)? identifierReference
(commentSpec |
locationSpec |
Expand Down Expand Up @@ -275,6 +303,10 @@ statement
LEFT_PAREN columns=multipartIdentifierPropertyList RIGHT_PAREN
(OPTIONS options=propertyList)? #createIndex
| DROP INDEX (IF EXISTS)? identifier ON TABLE? identifierReference #dropIndex
| CALL identifierReference
LEFT_PAREN
(functionArgument (COMMA functionArgument)*)?
RIGHT_PAREN #call
| unsupportedHiveNativeCommands .*? #failNativeCommand
;

Expand Down Expand Up @@ -567,6 +599,12 @@ identifierReference
| multipartIdentifier
;

// Catalog name in SET CATALOG: an IDENTIFIER(<expr>) clause, a plain
// (error-capturing) identifier, or a string literal.
catalogIdentifierReference
: IDENTIFIER_KW LEFT_PAREN expression RIGHT_PAREN
| errorCapturingIdentifier
| stringLit
;

queryOrganization
: (ORDER BY order+=sortItem (COMMA order+=sortItem)*)?
(CLUSTER BY clusterBy+=expression (COMMA clusterBy+=expression)*)?
Expand All @@ -589,6 +627,7 @@ queryTerm
operator=INTERSECT setQuantifier? right=queryTerm #setOperation
| left=queryTerm {!legacy_setops_precedence_enabled}?
operator=(UNION | EXCEPT | SETMINUS) setQuantifier? right=queryTerm #setOperation
| left=queryTerm OPERATOR_PIPE operatorPipeRightSide #operatorPipeStatement
;

queryPrimary
Expand Down Expand Up @@ -1272,7 +1311,22 @@ colDefinitionOption
;

generationExpression
: GENERATED ALWAYS AS LEFT_PAREN expression RIGHT_PAREN
: GENERATED ALWAYS AS LEFT_PAREN expression RIGHT_PAREN #generatedColumn
| GENERATED (ALWAYS | BY DEFAULT) AS IDENTITY identityColSpec? #identityColumn
;

// Optional specification for an identity column, e.g. (START WITH 1 INCREMENT BY 2).
identityColSpec
: LEFT_PAREN sequenceGeneratorOption* RIGHT_PAREN
;

// A single sequence-generator option: the starting value or the step.
sequenceGeneratorOption
: START WITH start=sequenceGeneratorStartOrStep
| INCREMENT BY step=sequenceGeneratorStartOrStep
;

// Signed integer literal used for an identity column's start or step value.
sequenceGeneratorStartOrStep
: MINUS? INTEGER_VALUE
| MINUS? BIGINT_LITERAL
;

complexColTypeList
Expand Down Expand Up @@ -1447,6 +1501,20 @@ version
| stringLit
;

// Right-hand side of the |> (OPERATOR_PIPE) operator: exactly one query
// operation applied to the result of the left-hand query term.
operatorPipeRightSide
: selectClause
| whereClause
// The following two cases match the PIVOT or UNPIVOT clause, respectively.
// For each one, we add the other clause as an option in order to return high-quality error
// messages in the event that both are present (this is not allowed).
| pivotClause unpivotClause?
| unpivotClause pivotClause?
| sample
| joinRelation
| operator=(UNION | EXCEPT | SETMINUS | INTERSECT) setQuantifier? right=queryTerm
| queryOrganization
;

// When `SQL_standard_keyword_behavior=true`, there are 2 kinds of keywords in Spark SQL.
// - Reserved keywords:
// Keywords that are reserved and can't be used as identifiers for table, view, column,
Expand Down Expand Up @@ -1562,11 +1630,13 @@ ansiNonReserved
| HOUR
| HOURS
| IDENTIFIER_KW
| IDENTITY
| IF
| IGNORE
| IMMEDIATE
| IMPORT
| INCLUDE
| INCREMENT
| INDEX
| INDEXES
| INPATH
Expand All @@ -1578,10 +1648,12 @@ ansiNonReserved
| INTERVAL
| INVOKER
| ITEMS
| ITERATE
| KEYS
| LANGUAGE
| LAST
| LAZY
| LEAVE
| LIKE
| ILIKE
| LIMIT
Expand All @@ -1594,6 +1666,7 @@ ansiNonReserved
| LOCKS
| LOGICAL
| LONG
| LOOP
| MACRO
| MAP
| MATCHED
Expand Down Expand Up @@ -1648,6 +1721,7 @@ ansiNonReserved
| REFRESH
| RENAME
| REPAIR
| REPEAT
| REPEATABLE
| REPLACE
| RESET
Expand Down Expand Up @@ -1723,6 +1797,7 @@ ansiNonReserved
| UNLOCK
| UNPIVOT
| UNSET
| UNTIL
| UPDATE
| USE
| VALUES
Expand Down Expand Up @@ -1802,6 +1877,7 @@ nonReserved
| BY
| BYTE
| CACHE
| CALL
| CALLED
| CASCADE
| CASE
Expand Down Expand Up @@ -1908,12 +1984,14 @@ nonReserved
| HOUR
| HOURS
| IDENTIFIER_KW
| IDENTITY
| IF
| IGNORE
| IMMEDIATE
| IMPORT
| IN
| INCLUDE
| INCREMENT
| INDEX
| INDEXES
| INPATH
Expand All @@ -1927,11 +2005,13 @@ nonReserved
| INVOKER
| IS
| ITEMS
| ITERATE
| KEYS
| LANGUAGE
| LAST
| LAZY
| LEADING
| LEAVE
| LIKE
| LONG
| ILIKE
Expand All @@ -1945,6 +2025,7 @@ nonReserved
| LOCKS
| LOGICAL
| LONG
| LOOP
| MACRO
| MAP
| MATCHED
Expand Down Expand Up @@ -2009,6 +2090,7 @@ nonReserved
| REFRESH
| RENAME
| REPAIR
| REPEAT
| REPEATABLE
| REPLACE
| RESET
Expand Down Expand Up @@ -2093,6 +2175,7 @@ nonReserved
| UNLOCK
| UNPIVOT
| UNSET
| UNTIL
| UPDATE
| USE
| USER
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@

/** Setting. */
public abstract class Settings {

public static final int QUERY_SIZE_LIMIT_SETTING_DEFAULT = 10000;

@RequiredArgsConstructor
public enum Key {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import java.util.stream.Collectors;
import org.opensearch.sql.planner.logical.LogicalPlan;
import org.opensearch.sql.planner.optimizer.rule.MergeFilterAndFilter;
import org.opensearch.sql.planner.optimizer.rule.MergeLimits;
import org.opensearch.sql.planner.optimizer.rule.PushFilterUnderSort;
import org.opensearch.sql.planner.optimizer.rule.read.CreateTableScanBuilder;
import org.opensearch.sql.planner.optimizer.rule.read.TableScanPushDown;
Expand Down Expand Up @@ -46,6 +47,7 @@ public static LogicalPlanOptimizer create() {
*/
new MergeFilterAndFilter(),
new PushFilterUnderSort(),
new MergeLimits(),
/*
* Phase 2: Transformations that rely on data source push down capability
*/
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
/*
* Copyright OpenSearch Contributors
* SPDX-License-Identifier: Apache-2.0
*/

package org.opensearch.sql.planner.optimizer.rule;

import static com.facebook.presto.matching.Pattern.typeOf;
import static org.opensearch.sql.planner.optimizer.pattern.Patterns.source;

import com.facebook.presto.matching.Capture;
import com.facebook.presto.matching.Captures;
import com.facebook.presto.matching.Pattern;
import lombok.Getter;
import lombok.experimental.Accessors;
import org.opensearch.sql.ast.tree.Limit;
import org.opensearch.sql.expression.DSL;
import org.opensearch.sql.planner.logical.LogicalFilter;
import org.opensearch.sql.planner.logical.LogicalLimit;
import org.opensearch.sql.planner.logical.LogicalPlan;
import org.opensearch.sql.planner.optimizer.Rule;

/**
 * Merge Limit --> Limit into a single Limit.
 *
 * <p>Two stacked limits with zero offsets are equivalent to one limit whose size is the smaller of
 * the two. Limits with a non-zero offset are left untouched, since offsets do not compose by
 * taking a minimum.
 */
public class MergeLimits implements Rule<LogicalLimit> {

  /** Capture of the inner (child) LogicalLimit matched beneath the outer one. */
  private final Capture<LogicalLimit> capture;

  @Accessors(fluent = true)
  @Getter
  private final Pattern<LogicalLimit> pattern;

  /** Constructor of MergeLimits: matches a LogicalLimit whose source is also a LogicalLimit. */
  public MergeLimits() {
    this.capture = Capture.newCapture();
    this.pattern =
        typeOf(LogicalLimit.class)
            .with(source().matching(typeOf(LogicalLimit.class).capturedAs(capture)));
  }

  @Override
  public LogicalPlan apply(LogicalLimit limit, Captures captures) {
    LogicalLimit childLimit = captures.get(capture);
    // A non-zero offset on either limit makes a min-based merge incorrect; bail out.
    if (limit.getOffset() != 0 || childLimit.getOffset() != 0) {
      return limit;
    }
    // Both offsets are zero: collapse into one limit with the smaller size.
    return new LogicalLimit(
        childLimit.getChild().get(0), Integer.min(limit.getLimit(), childLimit.getLimit()), 0);
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ public void open() {
super.open();

// skip the leading rows of offset size
while (input.hasNext() && count < offset) {
while (count < offset && input.hasNext()) {
count++;
input.next();
}
Expand Down
Loading

0 comments on commit 438176b

Please sign in to comment.