From 7930b88770f10067374020b4b230afef42885df9 Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Fri, 8 Sep 2023 10:21:25 +0200 Subject: [PATCH 1/8] enhancement: add more kql spec tests and simplify ast normalization --- changelog/unreleased/kql-fix-unary.md | 7 - .../unreleased/kql-search-query-language.md | 4 + services/search/pkg/query/kql/cast.go | 21 - services/search/pkg/query/kql/const.go | 7 - services/search/pkg/query/kql/dictionary.peg | 20 +- .../search/pkg/query/kql/dictionary_gen.go | 494 +++++++++--------- .../search/pkg/query/kql/dictionary_test.go | 473 ++++++++++++++--- services/search/pkg/query/kql/factory.go | 43 +- services/search/pkg/query/kql/kql.go | 122 +++++ services/search/pkg/query/kql/kql_test.go | 4 +- services/search/pkg/query/kql/normalize.go | 128 ----- .../search/pkg/query/kql/normalize_test.go | 126 ----- 12 files changed, 823 insertions(+), 626 deletions(-) delete mode 100644 changelog/unreleased/kql-fix-unary.md delete mode 100644 services/search/pkg/query/kql/normalize.go delete mode 100644 services/search/pkg/query/kql/normalize_test.go diff --git a/changelog/unreleased/kql-fix-unary.md b/changelog/unreleased/kql-fix-unary.md deleted file mode 100644 index 29a33c7173e..00000000000 --- a/changelog/unreleased/kql-fix-unary.md +++ /dev/null @@ -1,7 +0,0 @@ -Bugfix: Fixed cunary in the beginning - -Fixed case when the unary in the beginning lead to panic - - -https://github.com/owncloud/ocis/pull/7247 -git diff --git a/changelog/unreleased/kql-search-query-language.md b/changelog/unreleased/kql-search-query-language.md index ddb289e2485..0749e09dff9 100644 --- a/changelog/unreleased/kql-search-query-language.md +++ b/changelog/unreleased/kql-search-query-language.md @@ -19,7 +19,11 @@ Complex queries: https://github.com/owncloud/ocis/pull/7212 https://github.com/owncloud/ocis/pull/7043 +https://github.com/owncloud/ocis/pull/7247 +https://github.com/owncloud/ocis/pull/7248 
+https://github.com/owncloud/ocis/pull/7254 https://github.com/owncloud/web/pull/9653 +https://github.com/owncloud/web/pull/9672 https://github.com/owncloud/ocis/issues/7042 https://github.com/owncloud/ocis/issues/7179 https://github.com/owncloud/ocis/issues/7114 diff --git a/services/search/pkg/query/kql/cast.go b/services/search/pkg/query/kql/cast.go index cdceda9a584..41c8183a62a 100644 --- a/services/search/pkg/query/kql/cast.go +++ b/services/search/pkg/query/kql/cast.go @@ -7,13 +7,6 @@ import ( "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" ) -func toIfaceSlice(in interface{}) []interface{} { - if in == nil { - return nil - } - return in.([]interface{}) -} - func toNode[T ast.Node](in interface{}) (T, error) { var t T out, ok := in.(T) @@ -25,21 +18,7 @@ func toNode[T ast.Node](in interface{}) (T, error) { } func toNodes[T ast.Node](in interface{}) ([]T, error) { - switch v := in.(type) { - case []interface{}: - var nodes []T - - for _, el := range toIfaceSlice(v) { - node, err := toNode[T](el) - if err != nil { - return nil, err - } - - nodes = append(nodes, node) - } - - return nodes, nil case []T: return v, nil default: diff --git a/services/search/pkg/query/kql/const.go b/services/search/pkg/query/kql/const.go index b17fba346b1..f3e5818bd78 100644 --- a/services/search/pkg/query/kql/const.go +++ b/services/search/pkg/query/kql/const.go @@ -1,8 +1 @@ package kql - -// The operator node value definition -const ( - BoolAND = "AND" - BoolOR = "OR" - BoolNOT = "NOT" -) diff --git a/services/search/pkg/query/kql/dictionary.peg b/services/search/pkg/query/kql/dictionary.peg index 927f0159eac..6c177b2d87c 100644 --- a/services/search/pkg/query/kql/dictionary.peg +++ b/services/search/pkg/query/kql/dictionary.peg @@ -12,17 +12,13 @@ AST <- } Nodes <- - n:( - _ - ( - GroupNode / - PropertyRestrictionNodes / - OperatorBooleanNode / - FreeTextKeywordNodes - ) - _ - )+ { - return buildNodes(n) + _ 
head:( + GroupNode / + PropertyRestrictionNodes / + OperatorBooleanNode / + FreeTextKeywordNodes + ) _ tail:Nodes? { + return buildNodes(head, tail) } //////////////////////////////////////////////////////// @@ -81,7 +77,7 @@ WordNode <- //////////////////////////////////////////////////////// OperatorBooleanNode <- - ("AND" / "OR" / "NOT") { + ("AND" / "OR" / "NOT" / "+" / "-") { return buildOperatorNode(c.text, c.pos) } diff --git a/services/search/pkg/query/kql/dictionary_gen.go b/services/search/pkg/query/kql/dictionary_gen.go index 45d7c38858e..4c9e749ffbd 100644 --- a/services/search/pkg/query/kql/dictionary_gen.go +++ b/services/search/pkg/query/kql/dictionary_gen.go @@ -54,42 +54,50 @@ var g = &grammar{ expr: &actionExpr{ pos: position{line: 15, col: 5, offset: 238}, run: (*parser).callonNodes1, - expr: &labeledExpr{ - pos: position{line: 15, col: 5, offset: 238}, - label: "n", - expr: &oneOrMoreExpr{ - pos: position{line: 15, col: 7, offset: 240}, - expr: &seqExpr{ - pos: position{line: 16, col: 9, offset: 250}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 16, col: 9, offset: 250}, - name: "_", - }, - &choiceExpr{ - pos: position{line: 18, col: 13, offset: 274}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 18, col: 13, offset: 274}, - name: "GroupNode", - }, - &ruleRefExpr{ - pos: position{line: 19, col: 13, offset: 298}, - name: "PropertyRestrictionNodes", - }, - &ruleRefExpr{ - pos: position{line: 20, col: 13, offset: 337}, - name: "OperatorBooleanNode", - }, - &ruleRefExpr{ - pos: position{line: 21, col: 13, offset: 371}, - name: "FreeTextKeywordNodes", - }, + expr: &seqExpr{ + pos: position{line: 15, col: 5, offset: 238}, + exprs: []any{ + &ruleRefExpr{ + pos: position{line: 15, col: 5, offset: 238}, + name: "_", + }, + &labeledExpr{ + pos: position{line: 15, col: 7, offset: 240}, + label: "head", + expr: &choiceExpr{ + pos: position{line: 16, col: 9, offset: 255}, + alternatives: []any{ + &ruleRefExpr{ + pos: position{line: 
16, col: 9, offset: 255}, + name: "GroupNode", + }, + &ruleRefExpr{ + pos: position{line: 17, col: 9, offset: 275}, + name: "PropertyRestrictionNodes", + }, + &ruleRefExpr{ + pos: position{line: 18, col: 9, offset: 310}, + name: "OperatorBooleanNode", + }, + &ruleRefExpr{ + pos: position{line: 19, col: 9, offset: 340}, + name: "FreeTextKeywordNodes", }, }, - &ruleRefExpr{ - pos: position{line: 23, col: 9, offset: 410}, - name: "_", + }, + }, + &ruleRefExpr{ + pos: position{line: 20, col: 7, offset: 367}, + name: "_", + }, + &labeledExpr{ + pos: position{line: 20, col: 9, offset: 369}, + label: "tail", + expr: &zeroOrOneExpr{ + pos: position{line: 20, col: 14, offset: 374}, + expr: &ruleRefExpr{ + pos: position{line: 20, col: 14, offset: 374}, + name: "Nodes", }, }, }, @@ -99,59 +107,59 @@ var g = &grammar{ }, { name: "GroupNode", - pos: position{line: 32, col: 1, offset: 581}, + pos: position{line: 28, col: 1, offset: 552}, expr: &actionExpr{ - pos: position{line: 33, col: 5, offset: 598}, + pos: position{line: 29, col: 5, offset: 569}, run: (*parser).callonGroupNode1, expr: &seqExpr{ - pos: position{line: 33, col: 5, offset: 598}, + pos: position{line: 29, col: 5, offset: 569}, exprs: []any{ &labeledExpr{ - pos: position{line: 33, col: 5, offset: 598}, + pos: position{line: 29, col: 5, offset: 569}, label: "k", expr: &zeroOrOneExpr{ - pos: position{line: 33, col: 7, offset: 600}, + pos: position{line: 29, col: 7, offset: 571}, expr: &oneOrMoreExpr{ - pos: position{line: 33, col: 8, offset: 601}, + pos: position{line: 29, col: 8, offset: 572}, expr: &ruleRefExpr{ - pos: position{line: 33, col: 8, offset: 601}, + pos: position{line: 29, col: 8, offset: 572}, name: "Char", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 33, col: 16, offset: 609}, + pos: position{line: 29, col: 16, offset: 580}, expr: &choiceExpr{ - pos: position{line: 33, col: 17, offset: 610}, + pos: position{line: 29, col: 17, offset: 581}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 
33, col: 17, offset: 610}, + pos: position{line: 29, col: 17, offset: 581}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 33, col: 37, offset: 630}, + pos: position{line: 29, col: 37, offset: 601}, name: "OperatorEqualNode", }, }, }, }, &litMatcher{ - pos: position{line: 33, col: 57, offset: 650}, + pos: position{line: 29, col: 57, offset: 621}, val: "(", ignoreCase: false, want: "\"(\"", }, &labeledExpr{ - pos: position{line: 33, col: 61, offset: 654}, + pos: position{line: 29, col: 61, offset: 625}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 33, col: 63, offset: 656}, + pos: position{line: 29, col: 63, offset: 627}, name: "Nodes", }, }, &litMatcher{ - pos: position{line: 33, col: 69, offset: 662}, + pos: position{line: 29, col: 69, offset: 633}, val: ")", ignoreCase: false, want: "\")\"", @@ -162,20 +170,20 @@ var g = &grammar{ }, { name: "PropertyRestrictionNodes", - pos: position{line: 41, col: 1, offset: 866}, + pos: position{line: 37, col: 1, offset: 837}, expr: &choiceExpr{ - pos: position{line: 42, col: 5, offset: 898}, + pos: position{line: 38, col: 5, offset: 869}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 42, col: 5, offset: 898}, + pos: position{line: 38, col: 5, offset: 869}, name: "YesNoPropertyRestrictionNode", }, &ruleRefExpr{ - pos: position{line: 43, col: 5, offset: 933}, + pos: position{line: 39, col: 5, offset: 904}, name: "DateTimeRestrictionNode", }, &ruleRefExpr{ - pos: position{line: 44, col: 5, offset: 963}, + pos: position{line: 40, col: 5, offset: 934}, name: "TextPropertyRestrictionNode", }, }, @@ -183,51 +191,51 @@ var g = &grammar{ }, { name: "YesNoPropertyRestrictionNode", - pos: position{line: 46, col: 1, offset: 992}, + pos: position{line: 42, col: 1, offset: 963}, expr: &actionExpr{ - pos: position{line: 47, col: 5, offset: 1028}, + pos: position{line: 43, col: 5, offset: 999}, run: (*parser).callonYesNoPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 47, col: 5, offset: 
1028}, + pos: position{line: 43, col: 5, offset: 999}, exprs: []any{ &labeledExpr{ - pos: position{line: 47, col: 5, offset: 1028}, + pos: position{line: 43, col: 5, offset: 999}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 47, col: 7, offset: 1030}, + pos: position{line: 43, col: 7, offset: 1001}, expr: &ruleRefExpr{ - pos: position{line: 47, col: 7, offset: 1030}, + pos: position{line: 43, col: 7, offset: 1001}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 47, col: 14, offset: 1037}, + pos: position{line: 43, col: 14, offset: 1008}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 47, col: 14, offset: 1037}, + pos: position{line: 43, col: 14, offset: 1008}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 47, col: 34, offset: 1057}, + pos: position{line: 43, col: 34, offset: 1028}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 47, col: 53, offset: 1076}, + pos: position{line: 43, col: 53, offset: 1047}, label: "v", expr: &choiceExpr{ - pos: position{line: 47, col: 56, offset: 1079}, + pos: position{line: 43, col: 56, offset: 1050}, alternatives: []any{ &litMatcher{ - pos: position{line: 47, col: 56, offset: 1079}, + pos: position{line: 43, col: 56, offset: 1050}, val: "true", ignoreCase: false, want: "\"true\"", }, &litMatcher{ - pos: position{line: 47, col: 65, offset: 1088}, + pos: position{line: 43, col: 65, offset: 1059}, val: "false", ignoreCase: false, want: "\"false\"", @@ -241,93 +249,93 @@ var g = &grammar{ }, { name: "DateTimeRestrictionNode", - pos: position{line: 51, col: 1, offset: 1158}, + pos: position{line: 47, col: 1, offset: 1129}, expr: &actionExpr{ - pos: position{line: 52, col: 5, offset: 1189}, + pos: position{line: 48, col: 5, offset: 1160}, run: (*parser).callonDateTimeRestrictionNode1, expr: &seqExpr{ - pos: position{line: 52, col: 5, offset: 1189}, + pos: position{line: 48, col: 5, offset: 1160}, exprs: []any{ &labeledExpr{ - pos: position{line: 52, col: 5, 
offset: 1189}, + pos: position{line: 48, col: 5, offset: 1160}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 52, col: 7, offset: 1191}, + pos: position{line: 48, col: 7, offset: 1162}, expr: &ruleRefExpr{ - pos: position{line: 52, col: 7, offset: 1191}, + pos: position{line: 48, col: 7, offset: 1162}, name: "Char", }, }, }, &labeledExpr{ - pos: position{line: 52, col: 13, offset: 1197}, + pos: position{line: 48, col: 13, offset: 1168}, label: "o", expr: &choiceExpr{ - pos: position{line: 52, col: 16, offset: 1200}, + pos: position{line: 48, col: 16, offset: 1171}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 52, col: 16, offset: 1200}, + pos: position{line: 48, col: 16, offset: 1171}, name: "OperatorGreaterOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 52, col: 45, offset: 1229}, + pos: position{line: 48, col: 45, offset: 1200}, name: "OperatorLessOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 52, col: 71, offset: 1255}, + pos: position{line: 48, col: 71, offset: 1226}, name: "OperatorGreaterNode", }, &ruleRefExpr{ - pos: position{line: 52, col: 93, offset: 1277}, + pos: position{line: 48, col: 93, offset: 1248}, name: "OperatorLessNode", }, &ruleRefExpr{ - pos: position{line: 52, col: 112, offset: 1296}, + pos: position{line: 48, col: 112, offset: 1267}, name: "OperatorEqualNode", }, &ruleRefExpr{ - pos: position{line: 52, col: 132, offset: 1316}, + pos: position{line: 48, col: 132, offset: 1287}, name: "OperatorColonNode", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 52, col: 151, offset: 1335}, + pos: position{line: 48, col: 151, offset: 1306}, expr: &litMatcher{ - pos: position{line: 52, col: 151, offset: 1335}, + pos: position{line: 48, col: 151, offset: 1306}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, }, &labeledExpr{ - pos: position{line: 52, col: 156, offset: 1340}, + pos: position{line: 48, col: 156, offset: 1311}, label: "v", expr: &seqExpr{ - pos: position{line: 52, col: 159, offset: 1343}, + pos: 
position{line: 48, col: 159, offset: 1314}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 52, col: 159, offset: 1343}, + pos: position{line: 48, col: 159, offset: 1314}, name: "FullDate", }, &litMatcher{ - pos: position{line: 52, col: 168, offset: 1352}, + pos: position{line: 48, col: 168, offset: 1323}, val: "T", ignoreCase: false, want: "\"T\"", }, &ruleRefExpr{ - pos: position{line: 52, col: 172, offset: 1356}, + pos: position{line: 48, col: 172, offset: 1327}, name: "FullTime", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 52, col: 182, offset: 1366}, + pos: position{line: 48, col: 182, offset: 1337}, expr: &litMatcher{ - pos: position{line: 52, col: 182, offset: 1366}, + pos: position{line: 48, col: 182, offset: 1337}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -339,51 +347,51 @@ var g = &grammar{ }, { name: "TextPropertyRestrictionNode", - pos: position{line: 56, col: 1, offset: 1437}, + pos: position{line: 52, col: 1, offset: 1408}, expr: &actionExpr{ - pos: position{line: 57, col: 5, offset: 1472}, + pos: position{line: 53, col: 5, offset: 1443}, run: (*parser).callonTextPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 57, col: 5, offset: 1472}, + pos: position{line: 53, col: 5, offset: 1443}, exprs: []any{ &labeledExpr{ - pos: position{line: 57, col: 5, offset: 1472}, + pos: position{line: 53, col: 5, offset: 1443}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 57, col: 7, offset: 1474}, + pos: position{line: 53, col: 7, offset: 1445}, expr: &ruleRefExpr{ - pos: position{line: 57, col: 7, offset: 1474}, + pos: position{line: 53, col: 7, offset: 1445}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 57, col: 14, offset: 1481}, + pos: position{line: 53, col: 14, offset: 1452}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 57, col: 14, offset: 1481}, + pos: position{line: 53, col: 14, offset: 1452}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 57, col: 34, offset: 1501}, + pos: 
position{line: 53, col: 34, offset: 1472}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 57, col: 53, offset: 1520}, + pos: position{line: 53, col: 53, offset: 1491}, label: "v", expr: &choiceExpr{ - pos: position{line: 57, col: 56, offset: 1523}, + pos: position{line: 53, col: 56, offset: 1494}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 57, col: 56, offset: 1523}, + pos: position{line: 53, col: 56, offset: 1494}, name: "String", }, &oneOrMoreExpr{ - pos: position{line: 57, col: 65, offset: 1532}, + pos: position{line: 53, col: 65, offset: 1503}, expr: &charClassMatcher{ - pos: position{line: 57, col: 65, offset: 1532}, + pos: position{line: 53, col: 65, offset: 1503}, val: "[^ ()]", chars: []rune{' ', '(', ')'}, ignoreCase: false, @@ -399,16 +407,16 @@ var g = &grammar{ }, { name: "FreeTextKeywordNodes", - pos: position{line: 65, col: 1, offset: 1738}, + pos: position{line: 61, col: 1, offset: 1709}, expr: &choiceExpr{ - pos: position{line: 66, col: 5, offset: 1766}, + pos: position{line: 62, col: 5, offset: 1737}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 66, col: 5, offset: 1766}, + pos: position{line: 62, col: 5, offset: 1737}, name: "PhraseNode", }, &ruleRefExpr{ - pos: position{line: 67, col: 5, offset: 1783}, + pos: position{line: 63, col: 5, offset: 1754}, name: "WordNode", }, }, @@ -416,40 +424,40 @@ var g = &grammar{ }, { name: "PhraseNode", - pos: position{line: 69, col: 1, offset: 1793}, + pos: position{line: 65, col: 1, offset: 1764}, expr: &actionExpr{ - pos: position{line: 70, col: 6, offset: 1812}, + pos: position{line: 66, col: 6, offset: 1783}, run: (*parser).callonPhraseNode1, expr: &seqExpr{ - pos: position{line: 70, col: 6, offset: 1812}, + pos: position{line: 66, col: 6, offset: 1783}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 70, col: 6, offset: 1812}, + pos: position{line: 66, col: 6, offset: 1783}, expr: &ruleRefExpr{ - pos: position{line: 70, col: 6, offset: 1812}, + pos: 
position{line: 66, col: 6, offset: 1783}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 70, col: 25, offset: 1831}, + pos: position{line: 66, col: 25, offset: 1802}, name: "_", }, &labeledExpr{ - pos: position{line: 70, col: 27, offset: 1833}, + pos: position{line: 66, col: 27, offset: 1804}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 70, col: 29, offset: 1835}, + pos: position{line: 66, col: 29, offset: 1806}, name: "String", }, }, &ruleRefExpr{ - pos: position{line: 70, col: 36, offset: 1842}, + pos: position{line: 66, col: 36, offset: 1813}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 70, col: 38, offset: 1844}, + pos: position{line: 66, col: 38, offset: 1815}, expr: &ruleRefExpr{ - pos: position{line: 70, col: 38, offset: 1844}, + pos: position{line: 66, col: 38, offset: 1815}, name: "OperatorColonNode", }, }, @@ -459,31 +467,31 @@ var g = &grammar{ }, { name: "WordNode", - pos: position{line: 74, col: 1, offset: 1925}, + pos: position{line: 70, col: 1, offset: 1896}, expr: &actionExpr{ - pos: position{line: 75, col: 6, offset: 1942}, + pos: position{line: 71, col: 6, offset: 1913}, run: (*parser).callonWordNode1, expr: &seqExpr{ - pos: position{line: 75, col: 6, offset: 1942}, + pos: position{line: 71, col: 6, offset: 1913}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 75, col: 6, offset: 1942}, + pos: position{line: 71, col: 6, offset: 1913}, expr: &ruleRefExpr{ - pos: position{line: 75, col: 6, offset: 1942}, + pos: position{line: 71, col: 6, offset: 1913}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 75, col: 25, offset: 1961}, + pos: position{line: 71, col: 25, offset: 1932}, name: "_", }, &labeledExpr{ - pos: position{line: 75, col: 27, offset: 1963}, + pos: position{line: 71, col: 27, offset: 1934}, label: "v", expr: &oneOrMoreExpr{ - pos: position{line: 75, col: 29, offset: 1965}, + pos: position{line: 71, col: 29, offset: 1936}, expr: &charClassMatcher{ - pos: position{line: 75, 
col: 29, offset: 1965}, + pos: position{line: 71, col: 29, offset: 1936}, val: "[^ :()]", chars: []rune{' ', ':', '(', ')'}, ignoreCase: false, @@ -492,13 +500,13 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 75, col: 38, offset: 1974}, + pos: position{line: 71, col: 38, offset: 1945}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 75, col: 40, offset: 1976}, + pos: position{line: 71, col: 40, offset: 1947}, expr: &ruleRefExpr{ - pos: position{line: 75, col: 40, offset: 1976}, + pos: position{line: 71, col: 40, offset: 1947}, name: "OperatorColonNode", }, }, @@ -508,43 +516,55 @@ var g = &grammar{ }, { name: "OperatorBooleanNode", - pos: position{line: 83, col: 1, offset: 2185}, + pos: position{line: 79, col: 1, offset: 2156}, expr: &actionExpr{ - pos: position{line: 84, col: 5, offset: 2212}, + pos: position{line: 80, col: 5, offset: 2183}, run: (*parser).callonOperatorBooleanNode1, expr: &choiceExpr{ - pos: position{line: 84, col: 6, offset: 2213}, + pos: position{line: 80, col: 6, offset: 2184}, alternatives: []any{ &litMatcher{ - pos: position{line: 84, col: 6, offset: 2213}, + pos: position{line: 80, col: 6, offset: 2184}, val: "AND", ignoreCase: false, want: "\"AND\"", }, &litMatcher{ - pos: position{line: 84, col: 14, offset: 2221}, + pos: position{line: 80, col: 14, offset: 2192}, val: "OR", ignoreCase: false, want: "\"OR\"", }, &litMatcher{ - pos: position{line: 84, col: 21, offset: 2228}, + pos: position{line: 80, col: 21, offset: 2199}, val: "NOT", ignoreCase: false, want: "\"NOT\"", }, + &litMatcher{ + pos: position{line: 80, col: 29, offset: 2207}, + val: "+", + ignoreCase: false, + want: "\"+\"", + }, + &litMatcher{ + pos: position{line: 80, col: 35, offset: 2213}, + val: "-", + ignoreCase: false, + want: "\"-\"", + }, }, }, }, }, { name: "OperatorColonNode", - pos: position{line: 88, col: 1, offset: 2292}, + pos: position{line: 84, col: 1, offset: 2275}, expr: &actionExpr{ - pos: position{line: 89, col: 5, offset: 2317}, + pos: 
position{line: 85, col: 5, offset: 2300}, run: (*parser).callonOperatorColonNode1, expr: &litMatcher{ - pos: position{line: 89, col: 5, offset: 2317}, + pos: position{line: 85, col: 5, offset: 2300}, val: ":", ignoreCase: false, want: "\":\"", @@ -553,12 +573,12 @@ var g = &grammar{ }, { name: "OperatorEqualNode", - pos: position{line: 93, col: 1, offset: 2378}, + pos: position{line: 89, col: 1, offset: 2361}, expr: &actionExpr{ - pos: position{line: 94, col: 5, offset: 2403}, + pos: position{line: 90, col: 5, offset: 2386}, run: (*parser).callonOperatorEqualNode1, expr: &litMatcher{ - pos: position{line: 94, col: 5, offset: 2403}, + pos: position{line: 90, col: 5, offset: 2386}, val: "=", ignoreCase: false, want: "\"=\"", @@ -567,12 +587,12 @@ var g = &grammar{ }, { name: "OperatorLessNode", - pos: position{line: 98, col: 1, offset: 2464}, + pos: position{line: 94, col: 1, offset: 2447}, expr: &actionExpr{ - pos: position{line: 99, col: 5, offset: 2488}, + pos: position{line: 95, col: 5, offset: 2471}, run: (*parser).callonOperatorLessNode1, expr: &litMatcher{ - pos: position{line: 99, col: 5, offset: 2488}, + pos: position{line: 95, col: 5, offset: 2471}, val: "<", ignoreCase: false, want: "\"<\"", @@ -581,12 +601,12 @@ var g = &grammar{ }, { name: "OperatorLessOrEqualNode", - pos: position{line: 103, col: 1, offset: 2549}, + pos: position{line: 99, col: 1, offset: 2532}, expr: &actionExpr{ - pos: position{line: 104, col: 5, offset: 2580}, + pos: position{line: 100, col: 5, offset: 2563}, run: (*parser).callonOperatorLessOrEqualNode1, expr: &litMatcher{ - pos: position{line: 104, col: 5, offset: 2580}, + pos: position{line: 100, col: 5, offset: 2563}, val: "<=", ignoreCase: false, want: "\"<=\"", @@ -595,12 +615,12 @@ var g = &grammar{ }, { name: "OperatorGreaterNode", - pos: position{line: 108, col: 1, offset: 2642}, + pos: position{line: 104, col: 1, offset: 2625}, expr: &actionExpr{ - pos: position{line: 109, col: 5, offset: 2669}, + pos: position{line: 105, 
col: 5, offset: 2652}, run: (*parser).callonOperatorGreaterNode1, expr: &litMatcher{ - pos: position{line: 109, col: 5, offset: 2669}, + pos: position{line: 105, col: 5, offset: 2652}, val: ">", ignoreCase: false, want: "\">\"", @@ -609,12 +629,12 @@ var g = &grammar{ }, { name: "OperatorGreaterOrEqualNode", - pos: position{line: 113, col: 1, offset: 2730}, + pos: position{line: 109, col: 1, offset: 2713}, expr: &actionExpr{ - pos: position{line: 114, col: 5, offset: 2764}, + pos: position{line: 110, col: 5, offset: 2747}, run: (*parser).callonOperatorGreaterOrEqualNode1, expr: &litMatcher{ - pos: position{line: 114, col: 5, offset: 2764}, + pos: position{line: 110, col: 5, offset: 2747}, val: ">=", ignoreCase: false, want: "\">=\"", @@ -623,27 +643,27 @@ var g = &grammar{ }, { name: "TimeYear", - pos: position{line: 123, col: 1, offset: 2950}, + pos: position{line: 119, col: 1, offset: 2933}, expr: &actionExpr{ - pos: position{line: 124, col: 5, offset: 2966}, + pos: position{line: 120, col: 5, offset: 2949}, run: (*parser).callonTimeYear1, expr: &seqExpr{ - pos: position{line: 124, col: 5, offset: 2966}, + pos: position{line: 120, col: 5, offset: 2949}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 124, col: 5, offset: 2966}, + pos: position{line: 120, col: 5, offset: 2949}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 124, col: 11, offset: 2972}, + pos: position{line: 120, col: 11, offset: 2955}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 124, col: 17, offset: 2978}, + pos: position{line: 120, col: 17, offset: 2961}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 124, col: 23, offset: 2984}, + pos: position{line: 120, col: 23, offset: 2967}, name: "Digit", }, }, @@ -652,19 +672,19 @@ var g = &grammar{ }, { name: "TimeMonth", - pos: position{line: 128, col: 1, offset: 3026}, + pos: position{line: 124, col: 1, offset: 3009}, expr: &actionExpr{ - pos: position{line: 129, col: 5, offset: 3043}, + pos: position{line: 125, col: 5, 
offset: 3026}, run: (*parser).callonTimeMonth1, expr: &seqExpr{ - pos: position{line: 129, col: 5, offset: 3043}, + pos: position{line: 125, col: 5, offset: 3026}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 129, col: 5, offset: 3043}, + pos: position{line: 125, col: 5, offset: 3026}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 129, col: 11, offset: 3049}, + pos: position{line: 125, col: 11, offset: 3032}, name: "Digit", }, }, @@ -673,19 +693,19 @@ var g = &grammar{ }, { name: "TimeDay", - pos: position{line: 133, col: 1, offset: 3091}, + pos: position{line: 129, col: 1, offset: 3074}, expr: &actionExpr{ - pos: position{line: 134, col: 5, offset: 3106}, + pos: position{line: 130, col: 5, offset: 3089}, run: (*parser).callonTimeDay1, expr: &seqExpr{ - pos: position{line: 134, col: 5, offset: 3106}, + pos: position{line: 130, col: 5, offset: 3089}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 134, col: 5, offset: 3106}, + pos: position{line: 130, col: 5, offset: 3089}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 134, col: 11, offset: 3112}, + pos: position{line: 130, col: 11, offset: 3095}, name: "Digit", }, }, @@ -694,19 +714,19 @@ var g = &grammar{ }, { name: "TimeHour", - pos: position{line: 138, col: 1, offset: 3154}, + pos: position{line: 134, col: 1, offset: 3137}, expr: &actionExpr{ - pos: position{line: 139, col: 5, offset: 3170}, + pos: position{line: 135, col: 5, offset: 3153}, run: (*parser).callonTimeHour1, expr: &seqExpr{ - pos: position{line: 139, col: 5, offset: 3170}, + pos: position{line: 135, col: 5, offset: 3153}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 139, col: 5, offset: 3170}, + pos: position{line: 135, col: 5, offset: 3153}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 139, col: 11, offset: 3176}, + pos: position{line: 135, col: 11, offset: 3159}, name: "Digit", }, }, @@ -715,19 +735,19 @@ var g = &grammar{ }, { name: "TimeMinute", - pos: position{line: 143, col: 1, offset: 3218}, + pos: 
position{line: 139, col: 1, offset: 3201}, expr: &actionExpr{ - pos: position{line: 144, col: 5, offset: 3236}, + pos: position{line: 140, col: 5, offset: 3219}, run: (*parser).callonTimeMinute1, expr: &seqExpr{ - pos: position{line: 144, col: 5, offset: 3236}, + pos: position{line: 140, col: 5, offset: 3219}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 144, col: 5, offset: 3236}, + pos: position{line: 140, col: 5, offset: 3219}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 144, col: 11, offset: 3242}, + pos: position{line: 140, col: 11, offset: 3225}, name: "Digit", }, }, @@ -736,19 +756,19 @@ var g = &grammar{ }, { name: "TimeSecond", - pos: position{line: 148, col: 1, offset: 3284}, + pos: position{line: 144, col: 1, offset: 3267}, expr: &actionExpr{ - pos: position{line: 149, col: 5, offset: 3302}, + pos: position{line: 145, col: 5, offset: 3285}, run: (*parser).callonTimeSecond1, expr: &seqExpr{ - pos: position{line: 149, col: 5, offset: 3302}, + pos: position{line: 145, col: 5, offset: 3285}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 149, col: 5, offset: 3302}, + pos: position{line: 145, col: 5, offset: 3285}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 149, col: 11, offset: 3308}, + pos: position{line: 145, col: 11, offset: 3291}, name: "Digit", }, }, @@ -757,35 +777,35 @@ var g = &grammar{ }, { name: "FullDate", - pos: position{line: 153, col: 1, offset: 3350}, + pos: position{line: 149, col: 1, offset: 3333}, expr: &actionExpr{ - pos: position{line: 154, col: 5, offset: 3366}, + pos: position{line: 150, col: 5, offset: 3349}, run: (*parser).callonFullDate1, expr: &seqExpr{ - pos: position{line: 154, col: 5, offset: 3366}, + pos: position{line: 150, col: 5, offset: 3349}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 154, col: 5, offset: 3366}, + pos: position{line: 150, col: 5, offset: 3349}, name: "TimeYear", }, &litMatcher{ - pos: position{line: 154, col: 14, offset: 3375}, + pos: position{line: 150, col: 14, offset: 
3358}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 154, col: 18, offset: 3379}, + pos: position{line: 150, col: 18, offset: 3362}, name: "TimeMonth", }, &litMatcher{ - pos: position{line: 154, col: 28, offset: 3389}, + pos: position{line: 150, col: 28, offset: 3372}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 154, col: 32, offset: 3393}, + pos: position{line: 150, col: 32, offset: 3376}, name: "TimeDay", }, }, @@ -794,52 +814,52 @@ var g = &grammar{ }, { name: "FullTime", - pos: position{line: 158, col: 1, offset: 3437}, + pos: position{line: 154, col: 1, offset: 3420}, expr: &actionExpr{ - pos: position{line: 159, col: 5, offset: 3453}, + pos: position{line: 155, col: 5, offset: 3436}, run: (*parser).callonFullTime1, expr: &seqExpr{ - pos: position{line: 159, col: 5, offset: 3453}, + pos: position{line: 155, col: 5, offset: 3436}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 159, col: 5, offset: 3453}, + pos: position{line: 155, col: 5, offset: 3436}, name: "TimeHour", }, &litMatcher{ - pos: position{line: 159, col: 14, offset: 3462}, + pos: position{line: 155, col: 14, offset: 3445}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 159, col: 18, offset: 3466}, + pos: position{line: 155, col: 18, offset: 3449}, name: "TimeMinute", }, &litMatcher{ - pos: position{line: 159, col: 29, offset: 3477}, + pos: position{line: 155, col: 29, offset: 3460}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 159, col: 33, offset: 3481}, + pos: position{line: 155, col: 33, offset: 3464}, name: "TimeSecond", }, &zeroOrOneExpr{ - pos: position{line: 159, col: 44, offset: 3492}, + pos: position{line: 155, col: 44, offset: 3475}, expr: &seqExpr{ - pos: position{line: 159, col: 45, offset: 3493}, + pos: position{line: 155, col: 45, offset: 3476}, exprs: []any{ &litMatcher{ - pos: position{line: 159, col: 45, offset: 3493}, + pos: 
position{line: 155, col: 45, offset: 3476}, val: ".", ignoreCase: false, want: "\".\"", }, &oneOrMoreExpr{ - pos: position{line: 159, col: 49, offset: 3497}, + pos: position{line: 155, col: 49, offset: 3480}, expr: &ruleRefExpr{ - pos: position{line: 159, col: 49, offset: 3497}, + pos: position{line: 155, col: 49, offset: 3480}, name: "Digit", }, }, @@ -847,28 +867,28 @@ var g = &grammar{ }, }, &choiceExpr{ - pos: position{line: 159, col: 59, offset: 3507}, + pos: position{line: 155, col: 59, offset: 3490}, alternatives: []any{ &litMatcher{ - pos: position{line: 159, col: 59, offset: 3507}, + pos: position{line: 155, col: 59, offset: 3490}, val: "Z", ignoreCase: false, want: "\"Z\"", }, &seqExpr{ - pos: position{line: 159, col: 65, offset: 3513}, + pos: position{line: 155, col: 65, offset: 3496}, exprs: []any{ &choiceExpr{ - pos: position{line: 159, col: 66, offset: 3514}, + pos: position{line: 155, col: 66, offset: 3497}, alternatives: []any{ &litMatcher{ - pos: position{line: 159, col: 66, offset: 3514}, + pos: position{line: 155, col: 66, offset: 3497}, val: "+", ignoreCase: false, want: "\"+\"", }, &litMatcher{ - pos: position{line: 159, col: 72, offset: 3520}, + pos: position{line: 155, col: 72, offset: 3503}, val: "-", ignoreCase: false, want: "\"-\"", @@ -876,17 +896,17 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 159, col: 77, offset: 3525}, + pos: position{line: 155, col: 77, offset: 3508}, name: "TimeHour", }, &litMatcher{ - pos: position{line: 159, col: 86, offset: 3534}, + pos: position{line: 155, col: 86, offset: 3517}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 159, col: 90, offset: 3538}, + pos: position{line: 155, col: 90, offset: 3521}, name: "TimeMinute", }, }, @@ -899,12 +919,12 @@ var g = &grammar{ }, { name: "Char", - pos: position{line: 167, col: 1, offset: 3709}, + pos: position{line: 163, col: 1, offset: 3692}, expr: &actionExpr{ - pos: position{line: 168, col: 5, offset: 3721}, + 
pos: position{line: 164, col: 5, offset: 3704}, run: (*parser).callonChar1, expr: &charClassMatcher{ - pos: position{line: 168, col: 5, offset: 3721}, + pos: position{line: 164, col: 5, offset: 3704}, val: "[A-Za-z]", ranges: []rune{'A', 'Z', 'a', 'z'}, ignoreCase: false, @@ -914,26 +934,26 @@ var g = &grammar{ }, { name: "String", - pos: position{line: 172, col: 1, offset: 3766}, + pos: position{line: 168, col: 1, offset: 3749}, expr: &actionExpr{ - pos: position{line: 173, col: 5, offset: 3780}, + pos: position{line: 169, col: 5, offset: 3763}, run: (*parser).callonString1, expr: &seqExpr{ - pos: position{line: 173, col: 5, offset: 3780}, + pos: position{line: 169, col: 5, offset: 3763}, exprs: []any{ &litMatcher{ - pos: position{line: 173, col: 5, offset: 3780}, + pos: position{line: 169, col: 5, offset: 3763}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, &labeledExpr{ - pos: position{line: 173, col: 9, offset: 3784}, + pos: position{line: 169, col: 9, offset: 3767}, label: "v", expr: &zeroOrMoreExpr{ - pos: position{line: 173, col: 11, offset: 3786}, + pos: position{line: 169, col: 11, offset: 3769}, expr: &charClassMatcher{ - pos: position{line: 173, col: 11, offset: 3786}, + pos: position{line: 169, col: 11, offset: 3769}, val: "[^\"]", chars: []rune{'"'}, ignoreCase: false, @@ -942,7 +962,7 @@ var g = &grammar{ }, }, &litMatcher{ - pos: position{line: 173, col: 17, offset: 3792}, + pos: position{line: 169, col: 17, offset: 3775}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -953,12 +973,12 @@ var g = &grammar{ }, { name: "Digit", - pos: position{line: 177, col: 1, offset: 3827}, + pos: position{line: 173, col: 1, offset: 3810}, expr: &actionExpr{ - pos: position{line: 178, col: 5, offset: 3840}, + pos: position{line: 174, col: 5, offset: 3823}, run: (*parser).callonDigit1, expr: &charClassMatcher{ - pos: position{line: 178, col: 5, offset: 3840}, + pos: position{line: 174, col: 5, offset: 3823}, val: "[0-9]", ranges: []rune{'0', '9'}, ignoreCase: 
false, @@ -968,11 +988,11 @@ var g = &grammar{ }, { name: "_", - pos: position{line: 182, col: 1, offset: 3882}, + pos: position{line: 178, col: 1, offset: 3865}, expr: &zeroOrMoreExpr{ - pos: position{line: 183, col: 5, offset: 3891}, + pos: position{line: 179, col: 5, offset: 3874}, expr: &charClassMatcher{ - pos: position{line: 183, col: 5, offset: 3891}, + pos: position{line: 179, col: 5, offset: 3874}, val: "[ \\t]", chars: []rune{' ', '\t'}, ignoreCase: false, @@ -994,15 +1014,15 @@ func (p *parser) callonAST1() (any, error) { return p.cur.onAST1(stack["nodes"]) } -func (c *current) onNodes1(n any) (any, error) { - return buildNodes(n) +func (c *current) onNodes1(head, tail any) (any, error) { + return buildNodes(head, tail) } func (p *parser) callonNodes1() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onNodes1(stack["n"]) + return p.cur.onNodes1(stack["head"], stack["tail"]) } func (c *current) onGroupNode1(k, v any) (any, error) { diff --git a/services/search/pkg/query/kql/dictionary_test.go b/services/search/pkg/query/kql/dictionary_test.go index 0b56eba69de..e60645f4ab1 100644 --- a/services/search/pkg/query/kql/dictionary_test.go +++ b/services/search/pkg/query/kql/dictionary_test.go @@ -12,7 +12,7 @@ import ( "github.com/owncloud/ocis/v2/services/search/pkg/query/kql" ) -var timeMustParse = func(t *testing.T, ts string) time.Time { +var mustParseTime = func(t *testing.T, ts string) time.Time { tp, err := time.Parse(time.RFC3339Nano, ts) if err != nil { t.Fatalf("time.Parse(...) 
error = %v", err) @@ -21,6 +21,10 @@ var timeMustParse = func(t *testing.T, ts string) time.Time { return tp } +var mustJoin = func(v []string) string { + return strings.Join(v, " ") +} + var FullDictionary = []string{ `federated search`, `federat* search`, @@ -51,78 +55,396 @@ var FullDictionary = []string{ func TestParse(t *testing.T) { tests := []struct { name string - givenQuery []string + skip bool + givenQuery string expectedAst *ast.Ast expectedError error }{ + // SPEC ////////////////////////////////////////////////////////////////////////////// + // https://msopenspecs.azureedge.net/files/MS-KQL/%5bMS-KQL%5d.pdf + // + // 3.1.11 Implicit Operator + { + name: `cat dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `cat AND (dog OR fox)`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "fox"}, + }}, + }, + }, + }, + { + name: `cat (dog OR fox)`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "fox"}, + }}, + }, + }, + }, + // 3.1.12 Parentheses + { + name: `(cat OR dog) AND fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "dog"}, + }}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + // 3.2.3 Implicit Operator for Property Restriction + { + name: `author:"John Smith" filetype:docx`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + 
&ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Key: "filetype", Value: "docx"}, + }, + }, + }, + { + name: `author:"John Smith" AND filetype:docx`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Key: "filetype", Value: "docx"}, + }, + }, + }, + { + name: `author:"John Smith" author:"Jane Smith"`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Key: "author", Value: "Jane Smith"}, + }, + }, + }, + { + name: `author:"John Smith" OR author:"Jane Smith"`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Key: "author", Value: "Jane Smith"}, + }, + }, + }, + { + name: `cat filetype:docx`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Key: "filetype", Value: "docx"}, + }, + }, + }, + { + name: `cat AND filetype:docx`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Key: "filetype", Value: "docx"}, + }, + }, + }, + // 3.3.1.1.1 Implicit AND Operator + { + name: `cat +dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `cat AND dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `cat -dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: 
kql.BoolNOT}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `cat AND NOT dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `cat +dog -fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `cat AND dog AND NOT fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `cat dog +fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `fox OR (fox AND (cat OR dog))`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "fox"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "fox"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "dog"}, + }}, + }}, + }, + }, + }, + { + name: `cat dog -fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `(NOT fox) AND (cat OR dog)`, 
+ expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{Nodes: []ast.Node{ + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "fox"}, + }}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "dog"}, + }}, + }, + }, + }, + { + name: `cat +dog -fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `(NOT fox) AND (dog OR (dog AND cat))`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{Nodes: []ast.Node{ + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "fox"}, + }}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.GroupNode{Nodes: []ast.Node{ + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "cat"}, + }}, + }}, + }, + }, + }, + ////////////////////////////////////////////////////////////////////////////////////// + // everything else { name: "FullDictionary", - givenQuery: FullDictionary, + givenQuery: mustJoin(FullDictionary), expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{Value: "federated"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "search"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "federat*"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "search"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "search"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "fed*"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: 
kql.BoolAND}, &ast.StringNode{Key: "filetype", Value: "docx"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "filename", Value: "budget.xlsx"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "author"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "author"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "author"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "author"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "author"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "author", Value: "Shakespear"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "author", Value: "Paul"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "author", Value: "Shakesp*"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "title", Value: "Advanced Search"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "title", Value: "Advanced Sear*"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "title", Value: "Advan* Search"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "title", Value: "*anced Search"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "author", Value: "Jane Smith"}, - &ast.OperatorNode{Value: "OR"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "filetype", Value: "docx"}, - 
&ast.OperatorNode{Value: "AND"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.GroupNode{ Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: "AND"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "Jane Smith"}, }, }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.GroupNode{ Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: "OR"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "Jane Smith"}, }, }, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.GroupNode{ Nodes: []ast.Node{ &ast.StringNode{Key: "DepartmentId", Value: "*"}, - &ast.OperatorNode{Value: "OR"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "RelatedHubSites", Value: "*"}, }, }, - &ast.OperatorNode{Value: "AND"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "contentclass", Value: "sts_site"}, - &ast.OperatorNode{Value: "NOT"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, &ast.BooleanNode{Key: "IsHubSite", Value: false}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "author", Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.GroupNode{ Nodes: []ast.Node{ &ast.StringNode{Key: "filetype", Value: "docx"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "title", Value: "Advanced Search"}, }, }, @@ -131,36 +453,40 @@ func TestParse(t *testing.T) { }, { name: "Group", - givenQuery: []string{ + givenQuery: mustJoin([]string{ `(name:"moby di*" OR tag:bestseller) AND tag:book NOT tag:read`, `author:("John Smith" Jane)`, `author:("John Smith" OR Jane)`, - }, + }), expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.GroupNode{ Nodes: []ast.Node{ &ast.StringNode{Key: "name", Value: "moby di*"}, - &ast.OperatorNode{Value: "OR"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "tag", Value: "bestseller"}, }, }, - &ast.OperatorNode{Value: "AND"}, + &ast.OperatorNode{Value: 
kql.BoolAND}, &ast.StringNode{Key: "tag", Value: "book"}, - &ast.OperatorNode{Value: "NOT"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, &ast.StringNode{Key: "tag", Value: "read"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.GroupNode{ Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "Jane"}, }, }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.GroupNode{ Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: "OR"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "Jane"}, }, }, @@ -168,37 +494,33 @@ func TestParse(t *testing.T) { }, }, { - name: "KeyGroup or key conjunction", - givenQuery: []string{ - `author:("John Smith" Jane) author:"Jack" AND author:"Oggy"`, - }, + name: `author:("John Smith" Jane) author:"Jack" AND author:"Oggy"`, expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.GroupNode{ Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "Jane"}, }, }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Key: "author", Value: "Jack"}, - &ast.OperatorNode{Value: "AND"}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "author", Value: "Oggy"}, }, }, }, { - name: "KeyGroup", - givenQuery: []string{ - `author:("John Smith" OR Jane)`, - }, + name: `author:("John Smith" OR Jane)`, expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.GroupNode{ Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: "OR"}, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{Value: "Jane"}, }, }, @@ -206,46 +528,41 @@ func TestParse(t *testing.T) { }, }, { - name: "not and not", - givenQuery: []string{ - `NOT "John Smith" NOT Jane`, - }, + name: `NOT "John Smith" NOT Jane`, expectedAst: &ast.Ast{ Nodes: []ast.Node{ - &ast.OperatorNode{Value: "NOT"}, + 
&ast.OperatorNode{Value: kql.BoolNOT}, &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: "NOT"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, &ast.StringNode{Value: "Jane"}, }, }, }, { - name: "not or not and not", - givenQuery: []string{ - `NOT author:"John Smith" NOT author:"Jane Smith" NOT tag:sifi`, - }, + name: `NOT author:"John Smith" NOT author:"Jane Smith" NOT tag:sifi`, expectedAst: &ast.Ast{ Nodes: []ast.Node{ - &ast.OperatorNode{Value: "NOT"}, + &ast.OperatorNode{Value: kql.BoolNOT}, &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.OperatorNode{Value: "NOT"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, &ast.StringNode{Key: "author", Value: "Jane Smith"}, - &ast.OperatorNode{Value: "NOT"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, &ast.StringNode{Key: "tag", Value: "sifi"}, }, }, }, { - name: "misc", - givenQuery: []string{ - `scope:"/new folder/subfolder" file`, - }, + name: `scope:"/new folder/subfolder" file`, expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{ Key: "scope", Value: "/new folder/subfolder", }, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{ Value: "file", }, @@ -253,26 +570,27 @@ func TestParse(t *testing.T) { }, }, { - name: "unicode", - givenQuery: []string{ - ` 😂 "*😀 😁*" name:😂💁👌🎍😍 name:😂💁👌 😍`, - }, + name: ` 😂 "*😀 😁*" name:😂💁👌🎍😍 name:😂💁👌 😍`, expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{ Value: "😂", }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{ Value: "*😀 😁*", }, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{ Key: "name", Value: "😂💁👌🎍😍", }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{ Key: "name", Value: "😂💁👌", }, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{ Value: "😍", }, @@ -281,7 +599,7 @@ func TestParse(t *testing.T) { }, { name: "DateTimeRestrictionNode", - givenQuery: []string{ + givenQuery: mustJoin([]string{ 
`Mtime:"2023-09-05T08:42:11.23554+02:00"`, `Mtime:2023-09-05T08:42:11.23554+02:00`, `Mtime="2023-09-05T08:42:11.23554+02:00"`, @@ -294,84 +612,96 @@ func TestParse(t *testing.T) { `Mtime>2023-09-05T08:42:11.23554+02:00`, `Mtime>="2023-09-05T08:42:11.23554+02:00"`, `Mtime>=2023-09-05T08:42:11.23554+02:00`, - }, + }), expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: ":"}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: ":"}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: "="}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: "="}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: "<"}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: "<"}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: "<="}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, 
"2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: "<="}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: ">"}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: ">"}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: ">="}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.DateTimeNode{ Key: "Mtime", Operator: &ast.OperatorNode{Value: ">="}, - Value: timeMustParse(t, "2023-09-05T08:42:11.23554+02:00"), + Value: mustParseTime(t, "2023-09-05T08:42:11.23554+02:00"), }, }, }, }, { name: "id", - givenQuery: []string{ + givenQuery: mustJoin([]string{ `id:b27d3bf1-b254-459f-92e8-bdba668d6d3f$d0648459-25fb-4ed8-8684-bc62c7dca29c!d0648459-25fb-4ed8-8684-bc62c7dca29c`, `ID:b27d3bf1-b254-459f-92e8-bdba668d6d3f$d0648459-25fb-4ed8-8684-bc62c7dca29c!d0648459-25fb-4ed8-8684-bc62c7dca29c`, - }, + }), expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{ Key: "id", Value: "b27d3bf1-b254-459f-92e8-bdba668d6d3f$d0648459-25fb-4ed8-8684-bc62c7dca29c!d0648459-25fb-4ed8-8684-bc62c7dca29c", }, + &ast.OperatorNode{Value: kql.BoolOR}, &ast.StringNode{ Key: "ID", Value: "b27d3bf1-b254-459f-92e8-bdba668d6d3f$d0648459-25fb-4ed8-8684-bc62c7dca29c!d0648459-25fb-4ed8-8684-bc62c7dca29c", @@ -385,8 +715,17 @@ func TestParse(t 
*testing.T) { for _, tt := range tests { tt := tt + t.Run(tt.name, func(t *testing.T) { - q := strings.Join(tt.givenQuery, " ") + if tt.skip { + t.Skip() + } + + q := tt.name + + if tt.givenQuery != "" { + q = tt.givenQuery + } parsedAST, err := kql.Parse("", []byte(q)) @@ -397,12 +736,6 @@ func TestParse(t *testing.T) { return } - normalizedNodes, err := kql.NormalizeNodes(tt.expectedAst.Nodes) - if err != nil { - t.Fatalf("NormalizeNodes() error = %v", err) - } - tt.expectedAst.Nodes = normalizedNodes - if diff := test.DiffAst(tt.expectedAst, parsedAST); diff != "" { t.Fatalf("AST mismatch \nquery: '%s' \n(-want +got): %s", q, diff) } diff --git a/services/search/pkg/query/kql/factory.go b/services/search/pkg/query/kql/factory.go index b29a4ab61b9..bdee43a8873 100644 --- a/services/search/pkg/query/kql/factory.go +++ b/services/search/pkg/query/kql/factory.go @@ -38,31 +38,35 @@ func buildAST(n interface{}, text []byte, pos position) (*ast.Ast, error) { return nil, err } - normalizedNodes, err := NormalizeNodes(nodes) - if err != nil { - return nil, err - } - return &ast.Ast{ Base: b, - Nodes: normalizedNodes, + Nodes: nodes, }, nil } -func buildNodes(e interface{}) ([]ast.Node, error) { - maybeNodesGroups := toIfaceSlice(e) +func buildNodes(head, tail interface{}) ([]ast.Node, error) { + headNode, err := toNode[ast.Node](head) + if err != nil { + return nil, err + } + + if tail == nil { + return []ast.Node{headNode}, nil + } + + tailNodes, err := toNodes[ast.Node](tail) + if err != nil { + return nil, err + } - nodes := make([]ast.Node, len(maybeNodesGroups)) - for i, maybeNodesGroup := range maybeNodesGroups { - node, err := toNode[ast.Node](toIfaceSlice(maybeNodesGroup)[1]) - if err != nil { - return nil, err - } + allNodes := []ast.Node{headNode} - nodes[i] = node + connectionNode := incorporateNode(headNode, tailNodes...) 
+ if connectionNode != nil { + allNodes = append(allNodes, connectionNode) } - return nodes, nil + return append(allNodes, tailNodes...), nil } func buildStringNode(k, v interface{}, text []byte, pos position) (*ast.StringNode, error) { @@ -151,6 +155,13 @@ func buildOperatorNode(text []byte, pos position) (*ast.OperatorNode, error) { return nil, err } + switch value { + case "+": + value = BoolAND + case "-": + value = BoolNOT + } + return &ast.OperatorNode{ Base: b, Value: value, diff --git a/services/search/pkg/query/kql/kql.go b/services/search/pkg/query/kql/kql.go index c49d1fb2c2f..0566278aa62 100644 --- a/services/search/pkg/query/kql/kql.go +++ b/services/search/pkg/query/kql/kql.go @@ -2,9 +2,21 @@ package kql import ( + "strings" + "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" ) +// The operator node value definition +const ( + // BoolAND connect two nodes with "AND" + BoolAND = "AND" + // BoolOR connect two nodes with "OR" + BoolOR = "OR" + // BoolNOT connect two nodes with "NOT" + BoolNOT = "NOT" +) + // Builder implements kql Builder interface type Builder struct{} @@ -16,3 +28,113 @@ func (b Builder) Build(q string) (*ast.Ast, error) { } return f.(*ast.Ast), nil } + +// incorporateNode connects a leading node with the rest +func incorporateNode(headNode ast.Node, tailNodes ...ast.Node) *ast.OperatorNode { + switch headNode.(type) { + case *ast.OperatorNode: + return nil + } + + var nearestNeighborNode ast.Node + var nearestNeighborOperators []*ast.OperatorNode + +l: + for _, tailNode := range tailNodes { + switch node := tailNode.(type) { + case *ast.OperatorNode: + nearestNeighborOperators = append(nearestNeighborOperators, node) + default: + nearestNeighborNode = node + break l + } + } + + if nearestNeighborNode == nil { + return nil + } + + headKey := strings.ToLower(nodeKey(headNode)) + neighborKey := strings.ToLower(nodeKey(nearestNeighborNode)) + + connection := &ast.OperatorNode{ + Base: &ast.Base{Loc: 
&ast.Location{Source: &[]string{"implicitly operator"}[0]}}, + Value: BoolAND, + } + + // if the current node and the neighbor node have the same key + // the connection is of type OR, same applies if no keys are in place + // + // "" == "" + // + // spec: same + // author:"John Smith" author:"Jane Smith" + // author:"John Smith" OR author:"Jane Smith" + if headKey == neighborKey { + connection.Value = BoolOR + } + + // decisions based on nearest neighbor node + switch nearestNeighborNode.(type) { + // nearest neighbor node type could change the default case + // docs says, if the next value node: + // + // is a group AND has no key + // + // even if the current node has none too, which normal leads to SAME KEY OR + // + // it should be an AND edge + // + // spec: same + // cat (dog OR fox) + // cat AND (dog OR fox) + // + // note: + // sounds contradictory to me + case *ast.GroupNode: + if headKey == "" && neighborKey == "" { + connection.Value = BoolAND + } + } + + // decisions based on nearest neighbor operators + for i, node := range nearestNeighborOperators { + // consider direct neighbor operator only + if i == 0 { + // no connection is necessary here because an `AND` or `OR` edge is already present + // exit + for _, skipValue := range []string{BoolOR, BoolAND} { + if node.Value == skipValue { + return nil + } + } + + // if neighbor node negotiates, AND edge is needed + // + // spec: same + // cat -dog + // cat AND NOT dog + if node.Value == BoolNOT { + connection.Value = BoolAND + } + } + } + + return connection +} + +// nodeKey tries to return a node key +func nodeKey(n ast.Node) string { + switch node := n.(type) { + case *ast.StringNode: + return node.Key + case *ast.DateTimeNode: + return node.Key + case *ast.BooleanNode: + return node.Key + case *ast.GroupNode: + return node.Key + default: + return "" + } +} diff --git a/services/search/pkg/query/kql/kql_test.go b/services/search/pkg/query/kql/kql_test.go index 1ec78cfdfeb..d18068ed575 100644 --- 
a/services/search/pkg/query/kql/kql_test.go +++ b/services/search/pkg/query/kql/kql_test.go @@ -20,8 +20,8 @@ func TestNewAST(t *testing.T) { }, { name: "error", - givenQuery: "AND", - shouldError: true, + givenQuery: kql.BoolAND, + shouldError: false, }, } diff --git a/services/search/pkg/query/kql/normalize.go b/services/search/pkg/query/kql/normalize.go deleted file mode 100644 index 9ee287c5b6b..00000000000 --- a/services/search/pkg/query/kql/normalize.go +++ /dev/null @@ -1,128 +0,0 @@ -package kql - -import ( - "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" -) - -var implicitOperatorNodeSource = "implicitly operator" -var operatorNodeAnd = ast.OperatorNode{Base: &ast.Base{Loc: &ast.Location{Source: &implicitOperatorNodeSource}}, Value: BoolAND} -var operatorNodeOr = ast.OperatorNode{Base: &ast.Base{Loc: &ast.Location{Source: &implicitOperatorNodeSource}}, Value: BoolOR} - -// NormalizeNodes Populate the implicit logical operators in the ast -// -// https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference#constructing-free-text-queries-using-kql -// If there are multiple free-text expressions without any operators in between them, the query behavior is the same as using the AND operator. -// "John Smith" "Jane Smith" -// This functionally is the same as using the AND Boolean operator, as follows: -// "John Smith" AND "Jane Smith" -// -// https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference#using-multiple-property-restrictions-within-a-kql-query -// When you use multiple instances of the same property restriction, matches are based on the union of the property restrictions in the KQL query. 
-// author:"John Smith" author:"Jane Smith" -// This functionally is the same as using the OR Boolean operator, as follows: -// author:"John Smith" OR author:"Jane Smith" -// -// When you use different property restrictions, matches are based on an intersection of the property restrictions in the KQL query, as follows: -// author:"John Smith" filetype:docx -// This is the same as using the AND Boolean operator, as follows: -// author:"John Smith" AND filetype:docx -// -// https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference#grouping-property-restrictions-within-a-kql-query -// author:("John Smith" "Jane Smith") -// This is the same as using the AND Boolean operator, as follows: -// author:"John Smith" AND author:"Jane Smith" -func NormalizeNodes(nodes []ast.Node) ([]ast.Node, error) { - res := make([]ast.Node, 0, len(nodes)) - var currentNode ast.Node - var prevKey, currentKey *string - var operator *ast.OperatorNode - for _, node := range nodes { - switch n := node.(type) { - case *ast.StringNode: - if prevKey == nil { - prevKey = &n.Key - res = append(res, node) - continue - } - currentNode = n - currentKey = &n.Key - case *ast.DateTimeNode: - if prevKey == nil { - prevKey = &n.Key - res = append(res, node) - continue - } - currentNode = n - currentKey = &n.Key - case *ast.BooleanNode: - if prevKey == nil { - prevKey = &n.Key - res = append(res, node) - continue - } - currentNode = n - currentKey = &n.Key - case *ast.GroupNode: - var err error - n.Nodes, err = NormalizeNodes(n.Nodes) - if err != nil { - return nil, err - } - if prevKey == nil { - prevKey = &n.Key - res = append(res, n) - continue - } - currentNode = n - currentKey = &n.Key - case *ast.OperatorNode: - if n.Value == BoolNOT { - if prevKey == nil { - res = append(res, n) - } else { - operator = n - } - } else { - if prevKey == nil { - return nil, &StartsWithBinaryOperatorError{Op: n.Value} - } - prevKey = nil - res = append(res, node) - } - 
default: - prevKey = nil - res = append(res, node) - } - if prevKey != nil && currentKey != nil { - if *prevKey == *currentKey && *prevKey != "" { - res = append(res, &operatorNodeOr) - } else { - res = append(res, &operatorNodeAnd) - } - if operator != nil { - res = append(res, operator) - operator = nil - } - res = append(res, currentNode) - - prevKey = currentKey - currentNode = nil - currentKey = nil - continue - } - } - - return trimOrphan(res), nil -} - -func trimOrphan(nodes []ast.Node) []ast.Node { - offset := len(nodes) - for i := len(nodes) - 1; i >= 0; i-- { - if _, ok := nodes[i].(*ast.OperatorNode); ok { - offset-- - } else { - break - } - } - return nodes[:offset] -} diff --git a/services/search/pkg/query/kql/normalize_test.go b/services/search/pkg/query/kql/normalize_test.go deleted file mode 100644 index 7beaf60e70a..00000000000 --- a/services/search/pkg/query/kql/normalize_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package kql_test - -import ( - "testing" - "time" - - tAssert "github.com/stretchr/testify/assert" - - "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" - "github.com/owncloud/ocis/v2/services/search/pkg/query/ast/test" - "github.com/owncloud/ocis/v2/services/search/pkg/query/kql" -) - -var now = time.Now() - -func TestNormalizeNodes(t *testing.T) { - tests := []struct { - name string - givenNodes []ast.Node - expectedNodes []ast.Node - fixme bool - expectedError error - }{ - { - name: "start with binary operator", - givenNodes: []ast.Node{ - &ast.OperatorNode{Value: "OR"}, - }, - expectedError: &kql.StartsWithBinaryOperatorError{Op: "OR"}, - }, - { - name: "same key implicit OR", - givenNodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.StringNode{Key: "author", Value: "Jane Smith"}, - }, - expectedNodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.OperatorNode{Value: "OR"}, - &ast.StringNode{Key: "author", Value: "Jane Smith"}, - }, - }, 
- { - name: "no key implicit AND", - givenNodes: []ast.Node{ - &ast.StringNode{Value: "John Smith"}, - &ast.StringNode{Value: "Jane Smith"}, - }, - expectedNodes: []ast.Node{ - &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: "AND"}, - &ast.StringNode{Value: "Jane Smith"}, - }, - }, - { - name: "same key explicit AND", - givenNodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.OperatorNode{Value: "AND"}, - &ast.StringNode{Key: "author", Value: "Jane Smith"}, - }, - expectedNodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.OperatorNode{Value: "AND"}, - &ast.StringNode{Key: "author", Value: "Jane Smith"}, - }, - }, - { - name: "key-group implicit AND", - // https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference#grouping-property-restrictions-within-a-kql-query - fixme: true, - givenNodes: []ast.Node{ - &ast.GroupNode{Key: "author", Nodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.StringNode{Key: "author", Value: "Jane Smith"}, - }}, - }, - expectedNodes: []ast.Node{ - &ast.GroupNode{Key: "author", Nodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.OperatorNode{Value: "AND"}, - &ast.StringNode{Key: "author", Value: "Jane Smith"}, - }}, - }, - }, - { - name: "different key implicit AND", - givenNodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.StringNode{Key: "filetype", Value: "docx"}, - &ast.DateTimeNode{Key: "mtime", Operator: &ast.OperatorNode{Value: "="}, Value: now}, - }, - expectedNodes: []ast.Node{ - &ast.StringNode{Key: "author", Value: "John Smith"}, - &ast.OperatorNode{Value: "AND"}, - &ast.StringNode{Key: "filetype", Value: "docx"}, - &ast.OperatorNode{Value: "AND"}, - &ast.DateTimeNode{Key: "mtime", Operator: &ast.OperatorNode{Value: "="}, Value: now}, - }, - }, - } - - assert := tAssert.New(t) - - for _, tt := range tests { - tt := 
tt - t.Run(tt.name, func(t *testing.T) { - if tt.fixme { - t.Skip("not implemented") - } - - normalizedNodes, err := kql.NormalizeNodes(tt.givenNodes) - - if tt.expectedError != nil { - assert.Equal(err, tt.expectedError) - assert.Nil(normalizedNodes) - - return - } - - if diff := test.DiffAst(tt.expectedNodes, normalizedNodes); diff != "" { - t.Fatalf("Nodes mismatch (-want +got): %s", diff) - } - }) - } -} From f4ee91184a89785896e39328bc5232e108a46fdf Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Fri, 8 Sep 2023 12:43:00 +0200 Subject: [PATCH 2/8] enhancement: kql parser error if query starts with AND --- services/search/pkg/query/kql/dictionary.peg | 23 +- .../search/pkg/query/kql/dictionary_gen.go | 539 ++++++++++-------- .../search/pkg/query/kql/dictionary_test.go | 29 +- services/search/pkg/query/kql/kql.go | 13 +- services/search/pkg/query/kql/kql_test.go | 2 +- 5 files changed, 365 insertions(+), 241 deletions(-) diff --git a/services/search/pkg/query/kql/dictionary.peg b/services/search/pkg/query/kql/dictionary.peg index 6c177b2d87c..289d3e4bdb8 100644 --- a/services/search/pkg/query/kql/dictionary.peg +++ b/services/search/pkg/query/kql/dictionary.peg @@ -7,7 +7,7 @@ //////////////////////////////////////////////////////// AST <- - _ nodes:Nodes _ { + _ !OperatorBooleanAndNode _ nodes:Nodes _ { return buildAST(nodes, c.text, c.pos) } @@ -15,7 +15,7 @@ Nodes <- _ head:( GroupNode / PropertyRestrictionNodes / - OperatorBooleanNode / + OperatorBooleanNodes / FreeTextKeywordNodes ) _ tail:Nodes? 
{ return buildNodes(head, tail) @@ -76,8 +76,23 @@ WordNode <- // operators //////////////////////////////////////////////////////// -OperatorBooleanNode <- - ("AND" / "OR" / "NOT" / "+" / "-") { +OperatorBooleanNodes <- + OperatorBooleanAndNode / + OperatorBooleanNotNode / + OperatorBooleanOrNode + +OperatorBooleanAndNode <- + ("AND" / "+") { + return buildOperatorNode(c.text, c.pos) + } + +OperatorBooleanNotNode <- + ("NOT" / "-") { + return buildOperatorNode(c.text, c.pos) + } + +OperatorBooleanOrNode <- + ("OR") { return buildOperatorNode(c.text, c.pos) } diff --git a/services/search/pkg/query/kql/dictionary_gen.go b/services/search/pkg/query/kql/dictionary_gen.go index 4c9e749ffbd..67c9959e1c9 100644 --- a/services/search/pkg/query/kql/dictionary_gen.go +++ b/services/search/pkg/query/kql/dictionary_gen.go @@ -32,16 +32,27 @@ var g = &grammar{ pos: position{line: 10, col: 5, offset: 154}, name: "_", }, + ¬Expr{ + pos: position{line: 10, col: 7, offset: 156}, + expr: &ruleRefExpr{ + pos: position{line: 10, col: 8, offset: 157}, + name: "OperatorBooleanAndNode", + }, + }, + &ruleRefExpr{ + pos: position{line: 10, col: 31, offset: 180}, + name: "_", + }, &labeledExpr{ - pos: position{line: 10, col: 7, offset: 156}, + pos: position{line: 10, col: 33, offset: 182}, label: "nodes", expr: &ruleRefExpr{ - pos: position{line: 10, col: 13, offset: 162}, + pos: position{line: 10, col: 39, offset: 188}, name: "Nodes", }, }, &ruleRefExpr{ - pos: position{line: 10, col: 19, offset: 168}, + pos: position{line: 10, col: 45, offset: 194}, name: "_", }, }, @@ -50,53 +61,53 @@ var g = &grammar{ }, { name: "Nodes", - pos: position{line: 14, col: 1, offset: 225}, + pos: position{line: 14, col: 1, offset: 251}, expr: &actionExpr{ - pos: position{line: 15, col: 5, offset: 238}, + pos: position{line: 15, col: 5, offset: 264}, run: (*parser).callonNodes1, expr: &seqExpr{ - pos: position{line: 15, col: 5, offset: 238}, + pos: position{line: 15, col: 5, offset: 264}, exprs: []any{ 
&ruleRefExpr{ - pos: position{line: 15, col: 5, offset: 238}, + pos: position{line: 15, col: 5, offset: 264}, name: "_", }, &labeledExpr{ - pos: position{line: 15, col: 7, offset: 240}, + pos: position{line: 15, col: 7, offset: 266}, label: "head", expr: &choiceExpr{ - pos: position{line: 16, col: 9, offset: 255}, + pos: position{line: 16, col: 9, offset: 281}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 16, col: 9, offset: 255}, + pos: position{line: 16, col: 9, offset: 281}, name: "GroupNode", }, &ruleRefExpr{ - pos: position{line: 17, col: 9, offset: 275}, + pos: position{line: 17, col: 9, offset: 301}, name: "PropertyRestrictionNodes", }, &ruleRefExpr{ - pos: position{line: 18, col: 9, offset: 310}, - name: "OperatorBooleanNode", + pos: position{line: 18, col: 9, offset: 336}, + name: "OperatorBooleanNodes", }, &ruleRefExpr{ - pos: position{line: 19, col: 9, offset: 340}, + pos: position{line: 19, col: 9, offset: 367}, name: "FreeTextKeywordNodes", }, }, }, }, &ruleRefExpr{ - pos: position{line: 20, col: 7, offset: 367}, + pos: position{line: 20, col: 7, offset: 394}, name: "_", }, &labeledExpr{ - pos: position{line: 20, col: 9, offset: 369}, + pos: position{line: 20, col: 9, offset: 396}, label: "tail", expr: &zeroOrOneExpr{ - pos: position{line: 20, col: 14, offset: 374}, + pos: position{line: 20, col: 14, offset: 401}, expr: &ruleRefExpr{ - pos: position{line: 20, col: 14, offset: 374}, + pos: position{line: 20, col: 14, offset: 401}, name: "Nodes", }, }, @@ -107,59 +118,59 @@ var g = &grammar{ }, { name: "GroupNode", - pos: position{line: 28, col: 1, offset: 552}, + pos: position{line: 28, col: 1, offset: 579}, expr: &actionExpr{ - pos: position{line: 29, col: 5, offset: 569}, + pos: position{line: 29, col: 5, offset: 596}, run: (*parser).callonGroupNode1, expr: &seqExpr{ - pos: position{line: 29, col: 5, offset: 569}, + pos: position{line: 29, col: 5, offset: 596}, exprs: []any{ &labeledExpr{ - pos: position{line: 29, col: 5, offset: 569}, + 
pos: position{line: 29, col: 5, offset: 596}, label: "k", expr: &zeroOrOneExpr{ - pos: position{line: 29, col: 7, offset: 571}, + pos: position{line: 29, col: 7, offset: 598}, expr: &oneOrMoreExpr{ - pos: position{line: 29, col: 8, offset: 572}, + pos: position{line: 29, col: 8, offset: 599}, expr: &ruleRefExpr{ - pos: position{line: 29, col: 8, offset: 572}, + pos: position{line: 29, col: 8, offset: 599}, name: "Char", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 29, col: 16, offset: 580}, + pos: position{line: 29, col: 16, offset: 607}, expr: &choiceExpr{ - pos: position{line: 29, col: 17, offset: 581}, + pos: position{line: 29, col: 17, offset: 608}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 29, col: 17, offset: 581}, + pos: position{line: 29, col: 17, offset: 608}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 29, col: 37, offset: 601}, + pos: position{line: 29, col: 37, offset: 628}, name: "OperatorEqualNode", }, }, }, }, &litMatcher{ - pos: position{line: 29, col: 57, offset: 621}, + pos: position{line: 29, col: 57, offset: 648}, val: "(", ignoreCase: false, want: "\"(\"", }, &labeledExpr{ - pos: position{line: 29, col: 61, offset: 625}, + pos: position{line: 29, col: 61, offset: 652}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 29, col: 63, offset: 627}, + pos: position{line: 29, col: 63, offset: 654}, name: "Nodes", }, }, &litMatcher{ - pos: position{line: 29, col: 69, offset: 633}, + pos: position{line: 29, col: 69, offset: 660}, val: ")", ignoreCase: false, want: "\")\"", @@ -170,20 +181,20 @@ var g = &grammar{ }, { name: "PropertyRestrictionNodes", - pos: position{line: 37, col: 1, offset: 837}, + pos: position{line: 37, col: 1, offset: 864}, expr: &choiceExpr{ - pos: position{line: 38, col: 5, offset: 869}, + pos: position{line: 38, col: 5, offset: 896}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 38, col: 5, offset: 869}, + pos: position{line: 38, col: 5, offset: 896}, name: 
"YesNoPropertyRestrictionNode", }, &ruleRefExpr{ - pos: position{line: 39, col: 5, offset: 904}, + pos: position{line: 39, col: 5, offset: 931}, name: "DateTimeRestrictionNode", }, &ruleRefExpr{ - pos: position{line: 40, col: 5, offset: 934}, + pos: position{line: 40, col: 5, offset: 961}, name: "TextPropertyRestrictionNode", }, }, @@ -191,51 +202,51 @@ var g = &grammar{ }, { name: "YesNoPropertyRestrictionNode", - pos: position{line: 42, col: 1, offset: 963}, + pos: position{line: 42, col: 1, offset: 990}, expr: &actionExpr{ - pos: position{line: 43, col: 5, offset: 999}, + pos: position{line: 43, col: 5, offset: 1026}, run: (*parser).callonYesNoPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 43, col: 5, offset: 999}, + pos: position{line: 43, col: 5, offset: 1026}, exprs: []any{ &labeledExpr{ - pos: position{line: 43, col: 5, offset: 999}, + pos: position{line: 43, col: 5, offset: 1026}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 43, col: 7, offset: 1001}, + pos: position{line: 43, col: 7, offset: 1028}, expr: &ruleRefExpr{ - pos: position{line: 43, col: 7, offset: 1001}, + pos: position{line: 43, col: 7, offset: 1028}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 43, col: 14, offset: 1008}, + pos: position{line: 43, col: 14, offset: 1035}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 43, col: 14, offset: 1008}, + pos: position{line: 43, col: 14, offset: 1035}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 43, col: 34, offset: 1028}, + pos: position{line: 43, col: 34, offset: 1055}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 43, col: 53, offset: 1047}, + pos: position{line: 43, col: 53, offset: 1074}, label: "v", expr: &choiceExpr{ - pos: position{line: 43, col: 56, offset: 1050}, + pos: position{line: 43, col: 56, offset: 1077}, alternatives: []any{ &litMatcher{ - pos: position{line: 43, col: 56, offset: 1050}, + pos: position{line: 43, col: 56, offset: 1077}, 
val: "true", ignoreCase: false, want: "\"true\"", }, &litMatcher{ - pos: position{line: 43, col: 65, offset: 1059}, + pos: position{line: 43, col: 65, offset: 1086}, val: "false", ignoreCase: false, want: "\"false\"", @@ -249,93 +260,93 @@ var g = &grammar{ }, { name: "DateTimeRestrictionNode", - pos: position{line: 47, col: 1, offset: 1129}, + pos: position{line: 47, col: 1, offset: 1156}, expr: &actionExpr{ - pos: position{line: 48, col: 5, offset: 1160}, + pos: position{line: 48, col: 5, offset: 1187}, run: (*parser).callonDateTimeRestrictionNode1, expr: &seqExpr{ - pos: position{line: 48, col: 5, offset: 1160}, + pos: position{line: 48, col: 5, offset: 1187}, exprs: []any{ &labeledExpr{ - pos: position{line: 48, col: 5, offset: 1160}, + pos: position{line: 48, col: 5, offset: 1187}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 48, col: 7, offset: 1162}, + pos: position{line: 48, col: 7, offset: 1189}, expr: &ruleRefExpr{ - pos: position{line: 48, col: 7, offset: 1162}, + pos: position{line: 48, col: 7, offset: 1189}, name: "Char", }, }, }, &labeledExpr{ - pos: position{line: 48, col: 13, offset: 1168}, + pos: position{line: 48, col: 13, offset: 1195}, label: "o", expr: &choiceExpr{ - pos: position{line: 48, col: 16, offset: 1171}, + pos: position{line: 48, col: 16, offset: 1198}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 48, col: 16, offset: 1171}, + pos: position{line: 48, col: 16, offset: 1198}, name: "OperatorGreaterOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 45, offset: 1200}, + pos: position{line: 48, col: 45, offset: 1227}, name: "OperatorLessOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 71, offset: 1226}, + pos: position{line: 48, col: 71, offset: 1253}, name: "OperatorGreaterNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 93, offset: 1248}, + pos: position{line: 48, col: 93, offset: 1275}, name: "OperatorLessNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 112, offset: 1267}, + 
pos: position{line: 48, col: 112, offset: 1294}, name: "OperatorEqualNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 132, offset: 1287}, + pos: position{line: 48, col: 132, offset: 1314}, name: "OperatorColonNode", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 48, col: 151, offset: 1306}, + pos: position{line: 48, col: 151, offset: 1333}, expr: &litMatcher{ - pos: position{line: 48, col: 151, offset: 1306}, + pos: position{line: 48, col: 151, offset: 1333}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, }, &labeledExpr{ - pos: position{line: 48, col: 156, offset: 1311}, + pos: position{line: 48, col: 156, offset: 1338}, label: "v", expr: &seqExpr{ - pos: position{line: 48, col: 159, offset: 1314}, + pos: position{line: 48, col: 159, offset: 1341}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 48, col: 159, offset: 1314}, + pos: position{line: 48, col: 159, offset: 1341}, name: "FullDate", }, &litMatcher{ - pos: position{line: 48, col: 168, offset: 1323}, + pos: position{line: 48, col: 168, offset: 1350}, val: "T", ignoreCase: false, want: "\"T\"", }, &ruleRefExpr{ - pos: position{line: 48, col: 172, offset: 1327}, + pos: position{line: 48, col: 172, offset: 1354}, name: "FullTime", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 48, col: 182, offset: 1337}, + pos: position{line: 48, col: 182, offset: 1364}, expr: &litMatcher{ - pos: position{line: 48, col: 182, offset: 1337}, + pos: position{line: 48, col: 182, offset: 1364}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -347,51 +358,51 @@ var g = &grammar{ }, { name: "TextPropertyRestrictionNode", - pos: position{line: 52, col: 1, offset: 1408}, + pos: position{line: 52, col: 1, offset: 1435}, expr: &actionExpr{ - pos: position{line: 53, col: 5, offset: 1443}, + pos: position{line: 53, col: 5, offset: 1470}, run: (*parser).callonTextPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 53, col: 5, offset: 1443}, + pos: position{line: 53, col: 5, offset: 1470}, exprs: []any{ 
&labeledExpr{ - pos: position{line: 53, col: 5, offset: 1443}, + pos: position{line: 53, col: 5, offset: 1470}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 53, col: 7, offset: 1445}, + pos: position{line: 53, col: 7, offset: 1472}, expr: &ruleRefExpr{ - pos: position{line: 53, col: 7, offset: 1445}, + pos: position{line: 53, col: 7, offset: 1472}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 53, col: 14, offset: 1452}, + pos: position{line: 53, col: 14, offset: 1479}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 53, col: 14, offset: 1452}, + pos: position{line: 53, col: 14, offset: 1479}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 53, col: 34, offset: 1472}, + pos: position{line: 53, col: 34, offset: 1499}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 53, col: 53, offset: 1491}, + pos: position{line: 53, col: 53, offset: 1518}, label: "v", expr: &choiceExpr{ - pos: position{line: 53, col: 56, offset: 1494}, + pos: position{line: 53, col: 56, offset: 1521}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 53, col: 56, offset: 1494}, + pos: position{line: 53, col: 56, offset: 1521}, name: "String", }, &oneOrMoreExpr{ - pos: position{line: 53, col: 65, offset: 1503}, + pos: position{line: 53, col: 65, offset: 1530}, expr: &charClassMatcher{ - pos: position{line: 53, col: 65, offset: 1503}, + pos: position{line: 53, col: 65, offset: 1530}, val: "[^ ()]", chars: []rune{' ', '(', ')'}, ignoreCase: false, @@ -407,16 +418,16 @@ var g = &grammar{ }, { name: "FreeTextKeywordNodes", - pos: position{line: 61, col: 1, offset: 1709}, + pos: position{line: 61, col: 1, offset: 1736}, expr: &choiceExpr{ - pos: position{line: 62, col: 5, offset: 1737}, + pos: position{line: 62, col: 5, offset: 1764}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 62, col: 5, offset: 1737}, + pos: position{line: 62, col: 5, offset: 1764}, name: "PhraseNode", }, &ruleRefExpr{ - pos: position{line: 
63, col: 5, offset: 1754}, + pos: position{line: 63, col: 5, offset: 1781}, name: "WordNode", }, }, @@ -424,40 +435,40 @@ var g = &grammar{ }, { name: "PhraseNode", - pos: position{line: 65, col: 1, offset: 1764}, + pos: position{line: 65, col: 1, offset: 1791}, expr: &actionExpr{ - pos: position{line: 66, col: 6, offset: 1783}, + pos: position{line: 66, col: 6, offset: 1810}, run: (*parser).callonPhraseNode1, expr: &seqExpr{ - pos: position{line: 66, col: 6, offset: 1783}, + pos: position{line: 66, col: 6, offset: 1810}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 66, col: 6, offset: 1783}, + pos: position{line: 66, col: 6, offset: 1810}, expr: &ruleRefExpr{ - pos: position{line: 66, col: 6, offset: 1783}, + pos: position{line: 66, col: 6, offset: 1810}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 66, col: 25, offset: 1802}, + pos: position{line: 66, col: 25, offset: 1829}, name: "_", }, &labeledExpr{ - pos: position{line: 66, col: 27, offset: 1804}, + pos: position{line: 66, col: 27, offset: 1831}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 66, col: 29, offset: 1806}, + pos: position{line: 66, col: 29, offset: 1833}, name: "String", }, }, &ruleRefExpr{ - pos: position{line: 66, col: 36, offset: 1813}, + pos: position{line: 66, col: 36, offset: 1840}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 66, col: 38, offset: 1815}, + pos: position{line: 66, col: 38, offset: 1842}, expr: &ruleRefExpr{ - pos: position{line: 66, col: 38, offset: 1815}, + pos: position{line: 66, col: 38, offset: 1842}, name: "OperatorColonNode", }, }, @@ -467,31 +478,31 @@ var g = &grammar{ }, { name: "WordNode", - pos: position{line: 70, col: 1, offset: 1896}, + pos: position{line: 70, col: 1, offset: 1923}, expr: &actionExpr{ - pos: position{line: 71, col: 6, offset: 1913}, + pos: position{line: 71, col: 6, offset: 1940}, run: (*parser).callonWordNode1, expr: &seqExpr{ - pos: position{line: 71, col: 6, offset: 1913}, + pos: position{line: 
71, col: 6, offset: 1940}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 71, col: 6, offset: 1913}, + pos: position{line: 71, col: 6, offset: 1940}, expr: &ruleRefExpr{ - pos: position{line: 71, col: 6, offset: 1913}, + pos: position{line: 71, col: 6, offset: 1940}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 71, col: 25, offset: 1932}, + pos: position{line: 71, col: 25, offset: 1959}, name: "_", }, &labeledExpr{ - pos: position{line: 71, col: 27, offset: 1934}, + pos: position{line: 71, col: 27, offset: 1961}, label: "v", expr: &oneOrMoreExpr{ - pos: position{line: 71, col: 29, offset: 1936}, + pos: position{line: 71, col: 29, offset: 1963}, expr: &charClassMatcher{ - pos: position{line: 71, col: 29, offset: 1936}, + pos: position{line: 71, col: 29, offset: 1963}, val: "[^ :()]", chars: []rune{' ', ':', '(', ')'}, ignoreCase: false, @@ -500,13 +511,13 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 71, col: 38, offset: 1945}, + pos: position{line: 71, col: 38, offset: 1972}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 71, col: 40, offset: 1947}, + pos: position{line: 71, col: 40, offset: 1974}, expr: &ruleRefExpr{ - pos: position{line: 71, col: 40, offset: 1947}, + pos: position{line: 71, col: 40, offset: 1974}, name: "OperatorColonNode", }, }, @@ -515,40 +526,68 @@ var g = &grammar{ }, }, { - name: "OperatorBooleanNode", - pos: position{line: 79, col: 1, offset: 2156}, + name: "OperatorBooleanNodes", + pos: position{line: 79, col: 1, offset: 2183}, + expr: &choiceExpr{ + pos: position{line: 80, col: 5, offset: 2211}, + alternatives: []any{ + &ruleRefExpr{ + pos: position{line: 80, col: 5, offset: 2211}, + name: "OperatorBooleanAndNode", + }, + &ruleRefExpr{ + pos: position{line: 81, col: 5, offset: 2240}, + name: "OperatorBooleanNotNode", + }, + &ruleRefExpr{ + pos: position{line: 82, col: 5, offset: 2269}, + name: "OperatorBooleanOrNode", + }, + }, + }, + }, + { + name: "OperatorBooleanAndNode", + pos: 
position{line: 84, col: 1, offset: 2292}, expr: &actionExpr{ - pos: position{line: 80, col: 5, offset: 2183}, - run: (*parser).callonOperatorBooleanNode1, + pos: position{line: 85, col: 5, offset: 2322}, + run: (*parser).callonOperatorBooleanAndNode1, expr: &choiceExpr{ - pos: position{line: 80, col: 6, offset: 2184}, + pos: position{line: 85, col: 6, offset: 2323}, alternatives: []any{ &litMatcher{ - pos: position{line: 80, col: 6, offset: 2184}, + pos: position{line: 85, col: 6, offset: 2323}, val: "AND", ignoreCase: false, want: "\"AND\"", }, &litMatcher{ - pos: position{line: 80, col: 14, offset: 2192}, - val: "OR", + pos: position{line: 85, col: 14, offset: 2331}, + val: "+", ignoreCase: false, - want: "\"OR\"", + want: "\"+\"", }, + }, + }, + }, + }, + { + name: "OperatorBooleanNotNode", + pos: position{line: 89, col: 1, offset: 2393}, + expr: &actionExpr{ + pos: position{line: 90, col: 5, offset: 2423}, + run: (*parser).callonOperatorBooleanNotNode1, + expr: &choiceExpr{ + pos: position{line: 90, col: 6, offset: 2424}, + alternatives: []any{ &litMatcher{ - pos: position{line: 80, col: 21, offset: 2199}, + pos: position{line: 90, col: 6, offset: 2424}, val: "NOT", ignoreCase: false, want: "\"NOT\"", }, &litMatcher{ - pos: position{line: 80, col: 29, offset: 2207}, - val: "+", - ignoreCase: false, - want: "\"+\"", - }, - &litMatcher{ - pos: position{line: 80, col: 35, offset: 2213}, + pos: position{line: 90, col: 14, offset: 2432}, val: "-", ignoreCase: false, want: "\"-\"", @@ -557,14 +596,28 @@ var g = &grammar{ }, }, }, + { + name: "OperatorBooleanOrNode", + pos: position{line: 94, col: 1, offset: 2494}, + expr: &actionExpr{ + pos: position{line: 95, col: 5, offset: 2523}, + run: (*parser).callonOperatorBooleanOrNode1, + expr: &litMatcher{ + pos: position{line: 95, col: 6, offset: 2524}, + val: "OR", + ignoreCase: false, + want: "\"OR\"", + }, + }, + }, { name: "OperatorColonNode", - pos: position{line: 84, col: 1, offset: 2275}, + pos: position{line: 99, 
col: 1, offset: 2587}, expr: &actionExpr{ - pos: position{line: 85, col: 5, offset: 2300}, + pos: position{line: 100, col: 5, offset: 2612}, run: (*parser).callonOperatorColonNode1, expr: &litMatcher{ - pos: position{line: 85, col: 5, offset: 2300}, + pos: position{line: 100, col: 5, offset: 2612}, val: ":", ignoreCase: false, want: "\":\"", @@ -573,12 +626,12 @@ var g = &grammar{ }, { name: "OperatorEqualNode", - pos: position{line: 89, col: 1, offset: 2361}, + pos: position{line: 104, col: 1, offset: 2673}, expr: &actionExpr{ - pos: position{line: 90, col: 5, offset: 2386}, + pos: position{line: 105, col: 5, offset: 2698}, run: (*parser).callonOperatorEqualNode1, expr: &litMatcher{ - pos: position{line: 90, col: 5, offset: 2386}, + pos: position{line: 105, col: 5, offset: 2698}, val: "=", ignoreCase: false, want: "\"=\"", @@ -587,12 +640,12 @@ var g = &grammar{ }, { name: "OperatorLessNode", - pos: position{line: 94, col: 1, offset: 2447}, + pos: position{line: 109, col: 1, offset: 2759}, expr: &actionExpr{ - pos: position{line: 95, col: 5, offset: 2471}, + pos: position{line: 110, col: 5, offset: 2783}, run: (*parser).callonOperatorLessNode1, expr: &litMatcher{ - pos: position{line: 95, col: 5, offset: 2471}, + pos: position{line: 110, col: 5, offset: 2783}, val: "<", ignoreCase: false, want: "\"<\"", @@ -601,12 +654,12 @@ var g = &grammar{ }, { name: "OperatorLessOrEqualNode", - pos: position{line: 99, col: 1, offset: 2532}, + pos: position{line: 114, col: 1, offset: 2844}, expr: &actionExpr{ - pos: position{line: 100, col: 5, offset: 2563}, + pos: position{line: 115, col: 5, offset: 2875}, run: (*parser).callonOperatorLessOrEqualNode1, expr: &litMatcher{ - pos: position{line: 100, col: 5, offset: 2563}, + pos: position{line: 115, col: 5, offset: 2875}, val: "<=", ignoreCase: false, want: "\"<=\"", @@ -615,12 +668,12 @@ var g = &grammar{ }, { name: "OperatorGreaterNode", - pos: position{line: 104, col: 1, offset: 2625}, + pos: position{line: 119, col: 1, 
offset: 2937}, expr: &actionExpr{ - pos: position{line: 105, col: 5, offset: 2652}, + pos: position{line: 120, col: 5, offset: 2964}, run: (*parser).callonOperatorGreaterNode1, expr: &litMatcher{ - pos: position{line: 105, col: 5, offset: 2652}, + pos: position{line: 120, col: 5, offset: 2964}, val: ">", ignoreCase: false, want: "\">\"", @@ -629,12 +682,12 @@ var g = &grammar{ }, { name: "OperatorGreaterOrEqualNode", - pos: position{line: 109, col: 1, offset: 2713}, + pos: position{line: 124, col: 1, offset: 3025}, expr: &actionExpr{ - pos: position{line: 110, col: 5, offset: 2747}, + pos: position{line: 125, col: 5, offset: 3059}, run: (*parser).callonOperatorGreaterOrEqualNode1, expr: &litMatcher{ - pos: position{line: 110, col: 5, offset: 2747}, + pos: position{line: 125, col: 5, offset: 3059}, val: ">=", ignoreCase: false, want: "\">=\"", @@ -643,27 +696,27 @@ var g = &grammar{ }, { name: "TimeYear", - pos: position{line: 119, col: 1, offset: 2933}, + pos: position{line: 134, col: 1, offset: 3245}, expr: &actionExpr{ - pos: position{line: 120, col: 5, offset: 2949}, + pos: position{line: 135, col: 5, offset: 3261}, run: (*parser).callonTimeYear1, expr: &seqExpr{ - pos: position{line: 120, col: 5, offset: 2949}, + pos: position{line: 135, col: 5, offset: 3261}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 120, col: 5, offset: 2949}, + pos: position{line: 135, col: 5, offset: 3261}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 120, col: 11, offset: 2955}, + pos: position{line: 135, col: 11, offset: 3267}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 120, col: 17, offset: 2961}, + pos: position{line: 135, col: 17, offset: 3273}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 120, col: 23, offset: 2967}, + pos: position{line: 135, col: 23, offset: 3279}, name: "Digit", }, }, @@ -672,19 +725,19 @@ var g = &grammar{ }, { name: "TimeMonth", - pos: position{line: 124, col: 1, offset: 3009}, + pos: position{line: 139, col: 1, offset: 3321}, 
expr: &actionExpr{ - pos: position{line: 125, col: 5, offset: 3026}, + pos: position{line: 140, col: 5, offset: 3338}, run: (*parser).callonTimeMonth1, expr: &seqExpr{ - pos: position{line: 125, col: 5, offset: 3026}, + pos: position{line: 140, col: 5, offset: 3338}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 125, col: 5, offset: 3026}, + pos: position{line: 140, col: 5, offset: 3338}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 125, col: 11, offset: 3032}, + pos: position{line: 140, col: 11, offset: 3344}, name: "Digit", }, }, @@ -693,19 +746,19 @@ var g = &grammar{ }, { name: "TimeDay", - pos: position{line: 129, col: 1, offset: 3074}, + pos: position{line: 144, col: 1, offset: 3386}, expr: &actionExpr{ - pos: position{line: 130, col: 5, offset: 3089}, + pos: position{line: 145, col: 5, offset: 3401}, run: (*parser).callonTimeDay1, expr: &seqExpr{ - pos: position{line: 130, col: 5, offset: 3089}, + pos: position{line: 145, col: 5, offset: 3401}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 130, col: 5, offset: 3089}, + pos: position{line: 145, col: 5, offset: 3401}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 130, col: 11, offset: 3095}, + pos: position{line: 145, col: 11, offset: 3407}, name: "Digit", }, }, @@ -714,19 +767,19 @@ var g = &grammar{ }, { name: "TimeHour", - pos: position{line: 134, col: 1, offset: 3137}, + pos: position{line: 149, col: 1, offset: 3449}, expr: &actionExpr{ - pos: position{line: 135, col: 5, offset: 3153}, + pos: position{line: 150, col: 5, offset: 3465}, run: (*parser).callonTimeHour1, expr: &seqExpr{ - pos: position{line: 135, col: 5, offset: 3153}, + pos: position{line: 150, col: 5, offset: 3465}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 135, col: 5, offset: 3153}, + pos: position{line: 150, col: 5, offset: 3465}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 135, col: 11, offset: 3159}, + pos: position{line: 150, col: 11, offset: 3471}, name: "Digit", }, }, @@ -735,19 +788,19 @@ 
var g = &grammar{ }, { name: "TimeMinute", - pos: position{line: 139, col: 1, offset: 3201}, + pos: position{line: 154, col: 1, offset: 3513}, expr: &actionExpr{ - pos: position{line: 140, col: 5, offset: 3219}, + pos: position{line: 155, col: 5, offset: 3531}, run: (*parser).callonTimeMinute1, expr: &seqExpr{ - pos: position{line: 140, col: 5, offset: 3219}, + pos: position{line: 155, col: 5, offset: 3531}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 140, col: 5, offset: 3219}, + pos: position{line: 155, col: 5, offset: 3531}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 140, col: 11, offset: 3225}, + pos: position{line: 155, col: 11, offset: 3537}, name: "Digit", }, }, @@ -756,19 +809,19 @@ var g = &grammar{ }, { name: "TimeSecond", - pos: position{line: 144, col: 1, offset: 3267}, + pos: position{line: 159, col: 1, offset: 3579}, expr: &actionExpr{ - pos: position{line: 145, col: 5, offset: 3285}, + pos: position{line: 160, col: 5, offset: 3597}, run: (*parser).callonTimeSecond1, expr: &seqExpr{ - pos: position{line: 145, col: 5, offset: 3285}, + pos: position{line: 160, col: 5, offset: 3597}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 145, col: 5, offset: 3285}, + pos: position{line: 160, col: 5, offset: 3597}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 145, col: 11, offset: 3291}, + pos: position{line: 160, col: 11, offset: 3603}, name: "Digit", }, }, @@ -777,35 +830,35 @@ var g = &grammar{ }, { name: "FullDate", - pos: position{line: 149, col: 1, offset: 3333}, + pos: position{line: 164, col: 1, offset: 3645}, expr: &actionExpr{ - pos: position{line: 150, col: 5, offset: 3349}, + pos: position{line: 165, col: 5, offset: 3661}, run: (*parser).callonFullDate1, expr: &seqExpr{ - pos: position{line: 150, col: 5, offset: 3349}, + pos: position{line: 165, col: 5, offset: 3661}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 150, col: 5, offset: 3349}, + pos: position{line: 165, col: 5, offset: 3661}, name: "TimeYear", }, 
&litMatcher{ - pos: position{line: 150, col: 14, offset: 3358}, + pos: position{line: 165, col: 14, offset: 3670}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 150, col: 18, offset: 3362}, + pos: position{line: 165, col: 18, offset: 3674}, name: "TimeMonth", }, &litMatcher{ - pos: position{line: 150, col: 28, offset: 3372}, + pos: position{line: 165, col: 28, offset: 3684}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 150, col: 32, offset: 3376}, + pos: position{line: 165, col: 32, offset: 3688}, name: "TimeDay", }, }, @@ -814,52 +867,52 @@ var g = &grammar{ }, { name: "FullTime", - pos: position{line: 154, col: 1, offset: 3420}, + pos: position{line: 169, col: 1, offset: 3732}, expr: &actionExpr{ - pos: position{line: 155, col: 5, offset: 3436}, + pos: position{line: 170, col: 5, offset: 3748}, run: (*parser).callonFullTime1, expr: &seqExpr{ - pos: position{line: 155, col: 5, offset: 3436}, + pos: position{line: 170, col: 5, offset: 3748}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 155, col: 5, offset: 3436}, + pos: position{line: 170, col: 5, offset: 3748}, name: "TimeHour", }, &litMatcher{ - pos: position{line: 155, col: 14, offset: 3445}, + pos: position{line: 170, col: 14, offset: 3757}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 155, col: 18, offset: 3449}, + pos: position{line: 170, col: 18, offset: 3761}, name: "TimeMinute", }, &litMatcher{ - pos: position{line: 155, col: 29, offset: 3460}, + pos: position{line: 170, col: 29, offset: 3772}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 155, col: 33, offset: 3464}, + pos: position{line: 170, col: 33, offset: 3776}, name: "TimeSecond", }, &zeroOrOneExpr{ - pos: position{line: 155, col: 44, offset: 3475}, + pos: position{line: 170, col: 44, offset: 3787}, expr: &seqExpr{ - pos: position{line: 155, col: 45, offset: 3476}, + pos: position{line: 170, col: 
45, offset: 3788}, exprs: []any{ &litMatcher{ - pos: position{line: 155, col: 45, offset: 3476}, + pos: position{line: 170, col: 45, offset: 3788}, val: ".", ignoreCase: false, want: "\".\"", }, &oneOrMoreExpr{ - pos: position{line: 155, col: 49, offset: 3480}, + pos: position{line: 170, col: 49, offset: 3792}, expr: &ruleRefExpr{ - pos: position{line: 155, col: 49, offset: 3480}, + pos: position{line: 170, col: 49, offset: 3792}, name: "Digit", }, }, @@ -867,28 +920,28 @@ var g = &grammar{ }, }, &choiceExpr{ - pos: position{line: 155, col: 59, offset: 3490}, + pos: position{line: 170, col: 59, offset: 3802}, alternatives: []any{ &litMatcher{ - pos: position{line: 155, col: 59, offset: 3490}, + pos: position{line: 170, col: 59, offset: 3802}, val: "Z", ignoreCase: false, want: "\"Z\"", }, &seqExpr{ - pos: position{line: 155, col: 65, offset: 3496}, + pos: position{line: 170, col: 65, offset: 3808}, exprs: []any{ &choiceExpr{ - pos: position{line: 155, col: 66, offset: 3497}, + pos: position{line: 170, col: 66, offset: 3809}, alternatives: []any{ &litMatcher{ - pos: position{line: 155, col: 66, offset: 3497}, + pos: position{line: 170, col: 66, offset: 3809}, val: "+", ignoreCase: false, want: "\"+\"", }, &litMatcher{ - pos: position{line: 155, col: 72, offset: 3503}, + pos: position{line: 170, col: 72, offset: 3815}, val: "-", ignoreCase: false, want: "\"-\"", @@ -896,17 +949,17 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 155, col: 77, offset: 3508}, + pos: position{line: 170, col: 77, offset: 3820}, name: "TimeHour", }, &litMatcher{ - pos: position{line: 155, col: 86, offset: 3517}, + pos: position{line: 170, col: 86, offset: 3829}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 155, col: 90, offset: 3521}, + pos: position{line: 170, col: 90, offset: 3833}, name: "TimeMinute", }, }, @@ -919,12 +972,12 @@ var g = &grammar{ }, { name: "Char", - pos: position{line: 163, col: 1, offset: 3692}, + pos: 
position{line: 178, col: 1, offset: 4004}, expr: &actionExpr{ - pos: position{line: 164, col: 5, offset: 3704}, + pos: position{line: 179, col: 5, offset: 4016}, run: (*parser).callonChar1, expr: &charClassMatcher{ - pos: position{line: 164, col: 5, offset: 3704}, + pos: position{line: 179, col: 5, offset: 4016}, val: "[A-Za-z]", ranges: []rune{'A', 'Z', 'a', 'z'}, ignoreCase: false, @@ -934,26 +987,26 @@ var g = &grammar{ }, { name: "String", - pos: position{line: 168, col: 1, offset: 3749}, + pos: position{line: 183, col: 1, offset: 4061}, expr: &actionExpr{ - pos: position{line: 169, col: 5, offset: 3763}, + pos: position{line: 184, col: 5, offset: 4075}, run: (*parser).callonString1, expr: &seqExpr{ - pos: position{line: 169, col: 5, offset: 3763}, + pos: position{line: 184, col: 5, offset: 4075}, exprs: []any{ &litMatcher{ - pos: position{line: 169, col: 5, offset: 3763}, + pos: position{line: 184, col: 5, offset: 4075}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, &labeledExpr{ - pos: position{line: 169, col: 9, offset: 3767}, + pos: position{line: 184, col: 9, offset: 4079}, label: "v", expr: &zeroOrMoreExpr{ - pos: position{line: 169, col: 11, offset: 3769}, + pos: position{line: 184, col: 11, offset: 4081}, expr: &charClassMatcher{ - pos: position{line: 169, col: 11, offset: 3769}, + pos: position{line: 184, col: 11, offset: 4081}, val: "[^\"]", chars: []rune{'"'}, ignoreCase: false, @@ -962,7 +1015,7 @@ var g = &grammar{ }, }, &litMatcher{ - pos: position{line: 169, col: 17, offset: 3775}, + pos: position{line: 184, col: 17, offset: 4087}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -973,12 +1026,12 @@ var g = &grammar{ }, { name: "Digit", - pos: position{line: 173, col: 1, offset: 3810}, + pos: position{line: 188, col: 1, offset: 4122}, expr: &actionExpr{ - pos: position{line: 174, col: 5, offset: 3823}, + pos: position{line: 189, col: 5, offset: 4135}, run: (*parser).callonDigit1, expr: &charClassMatcher{ - pos: position{line: 174, col: 5, 
offset: 3823}, + pos: position{line: 189, col: 5, offset: 4135}, val: "[0-9]", ranges: []rune{'0', '9'}, ignoreCase: false, @@ -988,11 +1041,11 @@ var g = &grammar{ }, { name: "_", - pos: position{line: 178, col: 1, offset: 3865}, + pos: position{line: 193, col: 1, offset: 4177}, expr: &zeroOrMoreExpr{ - pos: position{line: 179, col: 5, offset: 3874}, + pos: position{line: 194, col: 5, offset: 4186}, expr: &charClassMatcher{ - pos: position{line: 179, col: 5, offset: 3874}, + pos: position{line: 194, col: 5, offset: 4186}, val: "[ \\t]", chars: []rune{' ', '\t'}, ignoreCase: false, @@ -1091,15 +1144,37 @@ func (p *parser) callonWordNode1() (any, error) { return p.cur.onWordNode1(stack["v"]) } -func (c *current) onOperatorBooleanNode1() (any, error) { +func (c *current) onOperatorBooleanAndNode1() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonOperatorBooleanAndNode1() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onOperatorBooleanAndNode1() +} + +func (c *current) onOperatorBooleanNotNode1() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonOperatorBooleanNotNode1() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onOperatorBooleanNotNode1() +} + +func (c *current) onOperatorBooleanOrNode1() (any, error) { return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonOperatorBooleanNode1() (any, error) { +func (p *parser) callonOperatorBooleanOrNode1() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorBooleanNode1() + return p.cur.onOperatorBooleanOrNode1() } func (c *current) onOperatorColonNode1() (any, error) { diff --git a/services/search/pkg/query/kql/dictionary_test.go b/services/search/pkg/query/kql/dictionary_test.go index e60645f4ab1..faafbf19038 100644 --- a/services/search/pkg/query/kql/dictionary_test.go +++ b/services/search/pkg/query/kql/dictionary_test.go @@ -1,6 +1,7 @@ 
package kql_test import ( + "errors" "strings" "testing" "time" @@ -63,6 +64,25 @@ func TestParse(t *testing.T) { // SPEC ////////////////////////////////////////////////////////////////////////////// // https://msopenspecs.azureedge.net/files/MS-KQL/%5bMS-KQL%5d.pdf // + // 2.1.2 AND Operator + { + name: `cat AND dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `AND`, + expectedError: errors.New(""), + }, + { + name: `AND cat AND dog`, + expectedError: errors.New(""), + }, // 3.1.11 Implicit Operator { name: `cat dog`, @@ -727,11 +747,14 @@ func TestParse(t *testing.T) { q = tt.givenQuery } - parsedAST, err := kql.Parse("", []byte(q)) + parsedAST, err := kql.Builder{}.Build(q) if tt.expectedError != nil { - assert.Equal(err, tt.expectedError) - assert.Nil(parsedAST) + if tt.expectedError.Error() != "" { + assert.Equal(err, tt.expectedError) + } else { + assert.NotNil(err) + } return } diff --git a/services/search/pkg/query/kql/kql.go b/services/search/pkg/query/kql/kql.go index 0566278aa62..48c349e817c 100644 --- a/services/search/pkg/query/kql/kql.go +++ b/services/search/pkg/query/kql/kql.go @@ -2,6 +2,7 @@ package kql import ( + "errors" "strings" "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" @@ -24,8 +25,18 @@ type Builder struct{} func (b Builder) Build(q string) (*ast.Ast, error) { f, err := Parse("", []byte(q)) if err != nil { - return nil, err + var list errList + errors.As(err, &list) + + for _, listError := range list { + var parserError *parserError + switch { + case errors.As(listError, &parserError): + return nil, listError + } + } } + return f.(*ast.Ast), nil } diff --git a/services/search/pkg/query/kql/kql_test.go b/services/search/pkg/query/kql/kql_test.go index d18068ed575..0e8e871221f 100644 --- a/services/search/pkg/query/kql/kql_test.go +++ b/services/search/pkg/query/kql/kql_test.go @@ 
-21,7 +21,7 @@ func TestNewAST(t *testing.T) { { name: "error", givenQuery: kql.BoolAND, - shouldError: false, + shouldError: true, }, } From ed19e7adc4536782165aed41930f6c578d44a1ae Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Fri, 8 Sep 2023 14:03:02 +0200 Subject: [PATCH 3/8] enhancement: add kql docs and support for date and time only dateTimeRestriction queries --- go.mod | 1 + go.sum | 5 + services/search/pkg/query/kql/cast.go | 4 +- services/search/pkg/query/kql/dictionary.peg | 23 +- .../search/pkg/query/kql/dictionary_gen.go | 525 ++-- .../search/pkg/query/kql/dictionary_test.go | 87 +- services/search/pkg/query/kql/doc.go | 27 + .../github.com/araddon/dateparse/.travis.yml | 13 + vendor/github.com/araddon/dateparse/LICENSE | 21 + vendor/github.com/araddon/dateparse/README.md | 323 +++ .../github.com/araddon/dateparse/parseany.go | 2189 +++++++++++++++++ vendor/modules.txt | 3 + 12 files changed, 2977 insertions(+), 244 deletions(-) create mode 100644 services/search/pkg/query/kql/doc.go create mode 100644 vendor/github.com/araddon/dateparse/.travis.yml create mode 100644 vendor/github.com/araddon/dateparse/LICENSE create mode 100644 vendor/github.com/araddon/dateparse/README.md create mode 100644 vendor/github.com/araddon/dateparse/parseany.go diff --git a/go.mod b/go.mod index 1ac01b855f2..f3063d6d56a 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/MicahParks/keyfunc v1.9.0 github.com/Nerzal/gocloak/v13 v13.8.0 + github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de github.com/bbalet/stopwords v1.0.0 github.com/blevesearch/bleve/v2 v2.3.9 github.com/coreos/go-oidc v2.2.1+incompatible diff --git a/go.sum b/go.sum index 8719735e944..6b9a060e0b7 100644 --- a/go.sum +++ b/go.sum @@ -859,6 +859,8 @@ github.com/apache/arrow/go/v11 v11.0.0/go.mod 
h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4x github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA= +github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -1640,6 +1642,7 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= @@ -1861,6 +1864,7 @@ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qq github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 
github.com/riandyrn/otelchi v0.5.1 h1:0/45omeqpP7f/cvdL16GddQBfAEmZvUyl2QzLSE6uYo= github.com/riandyrn/otelchi v0.5.1/go.mod h1:ZxVxNEl+jQ9uHseRYIxKWRb3OY8YXFEu+EkNiiSNUEA= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -1892,6 +1896,7 @@ github.com/sacloud/libsacloud v1.36.2/go.mod h1:P7YAOVmnIn3DKHqCZcUKYUXmSwGBm3yS github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210127161313-bd30bebeac4f/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sciencemesh/meshdirectory-web v1.0.4 h1:1YSctF6PAXhoHUYCaeRTj7rHaF7b3rYrZf2R0VXBIbo= github.com/sciencemesh/meshdirectory-web v1.0.4/go.mod h1:fJSThTS3xf+sTdL0iXQoaQJssLI7tn7DetHMHUl4SRk= +github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= diff --git a/services/search/pkg/query/kql/cast.go b/services/search/pkg/query/kql/cast.go index 41c8183a62a..2f7c58c3531 100644 --- a/services/search/pkg/query/kql/cast.go +++ b/services/search/pkg/query/kql/cast.go @@ -4,6 +4,8 @@ import ( "fmt" "time" + "github.com/araddon/dateparse" + "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" ) @@ -56,5 +58,5 @@ func toTime(in interface{}) (time.Time, error) { return time.Time{}, err } - return time.Parse(time.RFC3339Nano, ts) + return 
dateparse.ParseLocal(ts) } diff --git a/services/search/pkg/query/kql/dictionary.peg b/services/search/pkg/query/kql/dictionary.peg index 289d3e4bdb8..df8784d807c 100644 --- a/services/search/pkg/query/kql/dictionary.peg +++ b/services/search/pkg/query/kql/dictionary.peg @@ -7,7 +7,10 @@ //////////////////////////////////////////////////////// AST <- - _ !OperatorBooleanAndNode _ nodes:Nodes _ { + _ !( + OperatorBooleanAndNode / + OperatorBooleanOrNode + ) _ nodes:Nodes _ { return buildAST(nodes, c.text, c.pos) } @@ -45,7 +48,18 @@ YesNoPropertyRestrictionNode <- } DateTimeRestrictionNode <- - k:Char+ o:(OperatorGreaterOrEqualNode / OperatorLessOrEqualNode / OperatorGreaterNode / OperatorLessNode / OperatorEqualNode / OperatorColonNode) '"'? v:(FullDate "T" FullTime) '"'? { + k:Char+ o:( + OperatorGreaterOrEqualNode / + OperatorLessOrEqualNode / + OperatorGreaterNode / + OperatorLessNode / + OperatorEqualNode / + OperatorColonNode + ) '"'? v:( + DateTime / + FullDate / + FullTime + ) '"'? 
{ return buildDateTimeNode(k, o, v, c.text, c.pos) } @@ -171,6 +185,11 @@ FullTime <- return c.text, nil } +DateTime + = FullDate "T" FullTime { + return c.text, nil + } + //////////////////////////////////////////////////////// // misc //////////////////////////////////////////////////////// diff --git a/services/search/pkg/query/kql/dictionary_gen.go b/services/search/pkg/query/kql/dictionary_gen.go index 67c9959e1c9..8f83d73d105 100644 --- a/services/search/pkg/query/kql/dictionary_gen.go +++ b/services/search/pkg/query/kql/dictionary_gen.go @@ -34,25 +34,34 @@ var g = &grammar{ }, ¬Expr{ pos: position{line: 10, col: 7, offset: 156}, - expr: &ruleRefExpr{ - pos: position{line: 10, col: 8, offset: 157}, - name: "OperatorBooleanAndNode", + expr: &choiceExpr{ + pos: position{line: 11, col: 9, offset: 167}, + alternatives: []any{ + &ruleRefExpr{ + pos: position{line: 11, col: 9, offset: 167}, + name: "OperatorBooleanAndNode", + }, + &ruleRefExpr{ + pos: position{line: 12, col: 9, offset: 200}, + name: "OperatorBooleanOrNode", + }, + }, }, }, &ruleRefExpr{ - pos: position{line: 10, col: 31, offset: 180}, + pos: position{line: 13, col: 7, offset: 228}, name: "_", }, &labeledExpr{ - pos: position{line: 10, col: 33, offset: 182}, + pos: position{line: 13, col: 9, offset: 230}, label: "nodes", expr: &ruleRefExpr{ - pos: position{line: 10, col: 39, offset: 188}, + pos: position{line: 13, col: 15, offset: 236}, name: "Nodes", }, }, &ruleRefExpr{ - pos: position{line: 10, col: 45, offset: 194}, + pos: position{line: 13, col: 21, offset: 242}, name: "_", }, }, @@ -61,53 +70,53 @@ var g = &grammar{ }, { name: "Nodes", - pos: position{line: 14, col: 1, offset: 251}, + pos: position{line: 17, col: 1, offset: 299}, expr: &actionExpr{ - pos: position{line: 15, col: 5, offset: 264}, + pos: position{line: 18, col: 5, offset: 312}, run: (*parser).callonNodes1, expr: &seqExpr{ - pos: position{line: 15, col: 5, offset: 264}, + pos: position{line: 18, col: 5, offset: 312}, exprs: 
[]any{ &ruleRefExpr{ - pos: position{line: 15, col: 5, offset: 264}, + pos: position{line: 18, col: 5, offset: 312}, name: "_", }, &labeledExpr{ - pos: position{line: 15, col: 7, offset: 266}, + pos: position{line: 18, col: 7, offset: 314}, label: "head", expr: &choiceExpr{ - pos: position{line: 16, col: 9, offset: 281}, + pos: position{line: 19, col: 9, offset: 329}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 16, col: 9, offset: 281}, + pos: position{line: 19, col: 9, offset: 329}, name: "GroupNode", }, &ruleRefExpr{ - pos: position{line: 17, col: 9, offset: 301}, + pos: position{line: 20, col: 9, offset: 349}, name: "PropertyRestrictionNodes", }, &ruleRefExpr{ - pos: position{line: 18, col: 9, offset: 336}, + pos: position{line: 21, col: 9, offset: 384}, name: "OperatorBooleanNodes", }, &ruleRefExpr{ - pos: position{line: 19, col: 9, offset: 367}, + pos: position{line: 22, col: 9, offset: 415}, name: "FreeTextKeywordNodes", }, }, }, }, &ruleRefExpr{ - pos: position{line: 20, col: 7, offset: 394}, + pos: position{line: 23, col: 7, offset: 442}, name: "_", }, &labeledExpr{ - pos: position{line: 20, col: 9, offset: 396}, + pos: position{line: 23, col: 9, offset: 444}, label: "tail", expr: &zeroOrOneExpr{ - pos: position{line: 20, col: 14, offset: 401}, + pos: position{line: 23, col: 14, offset: 449}, expr: &ruleRefExpr{ - pos: position{line: 20, col: 14, offset: 401}, + pos: position{line: 23, col: 14, offset: 449}, name: "Nodes", }, }, @@ -118,59 +127,59 @@ var g = &grammar{ }, { name: "GroupNode", - pos: position{line: 28, col: 1, offset: 579}, + pos: position{line: 31, col: 1, offset: 627}, expr: &actionExpr{ - pos: position{line: 29, col: 5, offset: 596}, + pos: position{line: 32, col: 5, offset: 644}, run: (*parser).callonGroupNode1, expr: &seqExpr{ - pos: position{line: 29, col: 5, offset: 596}, + pos: position{line: 32, col: 5, offset: 644}, exprs: []any{ &labeledExpr{ - pos: position{line: 29, col: 5, offset: 596}, + pos: position{line: 32, 
col: 5, offset: 644}, label: "k", expr: &zeroOrOneExpr{ - pos: position{line: 29, col: 7, offset: 598}, + pos: position{line: 32, col: 7, offset: 646}, expr: &oneOrMoreExpr{ - pos: position{line: 29, col: 8, offset: 599}, + pos: position{line: 32, col: 8, offset: 647}, expr: &ruleRefExpr{ - pos: position{line: 29, col: 8, offset: 599}, + pos: position{line: 32, col: 8, offset: 647}, name: "Char", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 29, col: 16, offset: 607}, + pos: position{line: 32, col: 16, offset: 655}, expr: &choiceExpr{ - pos: position{line: 29, col: 17, offset: 608}, + pos: position{line: 32, col: 17, offset: 656}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 29, col: 17, offset: 608}, + pos: position{line: 32, col: 17, offset: 656}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 29, col: 37, offset: 628}, + pos: position{line: 32, col: 37, offset: 676}, name: "OperatorEqualNode", }, }, }, }, &litMatcher{ - pos: position{line: 29, col: 57, offset: 648}, + pos: position{line: 32, col: 57, offset: 696}, val: "(", ignoreCase: false, want: "\"(\"", }, &labeledExpr{ - pos: position{line: 29, col: 61, offset: 652}, + pos: position{line: 32, col: 61, offset: 700}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 29, col: 63, offset: 654}, + pos: position{line: 32, col: 63, offset: 702}, name: "Nodes", }, }, &litMatcher{ - pos: position{line: 29, col: 69, offset: 660}, + pos: position{line: 32, col: 69, offset: 708}, val: ")", ignoreCase: false, want: "\")\"", @@ -181,20 +190,20 @@ var g = &grammar{ }, { name: "PropertyRestrictionNodes", - pos: position{line: 37, col: 1, offset: 864}, + pos: position{line: 40, col: 1, offset: 912}, expr: &choiceExpr{ - pos: position{line: 38, col: 5, offset: 896}, + pos: position{line: 41, col: 5, offset: 944}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 38, col: 5, offset: 896}, + pos: position{line: 41, col: 5, offset: 944}, name: "YesNoPropertyRestrictionNode", }, 
&ruleRefExpr{ - pos: position{line: 39, col: 5, offset: 931}, + pos: position{line: 42, col: 5, offset: 979}, name: "DateTimeRestrictionNode", }, &ruleRefExpr{ - pos: position{line: 40, col: 5, offset: 961}, + pos: position{line: 43, col: 5, offset: 1009}, name: "TextPropertyRestrictionNode", }, }, @@ -202,51 +211,51 @@ var g = &grammar{ }, { name: "YesNoPropertyRestrictionNode", - pos: position{line: 42, col: 1, offset: 990}, + pos: position{line: 45, col: 1, offset: 1038}, expr: &actionExpr{ - pos: position{line: 43, col: 5, offset: 1026}, + pos: position{line: 46, col: 5, offset: 1074}, run: (*parser).callonYesNoPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 43, col: 5, offset: 1026}, + pos: position{line: 46, col: 5, offset: 1074}, exprs: []any{ &labeledExpr{ - pos: position{line: 43, col: 5, offset: 1026}, + pos: position{line: 46, col: 5, offset: 1074}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 43, col: 7, offset: 1028}, + pos: position{line: 46, col: 7, offset: 1076}, expr: &ruleRefExpr{ - pos: position{line: 43, col: 7, offset: 1028}, + pos: position{line: 46, col: 7, offset: 1076}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 43, col: 14, offset: 1035}, + pos: position{line: 46, col: 14, offset: 1083}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 43, col: 14, offset: 1035}, + pos: position{line: 46, col: 14, offset: 1083}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 43, col: 34, offset: 1055}, + pos: position{line: 46, col: 34, offset: 1103}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 43, col: 53, offset: 1074}, + pos: position{line: 46, col: 53, offset: 1122}, label: "v", expr: &choiceExpr{ - pos: position{line: 43, col: 56, offset: 1077}, + pos: position{line: 46, col: 56, offset: 1125}, alternatives: []any{ &litMatcher{ - pos: position{line: 43, col: 56, offset: 1077}, + pos: position{line: 46, col: 56, offset: 1125}, val: "true", ignoreCase: 
false, want: "\"true\"", }, &litMatcher{ - pos: position{line: 43, col: 65, offset: 1086}, + pos: position{line: 46, col: 65, offset: 1134}, val: "false", ignoreCase: false, want: "\"false\"", @@ -260,93 +269,91 @@ var g = &grammar{ }, { name: "DateTimeRestrictionNode", - pos: position{line: 47, col: 1, offset: 1156}, + pos: position{line: 50, col: 1, offset: 1204}, expr: &actionExpr{ - pos: position{line: 48, col: 5, offset: 1187}, + pos: position{line: 51, col: 5, offset: 1235}, run: (*parser).callonDateTimeRestrictionNode1, expr: &seqExpr{ - pos: position{line: 48, col: 5, offset: 1187}, + pos: position{line: 51, col: 5, offset: 1235}, exprs: []any{ &labeledExpr{ - pos: position{line: 48, col: 5, offset: 1187}, + pos: position{line: 51, col: 5, offset: 1235}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 48, col: 7, offset: 1189}, + pos: position{line: 51, col: 7, offset: 1237}, expr: &ruleRefExpr{ - pos: position{line: 48, col: 7, offset: 1189}, + pos: position{line: 51, col: 7, offset: 1237}, name: "Char", }, }, }, &labeledExpr{ - pos: position{line: 48, col: 13, offset: 1195}, + pos: position{line: 51, col: 13, offset: 1243}, label: "o", expr: &choiceExpr{ - pos: position{line: 48, col: 16, offset: 1198}, + pos: position{line: 52, col: 9, offset: 1255}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 48, col: 16, offset: 1198}, + pos: position{line: 52, col: 9, offset: 1255}, name: "OperatorGreaterOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 45, offset: 1227}, + pos: position{line: 53, col: 9, offset: 1292}, name: "OperatorLessOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 71, offset: 1253}, + pos: position{line: 54, col: 9, offset: 1326}, name: "OperatorGreaterNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 93, offset: 1275}, + pos: position{line: 55, col: 9, offset: 1356}, name: "OperatorLessNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 112, offset: 1294}, + pos: position{line: 56, col: 9, 
offset: 1383}, name: "OperatorEqualNode", }, &ruleRefExpr{ - pos: position{line: 48, col: 132, offset: 1314}, + pos: position{line: 57, col: 9, offset: 1411}, name: "OperatorColonNode", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 48, col: 151, offset: 1333}, + pos: position{line: 58, col: 7, offset: 1435}, expr: &litMatcher{ - pos: position{line: 48, col: 151, offset: 1333}, + pos: position{line: 58, col: 7, offset: 1435}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, }, &labeledExpr{ - pos: position{line: 48, col: 156, offset: 1338}, + pos: position{line: 58, col: 12, offset: 1440}, label: "v", - expr: &seqExpr{ - pos: position{line: 48, col: 159, offset: 1341}, - exprs: []any{ + expr: &choiceExpr{ + pos: position{line: 59, col: 9, offset: 1452}, + alternatives: []any{ &ruleRefExpr{ - pos: position{line: 48, col: 159, offset: 1341}, - name: "FullDate", + pos: position{line: 59, col: 9, offset: 1452}, + name: "DateTime", }, - &litMatcher{ - pos: position{line: 48, col: 168, offset: 1350}, - val: "T", - ignoreCase: false, - want: "\"T\"", + &ruleRefExpr{ + pos: position{line: 60, col: 9, offset: 1471}, + name: "FullDate", }, &ruleRefExpr{ - pos: position{line: 48, col: 172, offset: 1354}, + pos: position{line: 61, col: 9, offset: 1490}, name: "FullTime", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 48, col: 182, offset: 1364}, + pos: position{line: 62, col: 7, offset: 1505}, expr: &litMatcher{ - pos: position{line: 48, col: 182, offset: 1364}, + pos: position{line: 62, col: 7, offset: 1505}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -358,51 +365,51 @@ var g = &grammar{ }, { name: "TextPropertyRestrictionNode", - pos: position{line: 52, col: 1, offset: 1435}, + pos: position{line: 66, col: 1, offset: 1576}, expr: &actionExpr{ - pos: position{line: 53, col: 5, offset: 1470}, + pos: position{line: 67, col: 5, offset: 1611}, run: (*parser).callonTextPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 53, col: 5, offset: 1470}, + pos: 
position{line: 67, col: 5, offset: 1611}, exprs: []any{ &labeledExpr{ - pos: position{line: 53, col: 5, offset: 1470}, + pos: position{line: 67, col: 5, offset: 1611}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 53, col: 7, offset: 1472}, + pos: position{line: 67, col: 7, offset: 1613}, expr: &ruleRefExpr{ - pos: position{line: 53, col: 7, offset: 1472}, + pos: position{line: 67, col: 7, offset: 1613}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 53, col: 14, offset: 1479}, + pos: position{line: 67, col: 14, offset: 1620}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 53, col: 14, offset: 1479}, + pos: position{line: 67, col: 14, offset: 1620}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 53, col: 34, offset: 1499}, + pos: position{line: 67, col: 34, offset: 1640}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 53, col: 53, offset: 1518}, + pos: position{line: 67, col: 53, offset: 1659}, label: "v", expr: &choiceExpr{ - pos: position{line: 53, col: 56, offset: 1521}, + pos: position{line: 67, col: 56, offset: 1662}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 53, col: 56, offset: 1521}, + pos: position{line: 67, col: 56, offset: 1662}, name: "String", }, &oneOrMoreExpr{ - pos: position{line: 53, col: 65, offset: 1530}, + pos: position{line: 67, col: 65, offset: 1671}, expr: &charClassMatcher{ - pos: position{line: 53, col: 65, offset: 1530}, + pos: position{line: 67, col: 65, offset: 1671}, val: "[^ ()]", chars: []rune{' ', '(', ')'}, ignoreCase: false, @@ -418,16 +425,16 @@ var g = &grammar{ }, { name: "FreeTextKeywordNodes", - pos: position{line: 61, col: 1, offset: 1736}, + pos: position{line: 75, col: 1, offset: 1877}, expr: &choiceExpr{ - pos: position{line: 62, col: 5, offset: 1764}, + pos: position{line: 76, col: 5, offset: 1905}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 62, col: 5, offset: 1764}, + pos: position{line: 76, col: 5, offset: 1905}, 
name: "PhraseNode", }, &ruleRefExpr{ - pos: position{line: 63, col: 5, offset: 1781}, + pos: position{line: 77, col: 5, offset: 1922}, name: "WordNode", }, }, @@ -435,40 +442,40 @@ var g = &grammar{ }, { name: "PhraseNode", - pos: position{line: 65, col: 1, offset: 1791}, + pos: position{line: 79, col: 1, offset: 1932}, expr: &actionExpr{ - pos: position{line: 66, col: 6, offset: 1810}, + pos: position{line: 80, col: 6, offset: 1951}, run: (*parser).callonPhraseNode1, expr: &seqExpr{ - pos: position{line: 66, col: 6, offset: 1810}, + pos: position{line: 80, col: 6, offset: 1951}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 66, col: 6, offset: 1810}, + pos: position{line: 80, col: 6, offset: 1951}, expr: &ruleRefExpr{ - pos: position{line: 66, col: 6, offset: 1810}, + pos: position{line: 80, col: 6, offset: 1951}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 66, col: 25, offset: 1829}, + pos: position{line: 80, col: 25, offset: 1970}, name: "_", }, &labeledExpr{ - pos: position{line: 66, col: 27, offset: 1831}, + pos: position{line: 80, col: 27, offset: 1972}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 66, col: 29, offset: 1833}, + pos: position{line: 80, col: 29, offset: 1974}, name: "String", }, }, &ruleRefExpr{ - pos: position{line: 66, col: 36, offset: 1840}, + pos: position{line: 80, col: 36, offset: 1981}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 66, col: 38, offset: 1842}, + pos: position{line: 80, col: 38, offset: 1983}, expr: &ruleRefExpr{ - pos: position{line: 66, col: 38, offset: 1842}, + pos: position{line: 80, col: 38, offset: 1983}, name: "OperatorColonNode", }, }, @@ -478,31 +485,31 @@ var g = &grammar{ }, { name: "WordNode", - pos: position{line: 70, col: 1, offset: 1923}, + pos: position{line: 84, col: 1, offset: 2064}, expr: &actionExpr{ - pos: position{line: 71, col: 6, offset: 1940}, + pos: position{line: 85, col: 6, offset: 2081}, run: (*parser).callonWordNode1, expr: &seqExpr{ - pos: 
position{line: 71, col: 6, offset: 1940}, + pos: position{line: 85, col: 6, offset: 2081}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 71, col: 6, offset: 1940}, + pos: position{line: 85, col: 6, offset: 2081}, expr: &ruleRefExpr{ - pos: position{line: 71, col: 6, offset: 1940}, + pos: position{line: 85, col: 6, offset: 2081}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 71, col: 25, offset: 1959}, + pos: position{line: 85, col: 25, offset: 2100}, name: "_", }, &labeledExpr{ - pos: position{line: 71, col: 27, offset: 1961}, + pos: position{line: 85, col: 27, offset: 2102}, label: "v", expr: &oneOrMoreExpr{ - pos: position{line: 71, col: 29, offset: 1963}, + pos: position{line: 85, col: 29, offset: 2104}, expr: &charClassMatcher{ - pos: position{line: 71, col: 29, offset: 1963}, + pos: position{line: 85, col: 29, offset: 2104}, val: "[^ :()]", chars: []rune{' ', ':', '(', ')'}, ignoreCase: false, @@ -511,13 +518,13 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 71, col: 38, offset: 1972}, + pos: position{line: 85, col: 38, offset: 2113}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 71, col: 40, offset: 1974}, + pos: position{line: 85, col: 40, offset: 2115}, expr: &ruleRefExpr{ - pos: position{line: 71, col: 40, offset: 1974}, + pos: position{line: 85, col: 40, offset: 2115}, name: "OperatorColonNode", }, }, @@ -527,20 +534,20 @@ var g = &grammar{ }, { name: "OperatorBooleanNodes", - pos: position{line: 79, col: 1, offset: 2183}, + pos: position{line: 93, col: 1, offset: 2324}, expr: &choiceExpr{ - pos: position{line: 80, col: 5, offset: 2211}, + pos: position{line: 94, col: 5, offset: 2352}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 80, col: 5, offset: 2211}, + pos: position{line: 94, col: 5, offset: 2352}, name: "OperatorBooleanAndNode", }, &ruleRefExpr{ - pos: position{line: 81, col: 5, offset: 2240}, + pos: position{line: 95, col: 5, offset: 2381}, name: "OperatorBooleanNotNode", }, 
&ruleRefExpr{ - pos: position{line: 82, col: 5, offset: 2269}, + pos: position{line: 96, col: 5, offset: 2410}, name: "OperatorBooleanOrNode", }, }, @@ -548,21 +555,21 @@ var g = &grammar{ }, { name: "OperatorBooleanAndNode", - pos: position{line: 84, col: 1, offset: 2292}, + pos: position{line: 98, col: 1, offset: 2433}, expr: &actionExpr{ - pos: position{line: 85, col: 5, offset: 2322}, + pos: position{line: 99, col: 5, offset: 2463}, run: (*parser).callonOperatorBooleanAndNode1, expr: &choiceExpr{ - pos: position{line: 85, col: 6, offset: 2323}, + pos: position{line: 99, col: 6, offset: 2464}, alternatives: []any{ &litMatcher{ - pos: position{line: 85, col: 6, offset: 2323}, + pos: position{line: 99, col: 6, offset: 2464}, val: "AND", ignoreCase: false, want: "\"AND\"", }, &litMatcher{ - pos: position{line: 85, col: 14, offset: 2331}, + pos: position{line: 99, col: 14, offset: 2472}, val: "+", ignoreCase: false, want: "\"+\"", @@ -573,21 +580,21 @@ var g = &grammar{ }, { name: "OperatorBooleanNotNode", - pos: position{line: 89, col: 1, offset: 2393}, + pos: position{line: 103, col: 1, offset: 2534}, expr: &actionExpr{ - pos: position{line: 90, col: 5, offset: 2423}, + pos: position{line: 104, col: 5, offset: 2564}, run: (*parser).callonOperatorBooleanNotNode1, expr: &choiceExpr{ - pos: position{line: 90, col: 6, offset: 2424}, + pos: position{line: 104, col: 6, offset: 2565}, alternatives: []any{ &litMatcher{ - pos: position{line: 90, col: 6, offset: 2424}, + pos: position{line: 104, col: 6, offset: 2565}, val: "NOT", ignoreCase: false, want: "\"NOT\"", }, &litMatcher{ - pos: position{line: 90, col: 14, offset: 2432}, + pos: position{line: 104, col: 14, offset: 2573}, val: "-", ignoreCase: false, want: "\"-\"", @@ -598,12 +605,12 @@ var g = &grammar{ }, { name: "OperatorBooleanOrNode", - pos: position{line: 94, col: 1, offset: 2494}, + pos: position{line: 108, col: 1, offset: 2635}, expr: &actionExpr{ - pos: position{line: 95, col: 5, offset: 2523}, + pos: 
position{line: 109, col: 5, offset: 2664}, run: (*parser).callonOperatorBooleanOrNode1, expr: &litMatcher{ - pos: position{line: 95, col: 6, offset: 2524}, + pos: position{line: 109, col: 6, offset: 2665}, val: "OR", ignoreCase: false, want: "\"OR\"", @@ -612,12 +619,12 @@ var g = &grammar{ }, { name: "OperatorColonNode", - pos: position{line: 99, col: 1, offset: 2587}, + pos: position{line: 113, col: 1, offset: 2728}, expr: &actionExpr{ - pos: position{line: 100, col: 5, offset: 2612}, + pos: position{line: 114, col: 5, offset: 2753}, run: (*parser).callonOperatorColonNode1, expr: &litMatcher{ - pos: position{line: 100, col: 5, offset: 2612}, + pos: position{line: 114, col: 5, offset: 2753}, val: ":", ignoreCase: false, want: "\":\"", @@ -626,12 +633,12 @@ var g = &grammar{ }, { name: "OperatorEqualNode", - pos: position{line: 104, col: 1, offset: 2673}, + pos: position{line: 118, col: 1, offset: 2814}, expr: &actionExpr{ - pos: position{line: 105, col: 5, offset: 2698}, + pos: position{line: 119, col: 5, offset: 2839}, run: (*parser).callonOperatorEqualNode1, expr: &litMatcher{ - pos: position{line: 105, col: 5, offset: 2698}, + pos: position{line: 119, col: 5, offset: 2839}, val: "=", ignoreCase: false, want: "\"=\"", @@ -640,12 +647,12 @@ var g = &grammar{ }, { name: "OperatorLessNode", - pos: position{line: 109, col: 1, offset: 2759}, + pos: position{line: 123, col: 1, offset: 2900}, expr: &actionExpr{ - pos: position{line: 110, col: 5, offset: 2783}, + pos: position{line: 124, col: 5, offset: 2924}, run: (*parser).callonOperatorLessNode1, expr: &litMatcher{ - pos: position{line: 110, col: 5, offset: 2783}, + pos: position{line: 124, col: 5, offset: 2924}, val: "<", ignoreCase: false, want: "\"<\"", @@ -654,12 +661,12 @@ var g = &grammar{ }, { name: "OperatorLessOrEqualNode", - pos: position{line: 114, col: 1, offset: 2844}, + pos: position{line: 128, col: 1, offset: 2985}, expr: &actionExpr{ - pos: position{line: 115, col: 5, offset: 2875}, + pos: 
position{line: 129, col: 5, offset: 3016}, run: (*parser).callonOperatorLessOrEqualNode1, expr: &litMatcher{ - pos: position{line: 115, col: 5, offset: 2875}, + pos: position{line: 129, col: 5, offset: 3016}, val: "<=", ignoreCase: false, want: "\"<=\"", @@ -668,12 +675,12 @@ var g = &grammar{ }, { name: "OperatorGreaterNode", - pos: position{line: 119, col: 1, offset: 2937}, + pos: position{line: 133, col: 1, offset: 3078}, expr: &actionExpr{ - pos: position{line: 120, col: 5, offset: 2964}, + pos: position{line: 134, col: 5, offset: 3105}, run: (*parser).callonOperatorGreaterNode1, expr: &litMatcher{ - pos: position{line: 120, col: 5, offset: 2964}, + pos: position{line: 134, col: 5, offset: 3105}, val: ">", ignoreCase: false, want: "\">\"", @@ -682,12 +689,12 @@ var g = &grammar{ }, { name: "OperatorGreaterOrEqualNode", - pos: position{line: 124, col: 1, offset: 3025}, + pos: position{line: 138, col: 1, offset: 3166}, expr: &actionExpr{ - pos: position{line: 125, col: 5, offset: 3059}, + pos: position{line: 139, col: 5, offset: 3200}, run: (*parser).callonOperatorGreaterOrEqualNode1, expr: &litMatcher{ - pos: position{line: 125, col: 5, offset: 3059}, + pos: position{line: 139, col: 5, offset: 3200}, val: ">=", ignoreCase: false, want: "\">=\"", @@ -696,27 +703,27 @@ var g = &grammar{ }, { name: "TimeYear", - pos: position{line: 134, col: 1, offset: 3245}, + pos: position{line: 148, col: 1, offset: 3386}, expr: &actionExpr{ - pos: position{line: 135, col: 5, offset: 3261}, + pos: position{line: 149, col: 5, offset: 3402}, run: (*parser).callonTimeYear1, expr: &seqExpr{ - pos: position{line: 135, col: 5, offset: 3261}, + pos: position{line: 149, col: 5, offset: 3402}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 135, col: 5, offset: 3261}, + pos: position{line: 149, col: 5, offset: 3402}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 135, col: 11, offset: 3267}, + pos: position{line: 149, col: 11, offset: 3408}, name: "Digit", }, &ruleRefExpr{ - pos: 
position{line: 135, col: 17, offset: 3273}, + pos: position{line: 149, col: 17, offset: 3414}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 135, col: 23, offset: 3279}, + pos: position{line: 149, col: 23, offset: 3420}, name: "Digit", }, }, @@ -725,19 +732,19 @@ var g = &grammar{ }, { name: "TimeMonth", - pos: position{line: 139, col: 1, offset: 3321}, + pos: position{line: 153, col: 1, offset: 3462}, expr: &actionExpr{ - pos: position{line: 140, col: 5, offset: 3338}, + pos: position{line: 154, col: 5, offset: 3479}, run: (*parser).callonTimeMonth1, expr: &seqExpr{ - pos: position{line: 140, col: 5, offset: 3338}, + pos: position{line: 154, col: 5, offset: 3479}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 140, col: 5, offset: 3338}, + pos: position{line: 154, col: 5, offset: 3479}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 140, col: 11, offset: 3344}, + pos: position{line: 154, col: 11, offset: 3485}, name: "Digit", }, }, @@ -746,19 +753,19 @@ var g = &grammar{ }, { name: "TimeDay", - pos: position{line: 144, col: 1, offset: 3386}, + pos: position{line: 158, col: 1, offset: 3527}, expr: &actionExpr{ - pos: position{line: 145, col: 5, offset: 3401}, + pos: position{line: 159, col: 5, offset: 3542}, run: (*parser).callonTimeDay1, expr: &seqExpr{ - pos: position{line: 145, col: 5, offset: 3401}, + pos: position{line: 159, col: 5, offset: 3542}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 145, col: 5, offset: 3401}, + pos: position{line: 159, col: 5, offset: 3542}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 145, col: 11, offset: 3407}, + pos: position{line: 159, col: 11, offset: 3548}, name: "Digit", }, }, @@ -767,19 +774,19 @@ var g = &grammar{ }, { name: "TimeHour", - pos: position{line: 149, col: 1, offset: 3449}, + pos: position{line: 163, col: 1, offset: 3590}, expr: &actionExpr{ - pos: position{line: 150, col: 5, offset: 3465}, + pos: position{line: 164, col: 5, offset: 3606}, run: (*parser).callonTimeHour1, expr: 
&seqExpr{ - pos: position{line: 150, col: 5, offset: 3465}, + pos: position{line: 164, col: 5, offset: 3606}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 150, col: 5, offset: 3465}, + pos: position{line: 164, col: 5, offset: 3606}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 150, col: 11, offset: 3471}, + pos: position{line: 164, col: 11, offset: 3612}, name: "Digit", }, }, @@ -788,19 +795,19 @@ var g = &grammar{ }, { name: "TimeMinute", - pos: position{line: 154, col: 1, offset: 3513}, + pos: position{line: 168, col: 1, offset: 3654}, expr: &actionExpr{ - pos: position{line: 155, col: 5, offset: 3531}, + pos: position{line: 169, col: 5, offset: 3672}, run: (*parser).callonTimeMinute1, expr: &seqExpr{ - pos: position{line: 155, col: 5, offset: 3531}, + pos: position{line: 169, col: 5, offset: 3672}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 155, col: 5, offset: 3531}, + pos: position{line: 169, col: 5, offset: 3672}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 155, col: 11, offset: 3537}, + pos: position{line: 169, col: 11, offset: 3678}, name: "Digit", }, }, @@ -809,19 +816,19 @@ var g = &grammar{ }, { name: "TimeSecond", - pos: position{line: 159, col: 1, offset: 3579}, + pos: position{line: 173, col: 1, offset: 3720}, expr: &actionExpr{ - pos: position{line: 160, col: 5, offset: 3597}, + pos: position{line: 174, col: 5, offset: 3738}, run: (*parser).callonTimeSecond1, expr: &seqExpr{ - pos: position{line: 160, col: 5, offset: 3597}, + pos: position{line: 174, col: 5, offset: 3738}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 160, col: 5, offset: 3597}, + pos: position{line: 174, col: 5, offset: 3738}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 160, col: 11, offset: 3603}, + pos: position{line: 174, col: 11, offset: 3744}, name: "Digit", }, }, @@ -830,35 +837,35 @@ var g = &grammar{ }, { name: "FullDate", - pos: position{line: 164, col: 1, offset: 3645}, + pos: position{line: 178, col: 1, offset: 3786}, expr: 
&actionExpr{ - pos: position{line: 165, col: 5, offset: 3661}, + pos: position{line: 179, col: 5, offset: 3802}, run: (*parser).callonFullDate1, expr: &seqExpr{ - pos: position{line: 165, col: 5, offset: 3661}, + pos: position{line: 179, col: 5, offset: 3802}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 165, col: 5, offset: 3661}, + pos: position{line: 179, col: 5, offset: 3802}, name: "TimeYear", }, &litMatcher{ - pos: position{line: 165, col: 14, offset: 3670}, + pos: position{line: 179, col: 14, offset: 3811}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 165, col: 18, offset: 3674}, + pos: position{line: 179, col: 18, offset: 3815}, name: "TimeMonth", }, &litMatcher{ - pos: position{line: 165, col: 28, offset: 3684}, + pos: position{line: 179, col: 28, offset: 3825}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 165, col: 32, offset: 3688}, + pos: position{line: 179, col: 32, offset: 3829}, name: "TimeDay", }, }, @@ -867,52 +874,52 @@ var g = &grammar{ }, { name: "FullTime", - pos: position{line: 169, col: 1, offset: 3732}, + pos: position{line: 183, col: 1, offset: 3873}, expr: &actionExpr{ - pos: position{line: 170, col: 5, offset: 3748}, + pos: position{line: 184, col: 5, offset: 3889}, run: (*parser).callonFullTime1, expr: &seqExpr{ - pos: position{line: 170, col: 5, offset: 3748}, + pos: position{line: 184, col: 5, offset: 3889}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 170, col: 5, offset: 3748}, + pos: position{line: 184, col: 5, offset: 3889}, name: "TimeHour", }, &litMatcher{ - pos: position{line: 170, col: 14, offset: 3757}, + pos: position{line: 184, col: 14, offset: 3898}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 170, col: 18, offset: 3761}, + pos: position{line: 184, col: 18, offset: 3902}, name: "TimeMinute", }, &litMatcher{ - pos: position{line: 170, col: 29, offset: 3772}, + pos: position{line: 184, col: 29, offset: 
3913}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 170, col: 33, offset: 3776}, + pos: position{line: 184, col: 33, offset: 3917}, name: "TimeSecond", }, &zeroOrOneExpr{ - pos: position{line: 170, col: 44, offset: 3787}, + pos: position{line: 184, col: 44, offset: 3928}, expr: &seqExpr{ - pos: position{line: 170, col: 45, offset: 3788}, + pos: position{line: 184, col: 45, offset: 3929}, exprs: []any{ &litMatcher{ - pos: position{line: 170, col: 45, offset: 3788}, + pos: position{line: 184, col: 45, offset: 3929}, val: ".", ignoreCase: false, want: "\".\"", }, &oneOrMoreExpr{ - pos: position{line: 170, col: 49, offset: 3792}, + pos: position{line: 184, col: 49, offset: 3933}, expr: &ruleRefExpr{ - pos: position{line: 170, col: 49, offset: 3792}, + pos: position{line: 184, col: 49, offset: 3933}, name: "Digit", }, }, @@ -920,28 +927,28 @@ var g = &grammar{ }, }, &choiceExpr{ - pos: position{line: 170, col: 59, offset: 3802}, + pos: position{line: 184, col: 59, offset: 3943}, alternatives: []any{ &litMatcher{ - pos: position{line: 170, col: 59, offset: 3802}, + pos: position{line: 184, col: 59, offset: 3943}, val: "Z", ignoreCase: false, want: "\"Z\"", }, &seqExpr{ - pos: position{line: 170, col: 65, offset: 3808}, + pos: position{line: 184, col: 65, offset: 3949}, exprs: []any{ &choiceExpr{ - pos: position{line: 170, col: 66, offset: 3809}, + pos: position{line: 184, col: 66, offset: 3950}, alternatives: []any{ &litMatcher{ - pos: position{line: 170, col: 66, offset: 3809}, + pos: position{line: 184, col: 66, offset: 3950}, val: "+", ignoreCase: false, want: "\"+\"", }, &litMatcher{ - pos: position{line: 170, col: 72, offset: 3815}, + pos: position{line: 184, col: 72, offset: 3956}, val: "-", ignoreCase: false, want: "\"-\"", @@ -949,17 +956,17 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 170, col: 77, offset: 3820}, + pos: position{line: 184, col: 77, offset: 3961}, name: "TimeHour", }, &litMatcher{ - pos: 
position{line: 170, col: 86, offset: 3829}, + pos: position{line: 184, col: 86, offset: 3970}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 170, col: 90, offset: 3833}, + pos: position{line: 184, col: 90, offset: 3974}, name: "TimeMinute", }, }, @@ -970,14 +977,41 @@ var g = &grammar{ }, }, }, + { + name: "DateTime", + pos: position{line: 188, col: 1, offset: 4022}, + expr: &actionExpr{ + pos: position{line: 189, col: 5, offset: 4035}, + run: (*parser).callonDateTime1, + expr: &seqExpr{ + pos: position{line: 189, col: 5, offset: 4035}, + exprs: []any{ + &ruleRefExpr{ + pos: position{line: 189, col: 5, offset: 4035}, + name: "FullDate", + }, + &litMatcher{ + pos: position{line: 189, col: 14, offset: 4044}, + val: "T", + ignoreCase: false, + want: "\"T\"", + }, + &ruleRefExpr{ + pos: position{line: 189, col: 18, offset: 4048}, + name: "FullTime", + }, + }, + }, + }, + }, { name: "Char", - pos: position{line: 178, col: 1, offset: 4004}, + pos: position{line: 197, col: 1, offset: 4214}, expr: &actionExpr{ - pos: position{line: 179, col: 5, offset: 4016}, + pos: position{line: 198, col: 5, offset: 4226}, run: (*parser).callonChar1, expr: &charClassMatcher{ - pos: position{line: 179, col: 5, offset: 4016}, + pos: position{line: 198, col: 5, offset: 4226}, val: "[A-Za-z]", ranges: []rune{'A', 'Z', 'a', 'z'}, ignoreCase: false, @@ -987,26 +1021,26 @@ var g = &grammar{ }, { name: "String", - pos: position{line: 183, col: 1, offset: 4061}, + pos: position{line: 202, col: 1, offset: 4271}, expr: &actionExpr{ - pos: position{line: 184, col: 5, offset: 4075}, + pos: position{line: 203, col: 5, offset: 4285}, run: (*parser).callonString1, expr: &seqExpr{ - pos: position{line: 184, col: 5, offset: 4075}, + pos: position{line: 203, col: 5, offset: 4285}, exprs: []any{ &litMatcher{ - pos: position{line: 184, col: 5, offset: 4075}, + pos: position{line: 203, col: 5, offset: 4285}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, &labeledExpr{ 
- pos: position{line: 184, col: 9, offset: 4079}, + pos: position{line: 203, col: 9, offset: 4289}, label: "v", expr: &zeroOrMoreExpr{ - pos: position{line: 184, col: 11, offset: 4081}, + pos: position{line: 203, col: 11, offset: 4291}, expr: &charClassMatcher{ - pos: position{line: 184, col: 11, offset: 4081}, + pos: position{line: 203, col: 11, offset: 4291}, val: "[^\"]", chars: []rune{'"'}, ignoreCase: false, @@ -1015,7 +1049,7 @@ var g = &grammar{ }, }, &litMatcher{ - pos: position{line: 184, col: 17, offset: 4087}, + pos: position{line: 203, col: 17, offset: 4297}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -1026,12 +1060,12 @@ var g = &grammar{ }, { name: "Digit", - pos: position{line: 188, col: 1, offset: 4122}, + pos: position{line: 207, col: 1, offset: 4332}, expr: &actionExpr{ - pos: position{line: 189, col: 5, offset: 4135}, + pos: position{line: 208, col: 5, offset: 4345}, run: (*parser).callonDigit1, expr: &charClassMatcher{ - pos: position{line: 189, col: 5, offset: 4135}, + pos: position{line: 208, col: 5, offset: 4345}, val: "[0-9]", ranges: []rune{'0', '9'}, ignoreCase: false, @@ -1041,11 +1075,11 @@ var g = &grammar{ }, { name: "_", - pos: position{line: 193, col: 1, offset: 4177}, + pos: position{line: 212, col: 1, offset: 4387}, expr: &zeroOrMoreExpr{ - pos: position{line: 194, col: 5, offset: 4186}, + pos: position{line: 213, col: 5, offset: 4396}, expr: &charClassMatcher{ - pos: position{line: 194, col: 5, offset: 4186}, + pos: position{line: 213, col: 5, offset: 4396}, val: "[ \\t]", chars: []rune{' ', '\t'}, ignoreCase: false, @@ -1331,6 +1365,17 @@ func (p *parser) callonFullTime1() (any, error) { return p.cur.onFullTime1() } +func (c *current) onDateTime1() (any, error) { + return c.text, nil + +} + +func (p *parser) callonDateTime1() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onDateTime1() +} + func (c *current) onChar1() (any, error) { return c.text, nil diff --git 
a/services/search/pkg/query/kql/dictionary_test.go b/services/search/pkg/query/kql/dictionary_test.go index faafbf19038..7421f524541 100644 --- a/services/search/pkg/query/kql/dictionary_test.go +++ b/services/search/pkg/query/kql/dictionary_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/araddon/dateparse" tAssert "github.com/stretchr/testify/assert" "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" @@ -14,7 +15,7 @@ import ( ) var mustParseTime = func(t *testing.T, ts string) time.Time { - tp, err := time.Parse(time.RFC3339Nano, ts) + tp, err := dateparse.ParseLocal(ts) if err != nil { t.Fatalf("time.Parse(...) error = %v", err) } @@ -62,9 +63,13 @@ func TestParse(t *testing.T) { expectedError error }{ // SPEC ////////////////////////////////////////////////////////////////////////////// + // // https://msopenspecs.azureedge.net/files/MS-KQL/%5bMS-KQL%5d.pdf + // https://learn.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-kql/3bbf06cd-8fc1-4277-bd92-8661ccd3c9b0 // + // ++ // 2.1.2 AND Operator + // 3.1.2 AND Operator { name: `cat AND dog`, expectedAst: &ast.Ast{ @@ -83,6 +88,51 @@ func TestParse(t *testing.T) { name: `AND cat AND dog`, expectedError: errors.New(""), }, + // ++ + // 2.1.6 NOT Operator + // 3.1.6 NOT Operator + { + name: `cat NOT dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `NOT dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + // ++ + // 2.1.8 OR Operator + // 3.1.8 OR Operator + { + name: `cat OR dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `OR`, + expectedError: errors.New(""), + 
}, + { + name: `OR cat AND dog`, + expectedError: errors.New(""), + }, + // ++ // 3.1.11 Implicit Operator { name: `cat dog`, @@ -122,6 +172,8 @@ func TestParse(t *testing.T) { }, }, }, + // ++ + // 2.1.12 Parentheses // 3.1.12 Parentheses { name: `(cat OR dog) AND fox`, @@ -137,6 +189,7 @@ func TestParse(t *testing.T) { }, }, }, + // ++ // 3.2.3 Implicit Operator for Property Restriction { name: `author:"John Smith" filetype:docx`, @@ -198,6 +251,7 @@ func TestParse(t *testing.T) { }, }, }, + // ++ // 3.3.1.1.1 Implicit AND Operator { name: `cat +dog`, @@ -361,6 +415,37 @@ func TestParse(t *testing.T) { }, }, }, + // ++ + // 2.3.5 Date Tokens + // 3.3.5 Date Tokens + { + name: `Modified:2023-09-05`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.DateTimeNode{ + Key: "Modified", + Operator: &ast.OperatorNode{Value: ":"}, + Value: mustParseTime(t, "2023-09-05"), + }, + }, + }, + }, + { + name: `Modified:"2008-01-29"`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.DateTimeNode{ + Key: "Modified", + Operator: &ast.OperatorNode{Value: ":"}, + Value: mustParseTime(t, "2008-01-29"), + }, + }, + }, + }, + { + name: `Modified:today`, + skip: true, + }, ////////////////////////////////////////////////////////////////////////////////////// // everything else { diff --git a/services/search/pkg/query/kql/doc.go b/services/search/pkg/query/kql/doc.go new file mode 100644 index 00000000000..577ada26b72 --- /dev/null +++ b/services/search/pkg/query/kql/doc.go @@ -0,0 +1,27 @@ +/* +Package kql provides the ability to work with kql queries. + +Not every aspect of the spec is implemented yet. +The language support will grow over time if needed. 
+ +The following spec parts are supported and tested: + - 2.1.2 AND Operator + - 2.1.6 NOT Operator + - 2.1.8 OR Operator + - 2.1.12 Parentheses + - 2.3.5 Date Tokens + - Human tokens not implemented + - 3.1.11 Implicit Operator + - 3.1.12 Parentheses + - 3.1.2 AND Operator + - 3.1.6 NOT Operator + - 3.1.8 OR Operator + - 3.2.3 Implicit Operator for Property Restriction + - 3.3.1.1.1 Implicit AND Operator + - 3.3.5 Date Tokens + +References: + - https://learn.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-kql/3bbf06cd-8fc1-4277-bd92-8661ccd3c9b0 + - https://msopenspecs.azureedge.net/files/MS-KQL/%5bMS-KQL%5d.pdf +*/ +package kql diff --git a/vendor/github.com/araddon/dateparse/.travis.yml b/vendor/github.com/araddon/dateparse/.travis.yml new file mode 100644 index 00000000000..3b4b17777fb --- /dev/null +++ b/vendor/github.com/araddon/dateparse/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.13.x + +before_install: + - go get -t -v ./... + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/araddon/dateparse/LICENSE b/vendor/github.com/araddon/dateparse/LICENSE new file mode 100644 index 00000000000..f675ed313ac --- /dev/null +++ b/vendor/github.com/araddon/dateparse/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Aaron Raddon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial 
portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/araddon/dateparse/README.md b/vendor/github.com/araddon/dateparse/README.md new file mode 100644 index 00000000000..fe682dd5652 --- /dev/null +++ b/vendor/github.com/araddon/dateparse/README.md @@ -0,0 +1,323 @@ +Go Date Parser +--------------------------- + +Parse many date strings without knowing format in advance. Uses a scanner to read bytes and use a state machine to find format. Much faster than shotgun based parse methods. See [bench_test.go](https://github.com/araddon/dateparse/blob/master/bench_test.go) for performance comparison. + + +[![Code Coverage](https://codecov.io/gh/araddon/dateparse/branch/master/graph/badge.svg)](https://codecov.io/gh/araddon/dateparse) +[![GoDoc](https://godoc.org/github.com/araddon/dateparse?status.svg)](http://godoc.org/github.com/araddon/dateparse) +[![Build Status](https://travis-ci.org/araddon/dateparse.svg?branch=master)](https://travis-ci.org/araddon/dateparse) +[![Go ReportCard](https://goreportcard.com/badge/araddon/dateparse)](https://goreportcard.com/report/araddon/dateparse) + +**MM/DD/YYYY VS DD/MM/YYYY** Right now this uses mm/dd/yyyy WHEN ambiguous if this is not desired behavior, use `ParseStrict` which will fail on ambiguous date strings. + +**Timezones** The location your server is configured affects the results! See example or https://play.golang.org/p/IDHRalIyXh and last paragraph here https://golang.org/pkg/time/#Parse. 
+ + +```go + +// Normal parse. Equivalent Timezone rules as time.Parse() +t, err := dateparse.ParseAny("3/1/2014") + +// Parse Strict, error on ambigous mm/dd vs dd/mm dates +t, err := dateparse.ParseStrict("3/1/2014") +> returns error + +// Return a string that represents the layout to parse the given date-time. +layout, err := dateparse.ParseFormat("May 8, 2009 5:57:51 PM") +> "Jan 2, 2006 3:04:05 PM" + +``` + +cli tool for testing dateformats +---------------------------------- + +[Date Parse CLI](https://github.com/araddon/dateparse/blob/master/dateparse) + + +Extended example +------------------- + +https://github.com/araddon/dateparse/blob/master/example/main.go + +```go +package main + +import ( + "flag" + "fmt" + "time" + + "github.com/scylladb/termtables" + "github.com/araddon/dateparse" +) + +var examples = []string{ + "May 8, 2009 5:57:51 PM", + "oct 7, 1970", + "oct 7, '70", + "oct. 7, 1970", + "oct. 7, 70", + "Mon Jan 2 15:04:05 2006", + "Mon Jan 2 15:04:05 MST 2006", + "Mon Jan 02 15:04:05 -0700 2006", + "Monday, 02-Jan-06 15:04:05 MST", + "Mon, 02 Jan 2006 15:04:05 MST", + "Tue, 11 Jul 2017 16:28:13 +0200 (CEST)", + "Mon, 02 Jan 2006 15:04:05 -0700", + "Mon 30 Sep 2018 09:09:09 PM UTC", + "Mon Aug 10 15:44:11 UTC+0100 2015", + "Thu, 4 Jan 2018 17:53:36 +0000", + "Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time)", + "Sun, 3 Jan 2021 00:12:23 +0800 (GMT+08:00)", + "September 17, 2012 10:09am", + "September 17, 2012 at 10:09am PST-08", + "September 17, 2012, 10:10:09", + "October 7, 1970", + "October 7th, 1970", + "12 Feb 2006, 19:17", + "12 Feb 2006 19:17", + "14 May 2019 19:11:40.164", + "7 oct 70", + "7 oct 1970", + "03 February 2013", + "1 July 2013", + "2013-Feb-03", + // dd/Mon/yyy alpha Months + "06/Jan/2008:15:04:05 -0700", + "06/Jan/2008 15:04:05 -0700", + // mm/dd/yy + "3/31/2014", + "03/31/2014", + "08/21/71", + "8/1/71", + "4/8/2014 22:05", + "04/08/2014 22:05", + "4/8/14 22:05", + "04/2/2014 03:00:51", + 
"8/8/1965 12:00:00 AM", + "8/8/1965 01:00:01 PM", + "8/8/1965 01:00 PM", + "8/8/1965 1:00 PM", + "8/8/1965 12:00 AM", + "4/02/2014 03:00:51", + "03/19/2012 10:11:59", + "03/19/2012 10:11:59.3186369", + // yyyy/mm/dd + "2014/3/31", + "2014/03/31", + "2014/4/8 22:05", + "2014/04/08 22:05", + "2014/04/2 03:00:51", + "2014/4/02 03:00:51", + "2012/03/19 10:11:59", + "2012/03/19 10:11:59.3186369", + // yyyy:mm:dd + "2014:3:31", + "2014:03:31", + "2014:4:8 22:05", + "2014:04:08 22:05", + "2014:04:2 03:00:51", + "2014:4:02 03:00:51", + "2012:03:19 10:11:59", + "2012:03:19 10:11:59.3186369", + // Chinese + "2014年04月08日", + // yyyy-mm-ddThh + "2006-01-02T15:04:05+0000", + "2009-08-12T22:15:09-07:00", + "2009-08-12T22:15:09", + "2009-08-12T22:15:09.988", + "2009-08-12T22:15:09Z", + "2017-07-19T03:21:51:897+0100", + "2019-05-29T08:41-04", // no seconds, 2 digit TZ offset + // yyyy-mm-dd hh:mm:ss + "2014-04-26 17:24:37.3186369", + "2012-08-03 18:31:59.257000000", + "2014-04-26 17:24:37.123", + "2013-04-01 22:43", + "2013-04-01 22:43:22", + "2014-12-16 06:20:00 UTC", + "2014-12-16 06:20:00 GMT", + "2014-04-26 05:24:37 PM", + "2014-04-26 13:13:43 +0800", + "2014-04-26 13:13:43 +0800 +08", + "2014-04-26 13:13:44 +09:00", + "2012-08-03 18:31:59.257000000 +0000 UTC", + "2015-09-30 18:48:56.35272715 +0000 UTC", + "2015-02-18 00:12:00 +0000 GMT", + "2015-02-18 00:12:00 +0000 UTC", + "2015-02-08 03:02:00 +0300 MSK m=+0.000000001", + "2015-02-08 03:02:00.001 +0300 MSK m=+0.000000001", + "2017-07-19 03:21:51+00:00", + "2014-04-26", + "2014-04", + "2014", + "2014-05-11 08:20:13,787", + // yyyy-mm-dd-07:00 + "2020-07-20+08:00", + // mm.dd.yy + "3.31.2014", + "03.31.2014", + "08.21.71", + "2014.03", + "2014.03.30", + // yyyymmdd and similar + "20140601", + "20140722105203", + // yymmdd hh:mm:yy mysql log + // 080313 05:21:55 mysqld started + "171113 14:14:20", + // unix seconds, ms, micro, nano + "1332151919", + "1384216367189", + "1384216367111222", + "1384216367111222333", +} + +var ( + 
timezone = "" +) + +func main() { + flag.StringVar(&timezone, "timezone", "UTC", "Timezone aka `America/Los_Angeles` formatted time-zone") + flag.Parse() + + if timezone != "" { + // NOTE: This is very, very important to understand + // time-parsing in go + loc, err := time.LoadLocation(timezone) + if err != nil { + panic(err.Error()) + } + time.Local = loc + } + + table := termtables.CreateTable() + + table.AddHeaders("Input", "Parsed, and Output as %v") + for _, dateExample := range examples { + t, err := dateparse.ParseLocal(dateExample) + if err != nil { + panic(err.Error()) + } + table.AddRow(dateExample, fmt.Sprintf("%v", t)) + } + fmt.Println(table.Render()) +} + +/* ++-------------------------------------------------------+-----------------------------------------+ +| Input | Parsed, and Output as %v | ++-------------------------------------------------------+-----------------------------------------+ +| May 8, 2009 5:57:51 PM | 2009-05-08 17:57:51 +0000 UTC | +| oct 7, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| oct 7, '70 | 1970-10-07 00:00:00 +0000 UTC | +| oct. 7, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| oct. 
7, 70 | 1970-10-07 00:00:00 +0000 UTC | +| Mon Jan 2 15:04:05 2006 | 2006-01-02 15:04:05 +0000 UTC | +| Mon Jan 2 15:04:05 MST 2006 | 2006-01-02 15:04:05 +0000 MST | +| Mon Jan 02 15:04:05 -0700 2006 | 2006-01-02 15:04:05 -0700 -0700 | +| Monday, 02-Jan-06 15:04:05 MST | 2006-01-02 15:04:05 +0000 MST | +| Mon, 02 Jan 2006 15:04:05 MST | 2006-01-02 15:04:05 +0000 MST | +| Tue, 11 Jul 2017 16:28:13 +0200 (CEST) | 2017-07-11 16:28:13 +0200 +0200 | +| Mon, 02 Jan 2006 15:04:05 -0700 | 2006-01-02 15:04:05 -0700 -0700 | +| Mon 30 Sep 2018 09:09:09 PM UTC | 2018-09-30 21:09:09 +0000 UTC | +| Mon Aug 10 15:44:11 UTC+0100 2015 | 2015-08-10 15:44:11 +0000 UTC | +| Thu, 4 Jan 2018 17:53:36 +0000 | 2018-01-04 17:53:36 +0000 UTC | +| Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) | 2015-07-03 18:04:07 +0100 GMT | +| Sun, 3 Jan 2021 00:12:23 +0800 (GMT+08:00) | 2021-01-03 00:12:23 +0800 +0800 | +| September 17, 2012 10:09am | 2012-09-17 10:09:00 +0000 UTC | +| September 17, 2012 at 10:09am PST-08 | 2012-09-17 10:09:00 -0800 PST | +| September 17, 2012, 10:10:09 | 2012-09-17 10:10:09 +0000 UTC | +| October 7, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| October 7th, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| 12 Feb 2006, 19:17 | 2006-02-12 19:17:00 +0000 UTC | +| 12 Feb 2006 19:17 | 2006-02-12 19:17:00 +0000 UTC | +| 14 May 2019 19:11:40.164 | 2019-05-14 19:11:40.164 +0000 UTC | +| 7 oct 70 | 1970-10-07 00:00:00 +0000 UTC | +| 7 oct 1970 | 1970-10-07 00:00:00 +0000 UTC | +| 03 February 2013 | 2013-02-03 00:00:00 +0000 UTC | +| 1 July 2013 | 2013-07-01 00:00:00 +0000 UTC | +| 2013-Feb-03 | 2013-02-03 00:00:00 +0000 UTC | +| 06/Jan/2008:15:04:05 -0700 | 2008-01-06 15:04:05 -0700 -0700 | +| 06/Jan/2008 15:04:05 -0700 | 2008-01-06 15:04:05 -0700 -0700 | +| 3/31/2014 | 2014-03-31 00:00:00 +0000 UTC | +| 03/31/2014 | 2014-03-31 00:00:00 +0000 UTC | +| 08/21/71 | 1971-08-21 00:00:00 +0000 UTC | +| 8/1/71 | 1971-08-01 00:00:00 +0000 UTC | +| 4/8/2014 22:05 | 2014-04-08 22:05:00 +0000 
UTC | +| 04/08/2014 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 4/8/14 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 04/2/2014 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 8/8/1965 12:00:00 AM | 1965-08-08 00:00:00 +0000 UTC | +| 8/8/1965 01:00:01 PM | 1965-08-08 13:00:01 +0000 UTC | +| 8/8/1965 01:00 PM | 1965-08-08 13:00:00 +0000 UTC | +| 8/8/1965 1:00 PM | 1965-08-08 13:00:00 +0000 UTC | +| 8/8/1965 12:00 AM | 1965-08-08 00:00:00 +0000 UTC | +| 4/02/2014 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 03/19/2012 10:11:59 | 2012-03-19 10:11:59 +0000 UTC | +| 03/19/2012 10:11:59.3186369 | 2012-03-19 10:11:59.3186369 +0000 UTC | +| 2014/3/31 | 2014-03-31 00:00:00 +0000 UTC | +| 2014/03/31 | 2014-03-31 00:00:00 +0000 UTC | +| 2014/4/8 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 2014/04/08 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 2014/04/2 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 2014/4/02 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 2012/03/19 10:11:59 | 2012-03-19 10:11:59 +0000 UTC | +| 2012/03/19 10:11:59.3186369 | 2012-03-19 10:11:59.3186369 +0000 UTC | +| 2014:3:31 | 2014-03-31 00:00:00 +0000 UTC | +| 2014:03:31 | 2014-03-31 00:00:00 +0000 UTC | +| 2014:4:8 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 2014:04:08 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 2014:04:2 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 2014:4:02 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 2012:03:19 10:11:59 | 2012-03-19 10:11:59 +0000 UTC | +| 2012:03:19 10:11:59.3186369 | 2012-03-19 10:11:59.3186369 +0000 UTC | +| 2014年04月08日 | 2014-04-08 00:00:00 +0000 UTC | +| 2006-01-02T15:04:05+0000 | 2006-01-02 15:04:05 +0000 UTC | +| 2009-08-12T22:15:09-07:00 | 2009-08-12 22:15:09 -0700 -0700 | +| 2009-08-12T22:15:09 | 2009-08-12 22:15:09 +0000 UTC | +| 2009-08-12T22:15:09.988 | 2009-08-12 22:15:09.988 +0000 UTC | +| 2009-08-12T22:15:09Z | 2009-08-12 22:15:09 +0000 UTC | +| 2017-07-19T03:21:51:897+0100 | 2017-07-19 03:21:51.897 +0100 +0100 | +| 2019-05-29T08:41-04 | 2019-05-29 08:41:00 -0400 
-0400 | +| 2014-04-26 17:24:37.3186369 | 2014-04-26 17:24:37.3186369 +0000 UTC | +| 2012-08-03 18:31:59.257000000 | 2012-08-03 18:31:59.257 +0000 UTC | +| 2014-04-26 17:24:37.123 | 2014-04-26 17:24:37.123 +0000 UTC | +| 2013-04-01 22:43 | 2013-04-01 22:43:00 +0000 UTC | +| 2013-04-01 22:43:22 | 2013-04-01 22:43:22 +0000 UTC | +| 2014-12-16 06:20:00 UTC | 2014-12-16 06:20:00 +0000 UTC | +| 2014-12-16 06:20:00 GMT | 2014-12-16 06:20:00 +0000 UTC | +| 2014-04-26 05:24:37 PM | 2014-04-26 17:24:37 +0000 UTC | +| 2014-04-26 13:13:43 +0800 | 2014-04-26 13:13:43 +0800 +0800 | +| 2014-04-26 13:13:43 +0800 +08 | 2014-04-26 13:13:43 +0800 +0800 | +| 2014-04-26 13:13:44 +09:00 | 2014-04-26 13:13:44 +0900 +0900 | +| 2012-08-03 18:31:59.257000000 +0000 UTC | 2012-08-03 18:31:59.257 +0000 UTC | +| 2015-09-30 18:48:56.35272715 +0000 UTC | 2015-09-30 18:48:56.35272715 +0000 UTC | +| 2015-02-18 00:12:00 +0000 GMT | 2015-02-18 00:12:00 +0000 UTC | +| 2015-02-18 00:12:00 +0000 UTC | 2015-02-18 00:12:00 +0000 UTC | +| 2015-02-08 03:02:00 +0300 MSK m=+0.000000001 | 2015-02-08 03:02:00 +0300 +0300 | +| 2015-02-08 03:02:00.001 +0300 MSK m=+0.000000001 | 2015-02-08 03:02:00.001 +0300 +0300 | +| 2017-07-19 03:21:51+00:00 | 2017-07-19 03:21:51 +0000 UTC | +| 2014-04-26 | 2014-04-26 00:00:00 +0000 UTC | +| 2014-04 | 2014-04-01 00:00:00 +0000 UTC | +| 2014 | 2014-01-01 00:00:00 +0000 UTC | +| 2014-05-11 08:20:13,787 | 2014-05-11 08:20:13.787 +0000 UTC | +| 2020-07-20+08:00 | 2020-07-20 00:00:00 +0800 +0800 | +| 3.31.2014 | 2014-03-31 00:00:00 +0000 UTC | +| 03.31.2014 | 2014-03-31 00:00:00 +0000 UTC | +| 08.21.71 | 1971-08-21 00:00:00 +0000 UTC | +| 2014.03 | 2014-03-01 00:00:00 +0000 UTC | +| 2014.03.30 | 2014-03-30 00:00:00 +0000 UTC | +| 20140601 | 2014-06-01 00:00:00 +0000 UTC | +| 20140722105203 | 2014-07-22 10:52:03 +0000 UTC | +| 171113 14:14:20 | 2017-11-13 14:14:20 +0000 UTC | +| 1332151919 | 2012-03-19 10:11:59 +0000 UTC | +| 1384216367189 | 2013-11-12 00:32:47.189 +0000 UTC | +| 
1384216367111222 | 2013-11-12 00:32:47.111222 +0000 UTC | +| 1384216367111222333 | 2013-11-12 00:32:47.111222333 +0000 UTC | ++-------------------------------------------------------+-----------------------------------------+ +*/ + +``` diff --git a/vendor/github.com/araddon/dateparse/parseany.go b/vendor/github.com/araddon/dateparse/parseany.go new file mode 100644 index 00000000000..b9668b21bd4 --- /dev/null +++ b/vendor/github.com/araddon/dateparse/parseany.go @@ -0,0 +1,2189 @@ +// Package dateparse parses date-strings without knowing the format +// in advance, using a fast lex based approach to eliminate shotgun +// attempts. It leans towards US style dates when there is a conflict. +package dateparse + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +// func init() { +// gou.SetupLogging("debug") +// gou.SetColorOutput() +// } + +var days = []string{ + "mon", + "tue", + "wed", + "thu", + "fri", + "sat", + "sun", + "monday", + "tuesday", + "wednesday", + "thursday", + "friday", + "saturday", + "sunday", +} + +var months = []string{ + "january", + "february", + "march", + "april", + "may", + "june", + "july", + "august", + "september", + "october", + "november", + "december", +} + +type dateState uint8 +type timeState uint8 + +const ( + dateStart dateState = iota // 0 + dateDigit + dateDigitSt + dateYearDash + dateYearDashAlphaDash + dateYearDashDash + dateYearDashDashWs // 5 + dateYearDashDashT + dateYearDashDashOffset + dateDigitDash + dateDigitDashAlpha + dateDigitDashAlphaDash // 10 + dateDigitDot + dateDigitDotDot + dateDigitSlash + dateDigitYearSlash + dateDigitSlashAlpha // 15 + dateDigitColon + dateDigitChineseYear + dateDigitChineseYearWs + dateDigitWs + dateDigitWsMoYear // 20 + dateDigitWsMolong + dateAlpha + dateAlphaWs + dateAlphaWsDigit + dateAlphaWsDigitMore // 25 + dateAlphaWsDigitMoreWs + dateAlphaWsDigitMoreWsYear + dateAlphaWsMonth + dateAlphaWsDigitYearmaybe + dateAlphaWsMonthMore 
+ dateAlphaWsMonthSuffix + dateAlphaWsMore + dateAlphaWsAtTime + dateAlphaWsAlpha + dateAlphaWsAlphaYearmaybe // 35 + dateAlphaPeriodWsDigit + dateWeekdayComma + dateWeekdayAbbrevComma +) +const ( + // Time state + timeIgnore timeState = iota // 0 + timeStart + timeWs + timeWsAlpha + timeWsAlphaWs + timeWsAlphaZoneOffset // 5 + timeWsAlphaZoneOffsetWs + timeWsAlphaZoneOffsetWsYear + timeWsAlphaZoneOffsetWsExtra + timeWsAMPMMaybe + timeWsAMPM // 10 + timeWsOffset + timeWsOffsetWs // 12 + timeWsOffsetColonAlpha + timeWsOffsetColon + timeWsYear // 15 + timeOffset + timeOffsetColon + timeAlpha + timePeriod + timePeriodOffset // 20 + timePeriodOffsetColon + timePeriodOffsetColonWs + timePeriodWs + timePeriodWsAlpha + timePeriodWsOffset // 25 + timePeriodWsOffsetWs + timePeriodWsOffsetWsAlpha + timePeriodWsOffsetColon + timePeriodWsOffsetColonAlpha + timeZ + timeZDigit +) + +var ( + // ErrAmbiguousMMDD for date formats such as 04/02/2014 the mm/dd vs dd/mm are + // ambiguous, so it is an error for strict parse rules. + ErrAmbiguousMMDD = fmt.Errorf("This date has ambiguous mm/dd vs dd/mm type format") +) + +func unknownErr(datestr string) error { + return fmt.Errorf("Could not find format for %q", datestr) +} + +// ParseAny parse an unknown date format, detect the layout. +// Normal parse. Equivalent Timezone rules as time.Parse(). +// NOTE: please see readme on mmdd vs ddmm ambiguous dates. +func ParseAny(datestr string, opts ...ParserOption) (time.Time, error) { + p, err := parseTime(datestr, nil, opts...) + if err != nil { + return time.Time{}, err + } + return p.parse() +} + +// ParseIn with Location, equivalent to time.ParseInLocation() timezone/offset +// rules. Using location arg, if timezone/offset info exists in the +// datestring, it uses the given location rules for any zone interpretation. +// That is, MST means one thing when using America/Denver and something else +// in other locations. 
+func ParseIn(datestr string, loc *time.Location, opts ...ParserOption) (time.Time, error) { + p, err := parseTime(datestr, loc, opts...) + if err != nil { + return time.Time{}, err + } + return p.parse() +} + +// ParseLocal Given an unknown date format, detect the layout, +// using time.Local, parse. +// +// Set Location to time.Local. Same as ParseIn Location but lazily uses +// the global time.Local variable for Location argument. +// +// denverLoc, _ := time.LoadLocation("America/Denver") +// time.Local = denverLoc +// +// t, err := dateparse.ParseLocal("3/1/2014") +// +// Equivalent to: +// +// t, err := dateparse.ParseIn("3/1/2014", denverLoc) +// +func ParseLocal(datestr string, opts ...ParserOption) (time.Time, error) { + p, err := parseTime(datestr, time.Local, opts...) + if err != nil { + return time.Time{}, err + } + return p.parse() +} + +// MustParse parse a date, and panic if it can't be parsed. Used for testing. +// Not recommended for most use-cases. +func MustParse(datestr string, opts ...ParserOption) time.Time { + p, err := parseTime(datestr, nil, opts...) + if err != nil { + panic(err.Error()) + } + t, err := p.parse() + if err != nil { + panic(err.Error()) + } + return t +} + +// ParseFormat parse's an unknown date-time string and returns a layout +// string that can parse this (and exact same format) other date-time strings. +// +// layout, err := dateparse.ParseFormat("2013-02-01 00:00:00") +// // layout = "2006-01-02 15:04:05" +// +func ParseFormat(datestr string, opts ...ParserOption) (string, error) { + p, err := parseTime(datestr, nil, opts...) + if err != nil { + return "", err + } + _, err = p.parse() + if err != nil { + return "", err + } + return string(p.format), nil +} + +// ParseStrict parse an unknown date format. IF the date is ambigous +// mm/dd vs dd/mm then return an error. 
These return errors: 3.3.2014 , 8/8/71 etc +func ParseStrict(datestr string, opts ...ParserOption) (time.Time, error) { + p, err := parseTime(datestr, nil, opts...) + if err != nil { + return time.Time{}, err + } + if p.ambiguousMD { + return time.Time{}, ErrAmbiguousMMDD + } + return p.parse() +} + +func parseTime(datestr string, loc *time.Location, opts ...ParserOption) (p *parser, err error) { + + p = newParser(datestr, loc, opts...) + if p.retryAmbiguousDateWithSwap { + // month out of range signifies that a day/month swap is the correct solution to an ambiguous date + // this is because it means that a day is being interpreted as a month and overflowing the valid value for that + // by retrying in this case, we can fix a common situation with no assumptions + defer func() { + if p != nil && p.ambiguousMD { + // if it errors out with the following error, swap before we + // get out of this function to reduce scope it needs to be applied on + _, err := p.parse() + if err != nil && strings.Contains(err.Error(), "month out of range") { + // create the option to reverse the preference + preferMonthFirst := PreferMonthFirst(!p.preferMonthFirst) + // turn off the retry to avoid endless recursion + retryAmbiguousDateWithSwap := RetryAmbiguousDateWithSwap(false) + modifiedOpts := append(opts, preferMonthFirst, retryAmbiguousDateWithSwap) + p, err = parseTime(datestr, time.Local, modifiedOpts...) + } + } + + }() + } + + i := 0 + + // General strategy is to read rune by rune through the date looking for + // certain hints of what type of date we are dealing with. 
+ // Hopefully we only need to read about 5 or 6 bytes before + // we figure it out and then attempt a parse +iterRunes: + for ; i < len(datestr); i++ { + //r := rune(datestr[i]) + r, bytesConsumed := utf8.DecodeRuneInString(datestr[i:]) + if bytesConsumed > 1 { + i += (bytesConsumed - 1) + } + + // gou.Debugf("i=%d r=%s state=%d %s", i, string(r), p.stateDate, datestr) + switch p.stateDate { + case dateStart: + if unicode.IsDigit(r) { + p.stateDate = dateDigit + } else if unicode.IsLetter(r) { + p.stateDate = dateAlpha + } else { + return nil, unknownErr(datestr) + } + case dateDigit: + + switch r { + case '-', '\u2212': + // 2006-01-02 + // 2013-Feb-03 + // 13-Feb-03 + // 29-Jun-2016 + if i == 4 { + p.stateDate = dateYearDash + p.yeari = 0 + p.yearlen = i + p.moi = i + 1 + p.set(0, "2006") + } else { + p.stateDate = dateDigitDash + } + case '/': + // 08/May/2005 + // 03/31/2005 + // 2014/02/24 + p.stateDate = dateDigitSlash + if i == 4 { + // 2014/02/24 - Year first / + p.yearlen = i // since it was start of datestr, i=len + p.moi = i + 1 + p.setYear() + p.stateDate = dateDigitYearSlash + } else { + // Either Ambiguous dd/mm vs mm/dd OR dd/month/yy + // 08/May/2005 + // 03/31/2005 + // 31/03/2005 + if i+2 < len(p.datestr) && unicode.IsLetter(rune(datestr[i+1])) { + // 08/May/2005 + p.stateDate = dateDigitSlashAlpha + p.moi = i + 1 + p.daylen = 2 + p.dayi = 0 + p.setDay() + continue + } + // Ambiguous dd/mm vs mm/dd the bane of date-parsing + // 03/31/2005 + // 31/03/2005 + p.ambiguousMD = true + if p.preferMonthFirst { + if p.molen == 0 { + // 03/31/2005 + p.molen = i + p.setMonth() + p.dayi = i + 1 + } + } else { + if p.daylen == 0 { + p.daylen = i + p.setDay() + p.moi = i + 1 + } + } + + } + + case ':': + // 03/31/2005 + // 2014/02/24 + p.stateDate = dateDigitColon + if i == 4 { + p.yearlen = i + p.moi = i + 1 + p.setYear() + } else { + p.ambiguousMD = true + if p.preferMonthFirst { + if p.molen == 0 { + p.molen = i + p.setMonth() + p.dayi = i + 1 + } + } + } + 
+ case '.': + // 3.31.2014 + // 08.21.71 + // 2014.05 + p.stateDate = dateDigitDot + if i == 4 { + p.yearlen = i + p.moi = i + 1 + p.setYear() + } else { + p.ambiguousMD = true + p.moi = 0 + p.molen = i + p.setMonth() + p.dayi = i + 1 + } + + case ' ': + // 18 January 2018 + // 8 January 2018 + // 8 jan 2018 + // 02 Jan 2018 23:59 + // 02 Jan 2018 23:59:34 + // 12 Feb 2006, 19:17 + // 12 Feb 2006, 19:17:22 + if i == 6 { + p.stateDate = dateDigitSt + } else { + p.stateDate = dateDigitWs + p.dayi = 0 + p.daylen = i + } + case '年': + // Chinese Year + p.stateDate = dateDigitChineseYear + case ',': + return nil, unknownErr(datestr) + default: + continue + } + p.part1Len = i + + case dateDigitSt: + p.set(0, "060102") + i = i - 1 + p.stateTime = timeStart + break iterRunes + case dateYearDash: + // dateYearDashDashT + // 2006-01-02T15:04:05Z07:00 + // 2020-08-17T17:00:00:000+0100 + // dateYearDashDashWs + // 2013-04-01 22:43:22 + // dateYearDashAlphaDash + // 2013-Feb-03 + switch r { + case '-': + p.molen = i - p.moi + p.dayi = i + 1 + p.stateDate = dateYearDashDash + p.setMonth() + default: + if unicode.IsLetter(r) { + p.stateDate = dateYearDashAlphaDash + } + } + + case dateYearDashDash: + // dateYearDashDashT + // 2006-01-02T15:04:05Z07:00 + // dateYearDashDashWs + // 2013-04-01 22:43:22 + // dateYearDashDashOffset + // 2020-07-20+00:00 + switch r { + case '+', '-': + p.offseti = i + p.daylen = i - p.dayi + p.stateDate = dateYearDashDashOffset + p.setDay() + case ' ': + p.daylen = i - p.dayi + p.stateDate = dateYearDashDashWs + p.stateTime = timeStart + p.setDay() + break iterRunes + case 'T': + p.daylen = i - p.dayi + p.stateDate = dateYearDashDashT + p.stateTime = timeStart + p.setDay() + break iterRunes + } + + case dateYearDashDashT: + // dateYearDashDashT + // 2006-01-02T15:04:05Z07:00 + // 2020-08-17T17:00:00:000+0100 + + case dateYearDashDashOffset: + // 2020-07-20+00:00 + switch r { + case ':': + p.set(p.offseti, "-07:00") + // case ' ': + // return nil, 
unknownErr(datestr) + } + + case dateYearDashAlphaDash: + // 2013-Feb-03 + switch r { + case '-': + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.dayi = i + 1 + } + case dateDigitDash: + // 13-Feb-03 + // 29-Jun-2016 + if unicode.IsLetter(r) { + p.stateDate = dateDigitDashAlpha + p.moi = i + } else { + return nil, unknownErr(datestr) + } + case dateDigitDashAlpha: + // 13-Feb-03 + // 28-Feb-03 + // 29-Jun-2016 + switch r { + case '-': + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.yeari = i + 1 + p.stateDate = dateDigitDashAlphaDash + } + + case dateDigitDashAlphaDash: + // 13-Feb-03 ambiguous + // 28-Feb-03 ambiguous + // 29-Jun-2016 dd-month(alpha)-yyyy + switch r { + case ' ': + // we need to find if this was 4 digits, aka year + // or 2 digits which makes it ambiguous year/day + length := i - (p.moi + p.molen + 1) + if length == 4 { + p.yearlen = 4 + p.set(p.yeari, "2006") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } else if length == 2 { + // We have no idea if this is + // yy-mon-dd OR dd-mon-yy + // + // We are going to ASSUME (bad, bad) that it is dd-mon-yy which is a horible assumption + p.ambiguousMD = true + p.yearlen = 2 + p.set(p.yeari, "06") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } + p.stateTime = timeStart + break iterRunes + } + + case dateDigitYearSlash: + // 2014/07/10 06:55:38.156283 + // I honestly don't know if this format ever shows up as yyyy/ + + switch r { + case ' ', ':': + p.stateTime = timeStart + if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + } + break iterRunes + case '/': + if p.molen == 0 { + p.molen = i - p.moi + p.setMonth() + p.dayi = i + 1 + } + } + + case dateDigitSlashAlpha: + // 06/May/2008 + + switch r { + case '/': + // | + // 06/May/2008 + if p.molen == 0 { + p.set(p.moi, "Jan") + p.yeari = i + 1 + } + // We aren't breaking because we are going to re-use this case + // to find where the date starts, 
and possible time begins + case ' ', ':': + p.stateTime = timeStart + if p.yearlen == 0 { + p.yearlen = i - p.yeari + p.setYear() + } + break iterRunes + } + + case dateDigitSlash: + // 03/19/2012 10:11:59 + // 04/2/2014 03:00:37 + // 3/1/2012 10:11:59 + // 4/8/2014 22:05 + // 3/1/2014 + // 10/13/2014 + // 01/02/2006 + // 1/2/06 + + switch r { + case '/': + // This is the 2nd / so now we should know start pts of all of the dd, mm, yy + if p.preferMonthFirst { + if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + } + } else { + if p.molen == 0 { + p.molen = i - p.moi + p.setMonth() + p.yeari = i + 1 + } + } + // Note no break, we are going to pass by and re-enter this dateDigitSlash + // and look for ending (space) or not (just date) + case ' ': + p.stateTime = timeStart + if p.yearlen == 0 { + p.yearlen = i - p.yeari + p.setYear() + } + break iterRunes + } + + case dateDigitColon: + // 2014:07:10 06:55:38.156283 + // 03:19:2012 10:11:59 + // 04:2:2014 03:00:37 + // 3:1:2012 10:11:59 + // 4:8:2014 22:05 + // 3:1:2014 + // 10:13:2014 + // 01:02:2006 + // 1:2:06 + + switch r { + case ' ': + p.stateTime = timeStart + if p.yearlen == 0 { + p.yearlen = i - p.yeari + p.setYear() + } else if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + } + break iterRunes + case ':': + if p.yearlen > 0 { + // 2014:07:10 06:55:38.156283 + if p.molen == 0 { + p.molen = i - p.moi + p.setMonth() + p.dayi = i + 1 + } + } else if p.preferMonthFirst { + if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + } + } + } + + case dateDigitWs: + // 18 January 2018 + // 8 January 2018 + // 8 jan 2018 + // 1 jan 18 + // 02 Jan 2018 23:59 + // 02 Jan 2018 23:59:34 + // 12 Feb 2006, 19:17 + // 12 Feb 2006, 19:17:22 + switch r { + case ' ': + p.yeari = i + 1 + //p.yearlen = 4 + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + p.stateTime = timeStart + if i > p.daylen+len(" Sep") { // November etc + // If len greather than space + 3 it must be full 
month + p.stateDate = dateDigitWsMolong + } else { + // If len=3, the might be Feb or May? Ie ambigous abbreviated but + // we can parse may with either. BUT, that means the + // format may not be correct? + // mo := strings.ToLower(datestr[p.daylen+1 : i]) + p.moi = p.daylen + 1 + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.stateDate = dateDigitWsMoYear + } + } + + case dateDigitWsMoYear: + // 8 jan 2018 + // 02 Jan 2018 23:59 + // 02 Jan 2018 23:59:34 + // 12 Feb 2006, 19:17 + // 12 Feb 2006, 19:17:22 + switch r { + case ',': + p.yearlen = i - p.yeari + p.setYear() + i++ + break iterRunes + case ' ': + p.yearlen = i - p.yeari + p.setYear() + break iterRunes + } + case dateDigitWsMolong: + // 18 January 2018 + // 8 January 2018 + + case dateDigitChineseYear: + // dateDigitChineseYear + // 2014年04月08日 + // weekday %Y年%m月%e日 %A %I:%M %p + // 2013年07月18日 星期四 10:27 上午 + if r == ' ' { + p.stateDate = dateDigitChineseYearWs + break + } + case dateDigitDot: + // This is the 2nd period + // 3.31.2014 + // 08.21.71 + // 2014.05 + // 2018.09.30 + if r == '.' { + if p.moi == 0 { + // 3.31.2014 + p.daylen = i - p.dayi + p.yeari = i + 1 + p.setDay() + p.stateDate = dateDigitDotDot + } else { + // 2018.09.30 + //p.molen = 2 + p.molen = i - p.moi + p.dayi = i + 1 + p.setMonth() + p.stateDate = dateDigitDotDot + } + } + case dateDigitDotDot: + // iterate all the way through + case dateAlpha: + // dateAlphaWS + // Mon Jan _2 15:04:05 2006 + // Mon Jan _2 15:04:05 MST 2006 + // Mon Jan 02 15:04:05 -0700 2006 + // Mon Aug 10 15:44:11 UTC+0100 2015 + // Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) + // dateAlphaWSDigit + // May 8, 2009 5:57:51 PM + // oct 1, 1970 + // dateAlphaWsMonth + // April 8, 2009 + // dateAlphaWsMore + // dateAlphaWsAtTime + // January 02, 2006 at 3:04pm MST-07 + // + // dateAlphaPeriodWsDigit + // oct. 
1, 1970 + // dateWeekdayComma + // Monday, 02 Jan 2006 15:04:05 MST + // Monday, 02-Jan-06 15:04:05 MST + // Monday, 02 Jan 2006 15:04:05 -0700 + // Monday, 02 Jan 2006 15:04:05 +0100 + // dateWeekdayAbbrevComma + // Mon, 02 Jan 2006 15:04:05 MST + // Mon, 02 Jan 2006 15:04:05 -0700 + // Thu, 13 Jul 2017 08:58:40 +0100 + // Tue, 11 Jul 2017 16:28:13 +0200 (CEST) + // Mon, 02-Jan-06 15:04:05 MST + switch { + case r == ' ': + // X + // April 8, 2009 + if i > 3 { + // Check to see if the alpha is name of month? or Day? + month := strings.ToLower(datestr[0:i]) + if isMonthFull(month) { + p.fullMonth = month + // len(" 31, 2018") = 9 + if len(datestr[i:]) < 10 { + // April 8, 2009 + p.stateDate = dateAlphaWsMonth + } else { + p.stateDate = dateAlphaWsMore + } + p.dayi = i + 1 + break + } + + } else { + // This is possibly ambiguous? May will parse as either though. + // So, it could return in-correct format. + // dateAlphaWs + // May 05, 2005, 05:05:05 + // May 05 2005, 05:05:05 + // Jul 05, 2005, 05:05:05 + // May 8 17:57:51 2009 + // May 8 17:57:51 2009 + // skip & return to dateStart + // Tue 05 May 2020, 05:05:05 + // Mon Jan 2 15:04:05 2006 + + maybeDay := strings.ToLower(datestr[0:i]) + if isDay(maybeDay) { + // using skip throws off indices used by other code; saner to restart + return parseTime(datestr[i+1:], loc) + } + p.stateDate = dateAlphaWs + } + + case r == ',': + // Mon, 02 Jan 2006 + + if i == 3 { + p.stateDate = dateWeekdayAbbrevComma + p.set(0, "Mon") + } else { + p.stateDate = dateWeekdayComma + p.skip = i + 2 + i++ + // TODO: lets just make this "skip" as we don't need + // the mon, monday, they are all superfelous and not needed + // just lay down the skip, no need to fill and then skip + } + case r == '.': + // sept. 28, 2017 + // jan. 28, 2017 + p.stateDate = dateAlphaPeriodWsDigit + if i == 3 { + p.molen = i + p.set(0, "Jan") + } else if i == 4 { + // gross + datestr = datestr[0:i-1] + datestr[i:] + return parseTime(datestr, loc, opts...) 
+ } else { + return nil, unknownErr(datestr) + } + } + + case dateAlphaWs: + // dateAlphaWsAlpha + // Mon Jan _2 15:04:05 2006 + // Mon Jan _2 15:04:05 MST 2006 + // Mon Jan 02 15:04:05 -0700 2006 + // Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) + // Mon Aug 10 15:44:11 UTC+0100 2015 + // dateAlphaWsDigit + // May 8, 2009 5:57:51 PM + // May 8 2009 5:57:51 PM + // May 8 17:57:51 2009 + // May 8 17:57:51 2009 + // May 08 17:57:51 2009 + // oct 1, 1970 + // oct 7, '70 + switch { + case unicode.IsLetter(r): + p.set(0, "Mon") + p.stateDate = dateAlphaWsAlpha + p.set(i, "Jan") + case unicode.IsDigit(r): + p.set(0, "Jan") + p.stateDate = dateAlphaWsDigit + p.dayi = i + } + + case dateAlphaWsDigit: + // May 8, 2009 5:57:51 PM + // May 8 2009 5:57:51 PM + // oct 1, 1970 + // oct 7, '70 + // oct. 7, 1970 + // May 8 17:57:51 2009 + // May 8 17:57:51 2009 + // May 08 17:57:51 2009 + if r == ',' { + p.daylen = i - p.dayi + p.setDay() + p.stateDate = dateAlphaWsDigitMore + } else if r == ' ' { + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + p.stateDate = dateAlphaWsDigitYearmaybe + p.stateTime = timeStart + } else if unicode.IsLetter(r) { + p.stateDate = dateAlphaWsMonthSuffix + i-- + } + case dateAlphaWsDigitYearmaybe: + // x + // May 8 2009 5:57:51 PM + // May 8 17:57:51 2009 + // May 8 17:57:51 2009 + // May 08 17:57:51 2009 + // Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) + if r == ':' { + // Guessed wrong; was not a year + i = i - 3 + p.stateDate = dateAlphaWsDigit + p.yeari = 0 + break iterRunes + } else if r == ' ' { + // must be year format, not 15:04 + p.yearlen = i - p.yeari + p.setYear() + break iterRunes + } + case dateAlphaWsDigitMore: + // x + // May 8, 2009 5:57:51 PM + // May 05, 2005, 05:05:05 + // May 05 2005, 05:05:05 + // oct 1, 1970 + // oct 7, '70 + if r == ' ' { + p.yeari = i + 1 + p.stateDate = dateAlphaWsDigitMoreWs + } + case dateAlphaWsDigitMoreWs: + // x + // May 8, 2009 5:57:51 PM + // May 05, 2005, 05:05:05 + // oct 1, 1970 
+ // oct 7, '70 + switch r { + case '\'': + p.yeari = i + 1 + case ' ', ',': + // x + // May 8, 2009 5:57:51 PM + // x + // May 8, 2009, 5:57:51 PM + p.stateDate = dateAlphaWsDigitMoreWsYear + p.yearlen = i - p.yeari + p.setYear() + p.stateTime = timeStart + break iterRunes + } + + case dateAlphaWsMonth: + // April 8, 2009 + // April 8 2009 + switch r { + case ' ', ',': + // x + // June 8, 2009 + // x + // June 8 2009 + if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + } + case 's', 'S', 'r', 'R', 't', 'T', 'n', 'N': + // st, rd, nd, st + i-- + p.stateDate = dateAlphaWsMonthSuffix + default: + if p.daylen > 0 && p.yeari == 0 { + p.yeari = i + } + } + case dateAlphaWsMonthMore: + // X + // January 02, 2006, 15:04:05 + // January 02 2006, 15:04:05 + // January 02, 2006 15:04:05 + // January 02 2006 15:04:05 + switch r { + case ',': + p.yearlen = i - p.yeari + p.setYear() + p.stateTime = timeStart + i++ + break iterRunes + case ' ': + p.yearlen = i - p.yeari + p.setYear() + p.stateTime = timeStart + break iterRunes + } + case dateAlphaWsMonthSuffix: + // x + // April 8th, 2009 + // April 8th 2009 + switch r { + case 't', 'T': + if p.nextIs(i, 'h') || p.nextIs(i, 'H') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...) + } + } + case 'n', 'N': + if p.nextIs(i, 'd') || p.nextIs(i, 'D') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...) + } + } + case 's', 'S': + if p.nextIs(i, 't') || p.nextIs(i, 'T') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...) + } + } + case 'r', 'R': + if p.nextIs(i, 'd') || p.nextIs(i, 'D') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...) 
+ } + } + } + case dateAlphaWsMore: + // January 02, 2006, 15:04:05 + // January 02 2006, 15:04:05 + // January 2nd, 2006, 15:04:05 + // January 2nd 2006, 15:04:05 + // September 17, 2012 at 5:00pm UTC-05 + switch { + case r == ',': + // x + // January 02, 2006, 15:04:05 + if p.nextIs(i, ' ') { + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 2 + p.stateDate = dateAlphaWsMonthMore + i++ + } + + case r == ' ': + // x + // January 02 2006, 15:04:05 + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + p.stateDate = dateAlphaWsMonthMore + case unicode.IsDigit(r): + // XX + // January 02, 2006, 15:04:05 + continue + case unicode.IsLetter(r): + // X + // January 2nd, 2006, 15:04:05 + p.daylen = i - p.dayi + p.setDay() + p.stateDate = dateAlphaWsMonthSuffix + i-- + } + + case dateAlphaPeriodWsDigit: + // oct. 7, '70 + switch { + case r == ' ': + // continue + case unicode.IsDigit(r): + p.stateDate = dateAlphaWsDigit + p.dayi = i + default: + return p, unknownErr(datestr) + } + case dateWeekdayComma: + // Monday, 02 Jan 2006 15:04:05 MST + // Monday, 02 Jan 2006 15:04:05 -0700 + // Monday, 02 Jan 2006 15:04:05 +0100 + // Monday, 02-Jan-06 15:04:05 MST + if p.dayi == 0 { + p.dayi = i + } + switch r { + case ' ', '-': + if p.moi == 0 { + p.moi = i + 1 + p.daylen = i - p.dayi + p.setDay() + } else if p.yeari == 0 { + p.yeari = i + 1 + p.molen = i - p.moi + p.set(p.moi, "Jan") + } else { + p.stateTime = timeStart + break iterRunes + } + } + case dateWeekdayAbbrevComma: + // Mon, 02 Jan 2006 15:04:05 MST + // Mon, 02 Jan 2006 15:04:05 -0700 + // Thu, 13 Jul 2017 08:58:40 +0100 + // Thu, 4 Jan 2018 17:53:36 +0000 + // Tue, 11 Jul 2017 16:28:13 +0200 (CEST) + // Mon, 02-Jan-06 15:04:05 MST + switch r { + case ' ', '-': + if p.dayi == 0 { + p.dayi = i + 1 + } else if p.moi == 0 { + p.daylen = i - p.dayi + p.setDay() + p.moi = i + 1 + } else if p.yeari == 0 { + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.yeari = i + 1 + } else { + p.yearlen = i - p.yeari + p.setYear() + 
p.stateTime = timeStart + break iterRunes + } + } + + default: + break iterRunes + } + } + p.coalesceDate(i) + if p.stateTime == timeStart { + // increment first one, since the i++ occurs at end of loop + if i < len(p.datestr) { + i++ + } + // ensure we skip any whitespace prefix + for ; i < len(datestr); i++ { + r := rune(datestr[i]) + if r != ' ' { + break + } + } + + iterTimeRunes: + for ; i < len(datestr); i++ { + r := rune(datestr[i]) + + // gou.Debugf("i=%d r=%s state=%d iterTimeRunes %s %s", i, string(r), p.stateTime, p.ds(), p.ts()) + + switch p.stateTime { + case timeStart: + // 22:43:22 + // 22:43 + // timeComma + // 08:20:13,787 + // timeWs + // 05:24:37 PM + // 06:20:00 UTC + // 06:20:00 UTC-05 + // 00:12:00 +0000 UTC + // 22:18:00 +0000 UTC m=+0.000000001 + // 15:04:05 -0700 + // 15:04:05 -07:00 + // 15:04:05 2008 + // timeOffset + // 03:21:51+00:00 + // 19:55:00+0100 + // timePeriod + // 17:24:37.3186369 + // 00:07:31.945167 + // 18:31:59.257000000 + // 00:00:00.000 + // timePeriodOffset + // 19:55:00.799+0100 + // timePeriodOffsetColon + // 15:04:05.999-07:00 + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 22:18:00.001 +0000 UTC m=+0.000000001 + // 00:00:00.000 +0000 UTC + // timePeriodWsAlpha + // 06:20:00.000 UTC + if p.houri == 0 { + p.houri = i + } + switch r { + case ',': + // hm, lets just swap out comma for period. for some reason go + // won't parse it. + // 2014-05-11 08:20:13,787 + ds := []byte(p.datestr) + ds[i] = '.' + return parseTime(string(ds), loc, opts...) 
+ case '-', '+': + // 03:21:51+00:00 + p.stateTime = timeOffset + if p.seci == 0 { + // 22:18+0530 + p.minlen = i - p.mini + } else { + if p.seclen == 0 { + p.seclen = i - p.seci + } + if p.msi > 0 && p.mslen == 0 { + p.mslen = i - p.msi + } + } + p.offseti = i + case '.': + p.stateTime = timePeriod + p.seclen = i - p.seci + p.msi = i + 1 + case 'Z': + p.stateTime = timeZ + if p.seci == 0 { + p.minlen = i - p.mini + } else { + p.seclen = i - p.seci + } + // (Z)ulu time + p.loc = time.UTC + case 'a', 'A': + if p.nextIs(i, 't') || p.nextIs(i, 'T') { + // x + // September 17, 2012 at 5:00pm UTC-05 + i++ // skip t + if p.nextIs(i, ' ') { + // x + // September 17, 2012 at 5:00pm UTC-05 + i++ // skip ' + p.houri = 0 // reset hour + } + } else { + switch { + case r == 'a' && p.nextIs(i, 'm'): + p.coalesceTime(i) + p.set(i, "am") + case r == 'A' && p.nextIs(i, 'M'): + p.coalesceTime(i) + p.set(i, "PM") + } + } + + case 'p', 'P': + // Could be AM/PM + switch { + case r == 'p' && p.nextIs(i, 'm'): + p.coalesceTime(i) + p.set(i, "pm") + case r == 'P' && p.nextIs(i, 'M'): + p.coalesceTime(i) + p.set(i, "PM") + } + case ' ': + p.coalesceTime(i) + p.stateTime = timeWs + case ':': + if p.mini == 0 { + p.mini = i + 1 + p.hourlen = i - p.houri + } else if p.seci == 0 { + p.seci = i + 1 + p.minlen = i - p.mini + } else if p.seci > 0 { + // 18:31:59:257 ms uses colon, wtf + p.seclen = i - p.seci + p.set(p.seci, "05") + p.msi = i + 1 + + // gross, gross, gross. manipulating the datestr is horrible. + // https://github.com/araddon/dateparse/issues/117 + // Could not get the parsing to work using golang time.Parse() without + // replacing that colon with period. + p.set(i, ".") + datestr = datestr[0:i] + "." 
+ datestr[i+1:] + p.datestr = datestr + } + } + case timeOffset: + // 19:55:00+0100 + // timeOffsetColon + // 15:04:05+07:00 + // 15:04:05-07:00 + if r == ':' { + p.stateTime = timeOffsetColon + } + case timeWs: + // timeWsAlpha + // 06:20:00 UTC + // 06:20:00 UTC-05 + // 15:44:11 UTC+0100 2015 + // 18:04:07 GMT+0100 (GMT Daylight Time) + // 17:57:51 MST 2009 + // timeWsAMPMMaybe + // 05:24:37 PM + // timeWsOffset + // 15:04:05 -0700 + // 00:12:00 +0000 UTC + // timeWsOffsetColon + // 15:04:05 -07:00 + // 17:57:51 -0700 2009 + // timeWsOffsetColonAlpha + // 00:12:00 +00:00 UTC + // timeWsYear + // 00:12:00 2008 + // timeZ + // 15:04:05.99Z + switch r { + case 'A', 'P': + // Could be AM/PM or could be PST or similar + p.tzi = i + p.stateTime = timeWsAMPMMaybe + case '+', '-': + p.offseti = i + p.stateTime = timeWsOffset + default: + if unicode.IsLetter(r) { + // 06:20:00 UTC + // 06:20:00 UTC-05 + // 15:44:11 UTC+0100 2015 + // 17:57:51 MST 2009 + p.tzi = i + p.stateTime = timeWsAlpha + } else if unicode.IsDigit(r) { + // 00:12:00 2008 + p.stateTime = timeWsYear + p.yeari = i + } + } + case timeWsAlpha: + // 06:20:00 UTC + // 06:20:00 UTC-05 + // timeWsAlphaWs + // 17:57:51 MST 2009 + // timeWsAlphaZoneOffset + // timeWsAlphaZoneOffsetWs + // timeWsAlphaZoneOffsetWsExtra + // 18:04:07 GMT+0100 (GMT Daylight Time) + // timeWsAlphaZoneOffsetWsYear + // 15:44:11 UTC+0100 2015 + switch r { + case '+', '-': + p.tzlen = i - p.tzi + if p.tzlen == 4 { + p.set(p.tzi, " MST") + } else if p.tzlen == 3 { + p.set(p.tzi, "MST") + } + p.stateTime = timeWsAlphaZoneOffset + p.offseti = i + case ' ': + // 17:57:51 MST 2009 + // 17:57:51 MST + p.tzlen = i - p.tzi + if p.tzlen == 4 { + p.set(p.tzi, " MST") + } else if p.tzlen == 3 { + p.set(p.tzi, "MST") + } + p.stateTime = timeWsAlphaWs + p.yeari = i + 1 + } + case timeWsAlphaWs: + // 17:57:51 MST 2009 + + case timeWsAlphaZoneOffset: + // 06:20:00 UTC-05 + // timeWsAlphaZoneOffset + // timeWsAlphaZoneOffsetWs + // 
timeWsAlphaZoneOffsetWsExtra + // 18:04:07 GMT+0100 (GMT Daylight Time) + // timeWsAlphaZoneOffsetWsYear + // 15:44:11 UTC+0100 2015 + switch r { + case ' ': + p.set(p.offseti, "-0700") + if p.yeari == 0 { + p.yeari = i + 1 + } + p.stateTime = timeWsAlphaZoneOffsetWs + } + case timeWsAlphaZoneOffsetWs: + // timeWsAlphaZoneOffsetWs + // timeWsAlphaZoneOffsetWsExtra + // 18:04:07 GMT+0100 (GMT Daylight Time) + // timeWsAlphaZoneOffsetWsYear + // 15:44:11 UTC+0100 2015 + if unicode.IsDigit(r) { + p.stateTime = timeWsAlphaZoneOffsetWsYear + } else { + p.extra = i - 1 + p.stateTime = timeWsAlphaZoneOffsetWsExtra + } + case timeWsAlphaZoneOffsetWsYear: + // 15:44:11 UTC+0100 2015 + if unicode.IsDigit(r) { + p.yearlen = i - p.yeari + 1 + if p.yearlen == 4 { + p.setYear() + } + } + case timeWsAMPMMaybe: + // timeWsAMPMMaybe + // timeWsAMPM + // 05:24:37 PM + // timeWsAlpha + // 00:12:00 PST + // 15:44:11 UTC+0100 2015 + if r == 'M' { + //return parse("2006-01-02 03:04:05 PM", datestr, loc) + p.stateTime = timeWsAMPM + p.set(i-1, "PM") + if p.hourlen == 2 { + p.set(p.houri, "03") + } else if p.hourlen == 1 { + p.set(p.houri, "3") + } + } else { + p.stateTime = timeWsAlpha + } + + case timeWsOffset: + // timeWsOffset + // 15:04:05 -0700 + // timeWsOffsetWsOffset + // 17:57:51 -0700 -07 + // timeWsOffsetWs + // 17:57:51 -0700 2009 + // 00:12:00 +0000 UTC + // timeWsOffsetColon + // 15:04:05 -07:00 + // timeWsOffsetColonAlpha + // 00:12:00 +00:00 UTC + switch r { + case ':': + p.stateTime = timeWsOffsetColon + case ' ': + p.set(p.offseti, "-0700") + p.yeari = i + 1 + p.stateTime = timeWsOffsetWs + } + case timeWsOffsetWs: + // 17:57:51 -0700 2009 + // 00:12:00 +0000 UTC + // 22:18:00.001 +0000 UTC m=+0.000000001 + // w Extra + // 17:57:51 -0700 -07 + switch r { + case '=': + // eff you golang + if datestr[i-1] == 'm' { + p.extra = i - 2 + p.trimExtra() + break + } + case '+', '-', '(': + // This really doesn't seem valid, but for some reason when round-tripping a go date + // 
their is an extra +03 printed out. seems like go bug to me, but, parsing anyway. + // 00:00:00 +0300 +03 + // 00:00:00 +0300 +0300 + p.extra = i - 1 + p.stateTime = timeWsOffset + p.trimExtra() + break + default: + switch { + case unicode.IsDigit(r): + p.yearlen = i - p.yeari + 1 + if p.yearlen == 4 { + p.setYear() + } + case unicode.IsLetter(r): + // 15:04:05 -0700 MST + if p.tzi == 0 { + p.tzi = i + } + } + } + + case timeWsOffsetColon: + // timeWsOffsetColon + // 15:04:05 -07:00 + // timeWsOffsetColonAlpha + // 2015-02-18 00:12:00 +00:00 UTC + if unicode.IsLetter(r) { + // 2015-02-18 00:12:00 +00:00 UTC + p.stateTime = timeWsOffsetColonAlpha + break iterTimeRunes + } + case timePeriod: + // 15:04:05.999999999+07:00 + // 15:04:05.999999999-07:00 + // 15:04:05.999999+07:00 + // 15:04:05.999999-07:00 + // 15:04:05.999+07:00 + // 15:04:05.999-07:00 + // timePeriod + // 17:24:37.3186369 + // 00:07:31.945167 + // 18:31:59.257000000 + // 00:00:00.000 + // timePeriodOffset + // 19:55:00.799+0100 + // timePeriodOffsetColon + // 15:04:05.999-07:00 + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // With Extra + // 00:00:00.000 +0300 +03 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // 22:18:00.001 +0000 UTC m=+0.000000001 + // timePeriodWsAlpha + // 06:20:00.000 UTC + switch r { + case ' ': + p.mslen = i - p.msi + p.stateTime = timePeriodWs + case '+', '-': + // This really shouldn't happen + p.mslen = i - p.msi + p.offseti = i + p.stateTime = timePeriodOffset + default: + if unicode.IsLetter(r) { + // 06:20:00.000 UTC + p.mslen = i - p.msi + p.stateTime = timePeriodWsAlpha + } + } + case timePeriodOffset: + // timePeriodOffset + // 19:55:00.799+0100 + // timePeriodOffsetColon + // 15:04:05.999-07:00 + // 13:31:51.999-07:00 MST + if r == ':' { + p.stateTime = timePeriodOffsetColon + } + case timePeriodOffsetColon: + // timePeriodOffset + // timePeriodOffsetColon + // 
15:04:05.999-07:00 + // 13:31:51.999 -07:00 MST + switch r { + case ' ': + p.set(p.offseti, "-07:00") + p.stateTime = timePeriodOffsetColonWs + p.tzi = i + 1 + } + case timePeriodOffsetColonWs: + // continue + case timePeriodWs: + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // timePeriodWsOffsetColon + // 13:31:51.999 -07:00 MST + // timePeriodWsAlpha + // 06:20:00.000 UTC + if p.offseti == 0 { + p.offseti = i + } + switch r { + case '+', '-': + p.mslen = i - p.msi - 1 + p.stateTime = timePeriodWsOffset + default: + if unicode.IsLetter(r) { + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + p.stateTime = timePeriodWsOffsetWsAlpha + break iterTimeRunes + } + } + + case timePeriodWsOffset: + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // With Extra + // 00:00:00.000 +0300 +03 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // 03:02:00.001 +0300 MSK m=+0.000000001 + // timePeriodWsOffsetColon + // 13:31:51.999 -07:00 MST + // timePeriodWsAlpha + // 06:20:00.000 UTC + switch r { + case ':': + p.stateTime = timePeriodWsOffsetColon + case ' ': + p.set(p.offseti, "-0700") + case '+', '-': + // This really doesn't seem valid, but for some reason when round-tripping a go date + // their is an extra +03 printed out. seems like go bug to me, but, parsing anyway. 
+ // 00:00:00.000 +0300 +03 + // 00:00:00.000 +0300 +0300 + p.extra = i - 1 + p.trimExtra() + break + default: + if unicode.IsLetter(r) { + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // 03:02:00.001 +0300 MSK m=+0.000000001 + p.stateTime = timePeriodWsOffsetWsAlpha + } + } + case timePeriodWsOffsetWsAlpha: + // 03:02:00.001 +0300 MSK m=+0.000000001 + // eff you golang + if r == '=' && datestr[i-1] == 'm' { + p.extra = i - 2 + p.trimExtra() + break + } + + case timePeriodWsOffsetColon: + // 13:31:51.999 -07:00 MST + switch r { + case ' ': + p.set(p.offseti, "-07:00") + default: + if unicode.IsLetter(r) { + // 13:31:51.999 -07:00 MST + p.tzi = i + p.stateTime = timePeriodWsOffsetColonAlpha + } + } + case timePeriodWsOffsetColonAlpha: + // continue + case timeZ: + // timeZ + // 15:04:05.99Z + // With a time-zone at end after Z + // 2006-01-02T15:04:05.999999999Z07:00 + // 2006-01-02T15:04:05Z07:00 + // RFC3339 = "2006-01-02T15:04:05Z07:00" + // RFC3339Nano = "2006-01-02T15:04:05.999999999Z07:00" + if unicode.IsDigit(r) { + p.stateTime = timeZDigit + } + + } + } + + switch p.stateTime { + case timeWsAlpha: + switch len(p.datestr) - p.tzi { + case 3: + // 13:31:51.999 +01:00 CET + p.set(p.tzi, "MST") + case 4: + p.set(p.tzi, "MST") + p.extra = len(p.datestr) - 1 + p.trimExtra() + } + + case timeWsAlphaWs: + p.yearlen = i - p.yeari + p.setYear() + case timeWsYear: + p.yearlen = i - p.yeari + p.setYear() + case timeWsAlphaZoneOffsetWsExtra: + p.trimExtra() + case timeWsAlphaZoneOffset: + // 06:20:00 UTC-05 + if i-p.offseti < 4 { + p.set(p.offseti, "-07") + } else { + p.set(p.offseti, "-0700") + } + + case timePeriod: + p.mslen = i - p.msi + case timeOffset: + + switch len(p.datestr) - p.offseti { + case 0, 1, 2, 4: + return p, fmt.Errorf("TZ offset not recognized %q near %q (must be 2 or 4 digits optional colon)", datestr, string(datestr[p.offseti:])) + case 3: + // 19:55:00+01 + p.set(p.offseti, "-07") + case 5: + // 19:55:00+0100 + p.set(p.offseti, 
"-0700") + } + + case timeWsOffset: + p.set(p.offseti, "-0700") + case timeWsOffsetWs: + // 17:57:51 -0700 2009 + // 00:12:00 +0000 UTC + if p.tzi > 0 { + switch len(p.datestr) - p.tzi { + case 3: + // 13:31:51.999 +01:00 CET + p.set(p.tzi, "MST") + case 4: + // 13:31:51.999 +01:00 CEST + p.set(p.tzi, "MST ") + } + + } + case timeWsOffsetColon: + // 17:57:51 -07:00 + p.set(p.offseti, "-07:00") + case timeOffsetColon: + // 15:04:05+07:00 + p.set(p.offseti, "-07:00") + case timePeriodOffset: + // 19:55:00.799+0100 + p.set(p.offseti, "-0700") + case timePeriodOffsetColon: + p.set(p.offseti, "-07:00") + case timePeriodWsOffsetColonAlpha: + p.tzlen = i - p.tzi + switch p.tzlen { + case 3: + p.set(p.tzi, "MST") + case 4: + p.set(p.tzi, "MST ") + } + case timePeriodWsOffset: + p.set(p.offseti, "-0700") + } + p.coalesceTime(i) + } + + switch p.stateDate { + case dateDigit: + // unixy timestamps ish + // example ct type + // 1499979655583057426 19 nanoseconds + // 1499979795437000 16 micro-seconds + // 20180722105203 14 yyyyMMddhhmmss + // 1499979795437 13 milliseconds + // 1332151919 10 seconds + // 20140601 8 yyyymmdd + // 2014 4 yyyy + t := time.Time{} + if len(datestr) == len("1499979655583057426") { // 19 + // nano-seconds + if nanoSecs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(0, nanoSecs) + } + } else if len(datestr) == len("1499979795437000") { // 16 + // micro-seconds + if microSecs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(0, microSecs*1000) + } + } else if len(datestr) == len("yyyyMMddhhmmss") { // 14 + // yyyyMMddhhmmss + p.format = []byte("20060102150405") + return p, nil + } else if len(datestr) == len("1332151919000") { // 13 + if miliSecs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(0, miliSecs*1000*1000) + } + } else if len(datestr) == len("1332151919") { //10 + if secs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(secs, 0) + } + } else if 
len(datestr) == len("20140601") { + p.format = []byte("20060102") + return p, nil + } else if len(datestr) == len("2014") { + p.format = []byte("2006") + return p, nil + } else if len(datestr) < 4 { + return nil, fmt.Errorf("unrecognized format, too short %v", datestr) + } + if !t.IsZero() { + if loc == nil { + p.t = &t + return p, nil + } + t = t.In(loc) + p.t = &t + return p, nil + } + case dateDigitSt: + // 171113 14:14:20 + return p, nil + + case dateYearDash: + // 2006-01 + return p, nil + + case dateYearDashDash: + // 2006-01-02 + // 2006-1-02 + // 2006-1-2 + // 2006-01-2 + return p, nil + + case dateYearDashDashOffset: + /// 2020-07-20+00:00 + switch len(p.datestr) - p.offseti { + case 5: + p.set(p.offseti, "-0700") + case 6: + p.set(p.offseti, "-07:00") + } + return p, nil + + case dateYearDashAlphaDash: + // 2013-Feb-03 + // 2013-Feb-3 + p.daylen = i - p.dayi + p.setDay() + return p, nil + + case dateYearDashDashWs: + // 2013-04-01 + return p, nil + + case dateYearDashDashT: + return p, nil + + case dateDigitDashAlphaDash: + // 13-Feb-03 ambiguous + // 28-Feb-03 ambiguous + // 29-Jun-2016 + length := len(datestr) - (p.moi + p.molen + 1) + if length == 4 { + p.yearlen = 4 + p.set(p.yeari, "2006") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } else if length == 2 { + // We have no idea if this is + // yy-mon-dd OR dd-mon-yy + // + // We are going to ASSUME (bad, bad) that it is dd-mon-yy which is a horible assumption + p.ambiguousMD = true + p.yearlen = 2 + p.set(p.yeari, "06") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } + + return p, nil + + case dateDigitDot: + // 2014.05 + p.molen = i - p.moi + p.setMonth() + return p, nil + + case dateDigitDotDot: + // 03.31.1981 + // 3.31.2014 + // 3.2.1981 + // 3.2.81 + // 08.21.71 + // 2018.09.30 + return p, nil + + case dateDigitWsMoYear: + // 2 Jan 2018 + // 2 Jan 18 + // 2 Jan 2018 23:59 + // 02 Jan 2018 23:59 
+ // 12 Feb 2006, 19:17 + return p, nil + + case dateDigitWsMolong: + // 18 January 2018 + // 8 January 2018 + if p.daylen == 2 { + p.format = []byte("02 January 2006") + return p, nil + } + p.format = []byte("2 January 2006") + return p, nil // parse("2 January 2006", datestr, loc) + + case dateAlphaWsMonth: + p.yearlen = i - p.yeari + p.setYear() + return p, nil + + case dateAlphaWsMonthMore: + return p, nil + + case dateAlphaWsDigitMoreWs: + // oct 1, 1970 + p.yearlen = i - p.yeari + p.setYear() + return p, nil + + case dateAlphaWsDigitMoreWsYear: + // May 8, 2009 5:57:51 PM + // Jun 7, 2005, 05:57:51 + return p, nil + + case dateAlphaWsAlpha: + return p, nil + + case dateAlphaWsDigit: + return p, nil + + case dateAlphaWsDigitYearmaybe: + return p, nil + + case dateDigitSlash: + // 3/1/2014 + // 10/13/2014 + // 01/02/2006 + return p, nil + + case dateDigitSlashAlpha: + // 03/Jun/2014 + return p, nil + + case dateDigitYearSlash: + // 2014/10/13 + return p, nil + + case dateDigitColon: + // 3:1:2014 + // 10:13:2014 + // 01:02:2006 + // 2014:10:13 + return p, nil + + case dateDigitChineseYear: + // dateDigitChineseYear + // 2014年04月08日 + p.format = []byte("2006年01月02日") + return p, nil + + case dateDigitChineseYearWs: + p.format = []byte("2006年01月02日 15:04:05") + return p, nil + + case dateWeekdayComma: + // Monday, 02 Jan 2006 15:04:05 -0700 + // Monday, 02 Jan 2006 15:04:05 +0100 + // Monday, 02-Jan-06 15:04:05 MST + return p, nil + + case dateWeekdayAbbrevComma: + // Mon, 02-Jan-06 15:04:05 MST + // Mon, 02 Jan 2006 15:04:05 MST + return p, nil + + } + + return nil, unknownErr(datestr) +} + +type parser struct { + loc *time.Location + preferMonthFirst bool + retryAmbiguousDateWithSwap bool + ambiguousMD bool + stateDate dateState + stateTime timeState + format []byte + datestr string + fullMonth string + skip int + extra int + part1Len int + yeari int + yearlen int + moi int + molen int + dayi int + daylen int + houri int + hourlen int + mini int + minlen int + 
seci int + seclen int + msi int + mslen int + offseti int + offsetlen int + tzi int + tzlen int + t *time.Time +} + +// ParserOption defines a function signature implemented by options +// Options defined like this accept the parser and operate on the data within +type ParserOption func(*parser) error + +// PreferMonthFirst is an option that allows preferMonthFirst to be changed from its default +func PreferMonthFirst(preferMonthFirst bool) ParserOption { + return func(p *parser) error { + p.preferMonthFirst = preferMonthFirst + return nil + } +} + +// RetryAmbiguousDateWithSwap is an option that allows retryAmbiguousDateWithSwap to be changed from its default +func RetryAmbiguousDateWithSwap(retryAmbiguousDateWithSwap bool) ParserOption { + return func(p *parser) error { + p.retryAmbiguousDateWithSwap = retryAmbiguousDateWithSwap + return nil + } +} + +func newParser(dateStr string, loc *time.Location, opts ...ParserOption) *parser { + p := &parser{ + stateDate: dateStart, + stateTime: timeIgnore, + datestr: dateStr, + loc: loc, + preferMonthFirst: true, + retryAmbiguousDateWithSwap: false, + } + p.format = []byte(dateStr) + + // allow the options to mutate the parser fields from their defaults + for _, option := range opts { + option(p) + } + return p +} + +func (p *parser) nextIs(i int, b byte) bool { + if len(p.datestr) > i+1 && p.datestr[i+1] == b { + return true + } + return false +} + +func (p *parser) set(start int, val string) { + if start < 0 { + return + } + if len(p.format) < start+len(val) { + return + } + for i, r := range val { + p.format[start+i] = byte(r) + } +} +func (p *parser) setMonth() { + if p.molen == 2 { + p.set(p.moi, "01") + } else if p.molen == 1 { + p.set(p.moi, "1") + } +} + +func (p *parser) setDay() { + if p.daylen == 2 { + p.set(p.dayi, "02") + } else if p.daylen == 1 { + p.set(p.dayi, "2") + } +} +func (p *parser) setYear() { + if p.yearlen == 2 { + p.set(p.yeari, "06") + } else if p.yearlen == 4 { + p.set(p.yeari, "2006") + } +} 
+func (p *parser) coalesceDate(end int) { + if p.yeari > 0 { + if p.yearlen == 0 { + p.yearlen = end - p.yeari + } + p.setYear() + } + if p.moi > 0 && p.molen == 0 { + p.molen = end - p.moi + p.setMonth() + } + if p.dayi > 0 && p.daylen == 0 { + p.daylen = end - p.dayi + p.setDay() + } +} +func (p *parser) ts() string { + return fmt.Sprintf("h:(%d:%d) m:(%d:%d) s:(%d:%d)", p.houri, p.hourlen, p.mini, p.minlen, p.seci, p.seclen) +} +func (p *parser) ds() string { + return fmt.Sprintf("%s d:(%d:%d) m:(%d:%d) y:(%d:%d)", p.datestr, p.dayi, p.daylen, p.moi, p.molen, p.yeari, p.yearlen) +} +func (p *parser) coalesceTime(end int) { + // 03:04:05 + // 15:04:05 + // 3:04:05 + // 3:4:5 + // 15:04:05.00 + if p.houri > 0 { + if p.hourlen == 2 { + p.set(p.houri, "15") + } else if p.hourlen == 1 { + p.set(p.houri, "3") + } + } + if p.mini > 0 { + if p.minlen == 0 { + p.minlen = end - p.mini + } + if p.minlen == 2 { + p.set(p.mini, "04") + } else { + p.set(p.mini, "4") + } + } + if p.seci > 0 { + if p.seclen == 0 { + p.seclen = end - p.seci + } + if p.seclen == 2 { + p.set(p.seci, "05") + } else { + p.set(p.seci, "5") + } + } + + if p.msi > 0 { + for i := 0; i < p.mslen; i++ { + p.format[p.msi+i] = '0' + } + } +} +func (p *parser) setFullMonth(month string) { + if p.moi == 0 { + p.format = []byte(fmt.Sprintf("%s%s", "January", p.format[len(month):])) + } +} + +func (p *parser) trimExtra() { + if p.extra > 0 && len(p.format) > p.extra { + p.format = p.format[0:p.extra] + p.datestr = p.datestr[0:p.extra] + } +} + +// func (p *parser) remove(i, length int) { +// if len(p.format) > i+length { +// //append(a[:i], a[j:]...) +// p.format = append(p.format[0:i], p.format[i+length:]...) +// } +// if len(p.datestr) > i+length { +// //append(a[:i], a[j:]...) 
+// p.datestr = fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+length:]) +// } +// } + +func (p *parser) parse() (time.Time, error) { + if p.t != nil { + return *p.t, nil + } + if len(p.fullMonth) > 0 { + p.setFullMonth(p.fullMonth) + } + if p.skip > 0 && len(p.format) > p.skip { + p.format = p.format[p.skip:] + p.datestr = p.datestr[p.skip:] + } + + if p.loc == nil { + // gou.Debugf("parse layout=%q input=%q \ntx, err := time.Parse(%q, %q)", string(p.format), p.datestr, string(p.format), p.datestr) + return time.Parse(string(p.format), p.datestr) + } + //gou.Debugf("parse layout=%q input=%q \ntx, err := time.ParseInLocation(%q, %q, %v)", string(p.format), p.datestr, string(p.format), p.datestr, p.loc) + return time.ParseInLocation(string(p.format), p.datestr, p.loc) +} +func isDay(alpha string) bool { + for _, day := range days { + if alpha == day { + return true + } + } + return false +} +func isMonthFull(alpha string) bool { + for _, month := range months { + if alpha == month { + return true + } + } + return false +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9c67374e466..51b94eac83c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -78,6 +78,9 @@ github.com/alexedwards/argon2id # github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 ## explicit github.com/amoghe/go-crypt +# github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de +## explicit; go 1.12 +github.com/araddon/dateparse # github.com/armon/go-metrics v0.4.1 ## explicit; go 1.12 github.com/armon/go-metrics From 824bdd49929a9eac4acdeedf643850a55bfaf5d9 Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Sat, 9 Sep 2023 13:07:12 +0200 Subject: [PATCH 4/8] enhancement: add the ability to decide how kql nodes get connected connecting nodes (with edges) seem straight forward when not using group, the default connection for nodes with the same node is always OR. 
THis only applies for first level nodes, for grouped nodes it is defined differently. The KQL docs are saying, nodes inside a grouped node, with the same key are connected by a AND edge. --- services/search/pkg/query/kql/cast.go | 18 +- services/search/pkg/query/kql/connect.go | 158 +++++ services/search/pkg/query/kql/const.go | 1 - services/search/pkg/query/kql/dictionary.peg | 27 +- .../search/pkg/query/kql/dictionary_gen.go | 570 +++++++++--------- .../search/pkg/query/kql/dictionary_test.go | 8 +- services/search/pkg/query/kql/factory.go | 29 +- services/search/pkg/query/kql/kql.go | 111 ---- 8 files changed, 474 insertions(+), 448 deletions(-) create mode 100644 services/search/pkg/query/kql/connect.go delete mode 100644 services/search/pkg/query/kql/const.go diff --git a/services/search/pkg/query/kql/cast.go b/services/search/pkg/query/kql/cast.go index 2f7c58c3531..d9710d35ce1 100644 --- a/services/search/pkg/query/kql/cast.go +++ b/services/search/pkg/query/kql/cast.go @@ -23,8 +23,24 @@ func toNodes[T ast.Node](in interface{}) ([]T, error) { switch v := in.(type) { case []T: return v, nil + case T: + return []T{v}, nil + case []interface{}: + var ts []T + for _, inter := range v { + n, err := toNodes[T](inter) + if err != nil { + return nil, err + } + + ts = append(ts, n...) 
+ } + return ts, nil + case nil: + return nil, nil default: - return nil, fmt.Errorf("can't convert '%T' to []ast.Node", in) + var t T + return nil, fmt.Errorf("can't convert '%T' to '%T'", in, t) } } diff --git a/services/search/pkg/query/kql/connect.go b/services/search/pkg/query/kql/connect.go new file mode 100644 index 00000000000..4f7dce66672 --- /dev/null +++ b/services/search/pkg/query/kql/connect.go @@ -0,0 +1,158 @@ +package kql + +import ( + "strings" + + "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" +) + +// connectNodes connects given nodes +func connectNodes(c Connector, nodes ...ast.Node) []ast.Node { + var connectedNodes []ast.Node + + for i := range nodes { + ri := len(nodes) - 1 - i + head := nodes[ri] + pair := []ast.Node{head} + + if connectionNodes := connectNode(c, pair[0], connectedNodes...); len(connectionNodes) >= 1 { + pair = append(pair, connectionNodes...) + } + + connectedNodes = append(pair, connectedNodes...) + } + + return connectedNodes +} + +// connectNode connects a tip node with the rest +func connectNode(c Connector, headNode ast.Node, tailNodes ...ast.Node) []ast.Node { + var nearestNeighborNode ast.Node + var nearestNeighborOperators []*ast.OperatorNode + +l: + for _, tailNode := range tailNodes { + switch node := tailNode.(type) { + case *ast.OperatorNode: + nearestNeighborOperators = append(nearestNeighborOperators, node) + default: + nearestNeighborNode = node + break l + } + } + + if nearestNeighborNode == nil { + return nil + } + + return c.Connect(headNode, nearestNeighborNode, nearestNeighborOperators) +} + +// Connector is responsible to decide what node connections are needed +type Connector interface { + Connect(head ast.Node, neighbor ast.Node, connections []*ast.OperatorNode) []ast.Node +} + +// DefaultConnector is the default node connector +type DefaultConnector struct { + sameKeyOPValue string +} + +// Connect implements the Connector interface and is used to connect the nodes using +// the 
default logic defined by the kql spec. +func (c DefaultConnector) Connect(head ast.Node, neighbor ast.Node, connections []*ast.OperatorNode) []ast.Node { + switch head.(type) { + case *ast.OperatorNode: + return nil + } + + headKey := strings.ToLower(c.nodeKey(head)) + neighborKey := strings.ToLower(c.nodeKey(neighbor)) + + connection := &ast.OperatorNode{ + Base: &ast.Base{Loc: &ast.Location{Source: &[]string{"implicitly operator"}[0]}}, + Value: BoolAND, + } + + // if the current node and the neighbor node have the same key + // the connection is of type OR, same applies if no keys are in place + // + // "" == "" + // + // spec: same + // author:"John Smith" author:"Jane Smith" + // author:"John Smith" OR author:"Jane Smith" + // + // nodes inside of group nodes are handled differently, + // if no explicit operator give, it uses OR + // + // spec: same + // author:"John Smith" AND author:"Jane Smith" + // author:("John Smith" "Jane Smith") + if headKey == neighborKey { + connection.Value = c.sameKeyOPValue + } + + // decisions based on nearest neighbor node + switch neighbor.(type) { + // nearest neighbor node type could change the default case + // docs says, if the next value node: + // + // is a group AND has no key + // + // even if the current node has none too, which normal leads to SAME KEY OR + // + // it should be an AND edge + // + // spec: same + // cat (dog OR fox) + // cat AND (dog OR fox) + // + // note: + // sounds contradictory to me + case *ast.GroupNode: + if headKey == "" && neighborKey == "" { + connection.Value = BoolAND + } + } + + // decisions based on nearest neighbor operators + for i, node := range connections { + // consider direct neighbor operator only + if i == 0 { + // no connection is necessary here because an `AND` or `OR` edge is already present + // exit + for _, skipValue := range []string{BoolOR, BoolAND} { + if node.Value == skipValue { + return nil + } + } + + // if neighbor node negotiates, AND edge is needed + // + // 
spec: same + // cat -dog + // cat AND NOT dog + if node.Value == BoolNOT { + connection.Value = BoolAND + } + } + } + + return []ast.Node{connection} +} + +func (c DefaultConnector) nodeKey(n ast.Node) string { + switch node := n.(type) { + case *ast.StringNode: + return node.Key + case *ast.DateTimeNode: + return node.Key + case *ast.BooleanNode: + return node.Key + case *ast.GroupNode: + return node.Key + default: + return "" + } +} diff --git a/services/search/pkg/query/kql/const.go b/services/search/pkg/query/kql/const.go deleted file mode 100644 index f3e5818bd78..00000000000 --- a/services/search/pkg/query/kql/const.go +++ /dev/null @@ -1 +0,0 @@ -package kql diff --git a/services/search/pkg/query/kql/dictionary.peg b/services/search/pkg/query/kql/dictionary.peg index df8784d807c..999d25bb90b 100644 --- a/services/search/pkg/query/kql/dictionary.peg +++ b/services/search/pkg/query/kql/dictionary.peg @@ -10,19 +10,22 @@ AST <- _ !( OperatorBooleanAndNode / OperatorBooleanOrNode - ) _ nodes:Nodes _ { - return buildAST(nodes, c.text, c.pos) + ) n:Nodes { + return buildAST(n, c.text, c.pos) } +//////////////////////////////////////////////////////// +// nodes +//////////////////////////////////////////////////////// + Nodes <- - _ head:( - GroupNode / - PropertyRestrictionNodes / - OperatorBooleanNodes / - FreeTextKeywordNodes - ) _ tail:Nodes? 
{ - return buildNodes(head, tail) - } + (_ Node)+ + +Node <- + GroupNode / + PropertyRestrictionNodes / + OperatorBooleanNodes / + FreeTextKeywordNodes //////////////////////////////////////////////////////// // nesting @@ -210,4 +213,6 @@ Digit <- } _ <- - [ \t]* + [ \t]* { + return nil, nil + } diff --git a/services/search/pkg/query/kql/dictionary_gen.go b/services/search/pkg/query/kql/dictionary_gen.go index 8f83d73d105..ba3f637b3f1 100644 --- a/services/search/pkg/query/kql/dictionary_gen.go +++ b/services/search/pkg/query/kql/dictionary_gen.go @@ -48,138 +48,118 @@ var g = &grammar{ }, }, }, - &ruleRefExpr{ - pos: position{line: 13, col: 7, offset: 228}, - name: "_", - }, &labeledExpr{ - pos: position{line: 13, col: 9, offset: 230}, - label: "nodes", + pos: position{line: 13, col: 7, offset: 228}, + label: "n", expr: &ruleRefExpr{ - pos: position{line: 13, col: 15, offset: 236}, + pos: position{line: 13, col: 9, offset: 230}, name: "Nodes", }, }, - &ruleRefExpr{ - pos: position{line: 13, col: 21, offset: 242}, - name: "_", - }, }, }, }, }, { name: "Nodes", - pos: position{line: 17, col: 1, offset: 299}, - expr: &actionExpr{ - pos: position{line: 18, col: 5, offset: 312}, - run: (*parser).callonNodes1, + pos: position{line: 21, col: 1, offset: 411}, + expr: &oneOrMoreExpr{ + pos: position{line: 22, col: 5, offset: 424}, expr: &seqExpr{ - pos: position{line: 18, col: 5, offset: 312}, + pos: position{line: 22, col: 6, offset: 425}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 18, col: 5, offset: 312}, + pos: position{line: 22, col: 6, offset: 425}, name: "_", }, - &labeledExpr{ - pos: position{line: 18, col: 7, offset: 314}, - label: "head", - expr: &choiceExpr{ - pos: position{line: 19, col: 9, offset: 329}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 19, col: 9, offset: 329}, - name: "GroupNode", - }, - &ruleRefExpr{ - pos: position{line: 20, col: 9, offset: 349}, - name: "PropertyRestrictionNodes", - }, - &ruleRefExpr{ - pos: 
position{line: 21, col: 9, offset: 384}, - name: "OperatorBooleanNodes", - }, - &ruleRefExpr{ - pos: position{line: 22, col: 9, offset: 415}, - name: "FreeTextKeywordNodes", - }, - }, - }, - }, &ruleRefExpr{ - pos: position{line: 23, col: 7, offset: 442}, - name: "_", - }, - &labeledExpr{ - pos: position{line: 23, col: 9, offset: 444}, - label: "tail", - expr: &zeroOrOneExpr{ - pos: position{line: 23, col: 14, offset: 449}, - expr: &ruleRefExpr{ - pos: position{line: 23, col: 14, offset: 449}, - name: "Nodes", - }, - }, + pos: position{line: 22, col: 8, offset: 427}, + name: "Node", }, }, }, }, }, + { + name: "Node", + pos: position{line: 24, col: 1, offset: 435}, + expr: &choiceExpr{ + pos: position{line: 25, col: 5, offset: 447}, + alternatives: []any{ + &ruleRefExpr{ + pos: position{line: 25, col: 5, offset: 447}, + name: "GroupNode", + }, + &ruleRefExpr{ + pos: position{line: 26, col: 5, offset: 463}, + name: "PropertyRestrictionNodes", + }, + &ruleRefExpr{ + pos: position{line: 27, col: 5, offset: 494}, + name: "OperatorBooleanNodes", + }, + &ruleRefExpr{ + pos: position{line: 28, col: 5, offset: 521}, + name: "FreeTextKeywordNodes", + }, + }, + }, + }, { name: "GroupNode", - pos: position{line: 31, col: 1, offset: 627}, + pos: position{line: 34, col: 1, offset: 669}, expr: &actionExpr{ - pos: position{line: 32, col: 5, offset: 644}, + pos: position{line: 35, col: 5, offset: 686}, run: (*parser).callonGroupNode1, expr: &seqExpr{ - pos: position{line: 32, col: 5, offset: 644}, + pos: position{line: 35, col: 5, offset: 686}, exprs: []any{ &labeledExpr{ - pos: position{line: 32, col: 5, offset: 644}, + pos: position{line: 35, col: 5, offset: 686}, label: "k", expr: &zeroOrOneExpr{ - pos: position{line: 32, col: 7, offset: 646}, + pos: position{line: 35, col: 7, offset: 688}, expr: &oneOrMoreExpr{ - pos: position{line: 32, col: 8, offset: 647}, + pos: position{line: 35, col: 8, offset: 689}, expr: &ruleRefExpr{ - pos: position{line: 32, col: 8, offset: 647}, + 
pos: position{line: 35, col: 8, offset: 689}, name: "Char", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 32, col: 16, offset: 655}, + pos: position{line: 35, col: 16, offset: 697}, expr: &choiceExpr{ - pos: position{line: 32, col: 17, offset: 656}, + pos: position{line: 35, col: 17, offset: 698}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 32, col: 17, offset: 656}, + pos: position{line: 35, col: 17, offset: 698}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 32, col: 37, offset: 676}, + pos: position{line: 35, col: 37, offset: 718}, name: "OperatorEqualNode", }, }, }, }, &litMatcher{ - pos: position{line: 32, col: 57, offset: 696}, + pos: position{line: 35, col: 57, offset: 738}, val: "(", ignoreCase: false, want: "\"(\"", }, &labeledExpr{ - pos: position{line: 32, col: 61, offset: 700}, + pos: position{line: 35, col: 61, offset: 742}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 32, col: 63, offset: 702}, + pos: position{line: 35, col: 63, offset: 744}, name: "Nodes", }, }, &litMatcher{ - pos: position{line: 32, col: 69, offset: 708}, + pos: position{line: 35, col: 69, offset: 750}, val: ")", ignoreCase: false, want: "\")\"", @@ -190,20 +170,20 @@ var g = &grammar{ }, { name: "PropertyRestrictionNodes", - pos: position{line: 40, col: 1, offset: 912}, + pos: position{line: 43, col: 1, offset: 954}, expr: &choiceExpr{ - pos: position{line: 41, col: 5, offset: 944}, + pos: position{line: 44, col: 5, offset: 986}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 41, col: 5, offset: 944}, + pos: position{line: 44, col: 5, offset: 986}, name: "YesNoPropertyRestrictionNode", }, &ruleRefExpr{ - pos: position{line: 42, col: 5, offset: 979}, + pos: position{line: 45, col: 5, offset: 1021}, name: "DateTimeRestrictionNode", }, &ruleRefExpr{ - pos: position{line: 43, col: 5, offset: 1009}, + pos: position{line: 46, col: 5, offset: 1051}, name: "TextPropertyRestrictionNode", }, }, @@ -211,51 +191,51 @@ var g = &grammar{ }, 
{ name: "YesNoPropertyRestrictionNode", - pos: position{line: 45, col: 1, offset: 1038}, + pos: position{line: 48, col: 1, offset: 1080}, expr: &actionExpr{ - pos: position{line: 46, col: 5, offset: 1074}, + pos: position{line: 49, col: 5, offset: 1116}, run: (*parser).callonYesNoPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 46, col: 5, offset: 1074}, + pos: position{line: 49, col: 5, offset: 1116}, exprs: []any{ &labeledExpr{ - pos: position{line: 46, col: 5, offset: 1074}, + pos: position{line: 49, col: 5, offset: 1116}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 46, col: 7, offset: 1076}, + pos: position{line: 49, col: 7, offset: 1118}, expr: &ruleRefExpr{ - pos: position{line: 46, col: 7, offset: 1076}, + pos: position{line: 49, col: 7, offset: 1118}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 46, col: 14, offset: 1083}, + pos: position{line: 49, col: 14, offset: 1125}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 46, col: 14, offset: 1083}, + pos: position{line: 49, col: 14, offset: 1125}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 46, col: 34, offset: 1103}, + pos: position{line: 49, col: 34, offset: 1145}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 46, col: 53, offset: 1122}, + pos: position{line: 49, col: 53, offset: 1164}, label: "v", expr: &choiceExpr{ - pos: position{line: 46, col: 56, offset: 1125}, + pos: position{line: 49, col: 56, offset: 1167}, alternatives: []any{ &litMatcher{ - pos: position{line: 46, col: 56, offset: 1125}, + pos: position{line: 49, col: 56, offset: 1167}, val: "true", ignoreCase: false, want: "\"true\"", }, &litMatcher{ - pos: position{line: 46, col: 65, offset: 1134}, + pos: position{line: 49, col: 65, offset: 1176}, val: "false", ignoreCase: false, want: "\"false\"", @@ -269,91 +249,91 @@ var g = &grammar{ }, { name: "DateTimeRestrictionNode", - pos: position{line: 50, col: 1, offset: 1204}, + pos: position{line: 53, 
col: 1, offset: 1246}, expr: &actionExpr{ - pos: position{line: 51, col: 5, offset: 1235}, + pos: position{line: 54, col: 5, offset: 1277}, run: (*parser).callonDateTimeRestrictionNode1, expr: &seqExpr{ - pos: position{line: 51, col: 5, offset: 1235}, + pos: position{line: 54, col: 5, offset: 1277}, exprs: []any{ &labeledExpr{ - pos: position{line: 51, col: 5, offset: 1235}, + pos: position{line: 54, col: 5, offset: 1277}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 51, col: 7, offset: 1237}, + pos: position{line: 54, col: 7, offset: 1279}, expr: &ruleRefExpr{ - pos: position{line: 51, col: 7, offset: 1237}, + pos: position{line: 54, col: 7, offset: 1279}, name: "Char", }, }, }, &labeledExpr{ - pos: position{line: 51, col: 13, offset: 1243}, + pos: position{line: 54, col: 13, offset: 1285}, label: "o", expr: &choiceExpr{ - pos: position{line: 52, col: 9, offset: 1255}, + pos: position{line: 55, col: 9, offset: 1297}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 52, col: 9, offset: 1255}, + pos: position{line: 55, col: 9, offset: 1297}, name: "OperatorGreaterOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 53, col: 9, offset: 1292}, + pos: position{line: 56, col: 9, offset: 1334}, name: "OperatorLessOrEqualNode", }, &ruleRefExpr{ - pos: position{line: 54, col: 9, offset: 1326}, + pos: position{line: 57, col: 9, offset: 1368}, name: "OperatorGreaterNode", }, &ruleRefExpr{ - pos: position{line: 55, col: 9, offset: 1356}, + pos: position{line: 58, col: 9, offset: 1398}, name: "OperatorLessNode", }, &ruleRefExpr{ - pos: position{line: 56, col: 9, offset: 1383}, + pos: position{line: 59, col: 9, offset: 1425}, name: "OperatorEqualNode", }, &ruleRefExpr{ - pos: position{line: 57, col: 9, offset: 1411}, + pos: position{line: 60, col: 9, offset: 1453}, name: "OperatorColonNode", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 58, col: 7, offset: 1435}, + pos: position{line: 61, col: 7, offset: 1477}, expr: &litMatcher{ - pos: position{line: 58, 
col: 7, offset: 1435}, + pos: position{line: 61, col: 7, offset: 1477}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, }, &labeledExpr{ - pos: position{line: 58, col: 12, offset: 1440}, + pos: position{line: 61, col: 12, offset: 1482}, label: "v", expr: &choiceExpr{ - pos: position{line: 59, col: 9, offset: 1452}, + pos: position{line: 62, col: 9, offset: 1494}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 59, col: 9, offset: 1452}, + pos: position{line: 62, col: 9, offset: 1494}, name: "DateTime", }, &ruleRefExpr{ - pos: position{line: 60, col: 9, offset: 1471}, + pos: position{line: 63, col: 9, offset: 1513}, name: "FullDate", }, &ruleRefExpr{ - pos: position{line: 61, col: 9, offset: 1490}, + pos: position{line: 64, col: 9, offset: 1532}, name: "FullTime", }, }, }, }, &zeroOrOneExpr{ - pos: position{line: 62, col: 7, offset: 1505}, + pos: position{line: 65, col: 7, offset: 1547}, expr: &litMatcher{ - pos: position{line: 62, col: 7, offset: 1505}, + pos: position{line: 65, col: 7, offset: 1547}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -365,51 +345,51 @@ var g = &grammar{ }, { name: "TextPropertyRestrictionNode", - pos: position{line: 66, col: 1, offset: 1576}, + pos: position{line: 69, col: 1, offset: 1618}, expr: &actionExpr{ - pos: position{line: 67, col: 5, offset: 1611}, + pos: position{line: 70, col: 5, offset: 1653}, run: (*parser).callonTextPropertyRestrictionNode1, expr: &seqExpr{ - pos: position{line: 67, col: 5, offset: 1611}, + pos: position{line: 70, col: 5, offset: 1653}, exprs: []any{ &labeledExpr{ - pos: position{line: 67, col: 5, offset: 1611}, + pos: position{line: 70, col: 5, offset: 1653}, label: "k", expr: &oneOrMoreExpr{ - pos: position{line: 67, col: 7, offset: 1613}, + pos: position{line: 70, col: 7, offset: 1655}, expr: &ruleRefExpr{ - pos: position{line: 67, col: 7, offset: 1613}, + pos: position{line: 70, col: 7, offset: 1655}, name: "Char", }, }, }, &choiceExpr{ - pos: position{line: 67, col: 14, offset: 
1620}, + pos: position{line: 70, col: 14, offset: 1662}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 67, col: 14, offset: 1620}, + pos: position{line: 70, col: 14, offset: 1662}, name: "OperatorColonNode", }, &ruleRefExpr{ - pos: position{line: 67, col: 34, offset: 1640}, + pos: position{line: 70, col: 34, offset: 1682}, name: "OperatorEqualNode", }, }, }, &labeledExpr{ - pos: position{line: 67, col: 53, offset: 1659}, + pos: position{line: 70, col: 53, offset: 1701}, label: "v", expr: &choiceExpr{ - pos: position{line: 67, col: 56, offset: 1662}, + pos: position{line: 70, col: 56, offset: 1704}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 67, col: 56, offset: 1662}, + pos: position{line: 70, col: 56, offset: 1704}, name: "String", }, &oneOrMoreExpr{ - pos: position{line: 67, col: 65, offset: 1671}, + pos: position{line: 70, col: 65, offset: 1713}, expr: &charClassMatcher{ - pos: position{line: 67, col: 65, offset: 1671}, + pos: position{line: 70, col: 65, offset: 1713}, val: "[^ ()]", chars: []rune{' ', '(', ')'}, ignoreCase: false, @@ -425,16 +405,16 @@ var g = &grammar{ }, { name: "FreeTextKeywordNodes", - pos: position{line: 75, col: 1, offset: 1877}, + pos: position{line: 78, col: 1, offset: 1919}, expr: &choiceExpr{ - pos: position{line: 76, col: 5, offset: 1905}, + pos: position{line: 79, col: 5, offset: 1947}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 76, col: 5, offset: 1905}, + pos: position{line: 79, col: 5, offset: 1947}, name: "PhraseNode", }, &ruleRefExpr{ - pos: position{line: 77, col: 5, offset: 1922}, + pos: position{line: 80, col: 5, offset: 1964}, name: "WordNode", }, }, @@ -442,40 +422,40 @@ var g = &grammar{ }, { name: "PhraseNode", - pos: position{line: 79, col: 1, offset: 1932}, + pos: position{line: 82, col: 1, offset: 1974}, expr: &actionExpr{ - pos: position{line: 80, col: 6, offset: 1951}, + pos: position{line: 83, col: 6, offset: 1993}, run: (*parser).callonPhraseNode1, expr: &seqExpr{ - pos: 
position{line: 80, col: 6, offset: 1951}, + pos: position{line: 83, col: 6, offset: 1993}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 80, col: 6, offset: 1951}, + pos: position{line: 83, col: 6, offset: 1993}, expr: &ruleRefExpr{ - pos: position{line: 80, col: 6, offset: 1951}, + pos: position{line: 83, col: 6, offset: 1993}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 80, col: 25, offset: 1970}, + pos: position{line: 83, col: 25, offset: 2012}, name: "_", }, &labeledExpr{ - pos: position{line: 80, col: 27, offset: 1972}, + pos: position{line: 83, col: 27, offset: 2014}, label: "v", expr: &ruleRefExpr{ - pos: position{line: 80, col: 29, offset: 1974}, + pos: position{line: 83, col: 29, offset: 2016}, name: "String", }, }, &ruleRefExpr{ - pos: position{line: 80, col: 36, offset: 1981}, + pos: position{line: 83, col: 36, offset: 2023}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 80, col: 38, offset: 1983}, + pos: position{line: 83, col: 38, offset: 2025}, expr: &ruleRefExpr{ - pos: position{line: 80, col: 38, offset: 1983}, + pos: position{line: 83, col: 38, offset: 2025}, name: "OperatorColonNode", }, }, @@ -485,31 +465,31 @@ var g = &grammar{ }, { name: "WordNode", - pos: position{line: 84, col: 1, offset: 2064}, + pos: position{line: 87, col: 1, offset: 2106}, expr: &actionExpr{ - pos: position{line: 85, col: 6, offset: 2081}, + pos: position{line: 88, col: 6, offset: 2123}, run: (*parser).callonWordNode1, expr: &seqExpr{ - pos: position{line: 85, col: 6, offset: 2081}, + pos: position{line: 88, col: 6, offset: 2123}, exprs: []any{ &zeroOrOneExpr{ - pos: position{line: 85, col: 6, offset: 2081}, + pos: position{line: 88, col: 6, offset: 2123}, expr: &ruleRefExpr{ - pos: position{line: 85, col: 6, offset: 2081}, + pos: position{line: 88, col: 6, offset: 2123}, name: "OperatorColonNode", }, }, &ruleRefExpr{ - pos: position{line: 85, col: 25, offset: 2100}, + pos: position{line: 88, col: 25, offset: 2142}, name: "_", }, 
&labeledExpr{ - pos: position{line: 85, col: 27, offset: 2102}, + pos: position{line: 88, col: 27, offset: 2144}, label: "v", expr: &oneOrMoreExpr{ - pos: position{line: 85, col: 29, offset: 2104}, + pos: position{line: 88, col: 29, offset: 2146}, expr: &charClassMatcher{ - pos: position{line: 85, col: 29, offset: 2104}, + pos: position{line: 88, col: 29, offset: 2146}, val: "[^ :()]", chars: []rune{' ', ':', '(', ')'}, ignoreCase: false, @@ -518,13 +498,13 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 85, col: 38, offset: 2113}, + pos: position{line: 88, col: 38, offset: 2155}, name: "_", }, &zeroOrOneExpr{ - pos: position{line: 85, col: 40, offset: 2115}, + pos: position{line: 88, col: 40, offset: 2157}, expr: &ruleRefExpr{ - pos: position{line: 85, col: 40, offset: 2115}, + pos: position{line: 88, col: 40, offset: 2157}, name: "OperatorColonNode", }, }, @@ -534,20 +514,20 @@ var g = &grammar{ }, { name: "OperatorBooleanNodes", - pos: position{line: 93, col: 1, offset: 2324}, + pos: position{line: 96, col: 1, offset: 2366}, expr: &choiceExpr{ - pos: position{line: 94, col: 5, offset: 2352}, + pos: position{line: 97, col: 5, offset: 2394}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 94, col: 5, offset: 2352}, + pos: position{line: 97, col: 5, offset: 2394}, name: "OperatorBooleanAndNode", }, &ruleRefExpr{ - pos: position{line: 95, col: 5, offset: 2381}, + pos: position{line: 98, col: 5, offset: 2423}, name: "OperatorBooleanNotNode", }, &ruleRefExpr{ - pos: position{line: 96, col: 5, offset: 2410}, + pos: position{line: 99, col: 5, offset: 2452}, name: "OperatorBooleanOrNode", }, }, @@ -555,21 +535,21 @@ var g = &grammar{ }, { name: "OperatorBooleanAndNode", - pos: position{line: 98, col: 1, offset: 2433}, + pos: position{line: 101, col: 1, offset: 2475}, expr: &actionExpr{ - pos: position{line: 99, col: 5, offset: 2463}, + pos: position{line: 102, col: 5, offset: 2505}, run: (*parser).callonOperatorBooleanAndNode1, expr: &choiceExpr{ 
- pos: position{line: 99, col: 6, offset: 2464}, + pos: position{line: 102, col: 6, offset: 2506}, alternatives: []any{ &litMatcher{ - pos: position{line: 99, col: 6, offset: 2464}, + pos: position{line: 102, col: 6, offset: 2506}, val: "AND", ignoreCase: false, want: "\"AND\"", }, &litMatcher{ - pos: position{line: 99, col: 14, offset: 2472}, + pos: position{line: 102, col: 14, offset: 2514}, val: "+", ignoreCase: false, want: "\"+\"", @@ -580,21 +560,21 @@ var g = &grammar{ }, { name: "OperatorBooleanNotNode", - pos: position{line: 103, col: 1, offset: 2534}, + pos: position{line: 106, col: 1, offset: 2576}, expr: &actionExpr{ - pos: position{line: 104, col: 5, offset: 2564}, + pos: position{line: 107, col: 5, offset: 2606}, run: (*parser).callonOperatorBooleanNotNode1, expr: &choiceExpr{ - pos: position{line: 104, col: 6, offset: 2565}, + pos: position{line: 107, col: 6, offset: 2607}, alternatives: []any{ &litMatcher{ - pos: position{line: 104, col: 6, offset: 2565}, + pos: position{line: 107, col: 6, offset: 2607}, val: "NOT", ignoreCase: false, want: "\"NOT\"", }, &litMatcher{ - pos: position{line: 104, col: 14, offset: 2573}, + pos: position{line: 107, col: 14, offset: 2615}, val: "-", ignoreCase: false, want: "\"-\"", @@ -605,12 +585,12 @@ var g = &grammar{ }, { name: "OperatorBooleanOrNode", - pos: position{line: 108, col: 1, offset: 2635}, + pos: position{line: 111, col: 1, offset: 2677}, expr: &actionExpr{ - pos: position{line: 109, col: 5, offset: 2664}, + pos: position{line: 112, col: 5, offset: 2706}, run: (*parser).callonOperatorBooleanOrNode1, expr: &litMatcher{ - pos: position{line: 109, col: 6, offset: 2665}, + pos: position{line: 112, col: 6, offset: 2707}, val: "OR", ignoreCase: false, want: "\"OR\"", @@ -619,12 +599,12 @@ var g = &grammar{ }, { name: "OperatorColonNode", - pos: position{line: 113, col: 1, offset: 2728}, + pos: position{line: 116, col: 1, offset: 2770}, expr: &actionExpr{ - pos: position{line: 114, col: 5, offset: 2753}, + pos: 
position{line: 117, col: 5, offset: 2795}, run: (*parser).callonOperatorColonNode1, expr: &litMatcher{ - pos: position{line: 114, col: 5, offset: 2753}, + pos: position{line: 117, col: 5, offset: 2795}, val: ":", ignoreCase: false, want: "\":\"", @@ -633,12 +613,12 @@ var g = &grammar{ }, { name: "OperatorEqualNode", - pos: position{line: 118, col: 1, offset: 2814}, + pos: position{line: 121, col: 1, offset: 2856}, expr: &actionExpr{ - pos: position{line: 119, col: 5, offset: 2839}, + pos: position{line: 122, col: 5, offset: 2881}, run: (*parser).callonOperatorEqualNode1, expr: &litMatcher{ - pos: position{line: 119, col: 5, offset: 2839}, + pos: position{line: 122, col: 5, offset: 2881}, val: "=", ignoreCase: false, want: "\"=\"", @@ -647,12 +627,12 @@ var g = &grammar{ }, { name: "OperatorLessNode", - pos: position{line: 123, col: 1, offset: 2900}, + pos: position{line: 126, col: 1, offset: 2942}, expr: &actionExpr{ - pos: position{line: 124, col: 5, offset: 2924}, + pos: position{line: 127, col: 5, offset: 2966}, run: (*parser).callonOperatorLessNode1, expr: &litMatcher{ - pos: position{line: 124, col: 5, offset: 2924}, + pos: position{line: 127, col: 5, offset: 2966}, val: "<", ignoreCase: false, want: "\"<\"", @@ -661,12 +641,12 @@ var g = &grammar{ }, { name: "OperatorLessOrEqualNode", - pos: position{line: 128, col: 1, offset: 2985}, + pos: position{line: 131, col: 1, offset: 3027}, expr: &actionExpr{ - pos: position{line: 129, col: 5, offset: 3016}, + pos: position{line: 132, col: 5, offset: 3058}, run: (*parser).callonOperatorLessOrEqualNode1, expr: &litMatcher{ - pos: position{line: 129, col: 5, offset: 3016}, + pos: position{line: 132, col: 5, offset: 3058}, val: "<=", ignoreCase: false, want: "\"<=\"", @@ -675,12 +655,12 @@ var g = &grammar{ }, { name: "OperatorGreaterNode", - pos: position{line: 133, col: 1, offset: 3078}, + pos: position{line: 136, col: 1, offset: 3120}, expr: &actionExpr{ - pos: position{line: 134, col: 5, offset: 3105}, + pos: 
position{line: 137, col: 5, offset: 3147}, run: (*parser).callonOperatorGreaterNode1, expr: &litMatcher{ - pos: position{line: 134, col: 5, offset: 3105}, + pos: position{line: 137, col: 5, offset: 3147}, val: ">", ignoreCase: false, want: "\">\"", @@ -689,12 +669,12 @@ var g = &grammar{ }, { name: "OperatorGreaterOrEqualNode", - pos: position{line: 138, col: 1, offset: 3166}, + pos: position{line: 141, col: 1, offset: 3208}, expr: &actionExpr{ - pos: position{line: 139, col: 5, offset: 3200}, + pos: position{line: 142, col: 5, offset: 3242}, run: (*parser).callonOperatorGreaterOrEqualNode1, expr: &litMatcher{ - pos: position{line: 139, col: 5, offset: 3200}, + pos: position{line: 142, col: 5, offset: 3242}, val: ">=", ignoreCase: false, want: "\">=\"", @@ -703,27 +683,27 @@ var g = &grammar{ }, { name: "TimeYear", - pos: position{line: 148, col: 1, offset: 3386}, + pos: position{line: 151, col: 1, offset: 3428}, expr: &actionExpr{ - pos: position{line: 149, col: 5, offset: 3402}, + pos: position{line: 152, col: 5, offset: 3444}, run: (*parser).callonTimeYear1, expr: &seqExpr{ - pos: position{line: 149, col: 5, offset: 3402}, + pos: position{line: 152, col: 5, offset: 3444}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 149, col: 5, offset: 3402}, + pos: position{line: 152, col: 5, offset: 3444}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 149, col: 11, offset: 3408}, + pos: position{line: 152, col: 11, offset: 3450}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 149, col: 17, offset: 3414}, + pos: position{line: 152, col: 17, offset: 3456}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 149, col: 23, offset: 3420}, + pos: position{line: 152, col: 23, offset: 3462}, name: "Digit", }, }, @@ -732,19 +712,19 @@ var g = &grammar{ }, { name: "TimeMonth", - pos: position{line: 153, col: 1, offset: 3462}, + pos: position{line: 156, col: 1, offset: 3504}, expr: &actionExpr{ - pos: position{line: 154, col: 5, offset: 3479}, + pos: position{line: 
157, col: 5, offset: 3521}, run: (*parser).callonTimeMonth1, expr: &seqExpr{ - pos: position{line: 154, col: 5, offset: 3479}, + pos: position{line: 157, col: 5, offset: 3521}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 154, col: 5, offset: 3479}, + pos: position{line: 157, col: 5, offset: 3521}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 154, col: 11, offset: 3485}, + pos: position{line: 157, col: 11, offset: 3527}, name: "Digit", }, }, @@ -753,19 +733,19 @@ var g = &grammar{ }, { name: "TimeDay", - pos: position{line: 158, col: 1, offset: 3527}, + pos: position{line: 161, col: 1, offset: 3569}, expr: &actionExpr{ - pos: position{line: 159, col: 5, offset: 3542}, + pos: position{line: 162, col: 5, offset: 3584}, run: (*parser).callonTimeDay1, expr: &seqExpr{ - pos: position{line: 159, col: 5, offset: 3542}, + pos: position{line: 162, col: 5, offset: 3584}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 159, col: 5, offset: 3542}, + pos: position{line: 162, col: 5, offset: 3584}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 159, col: 11, offset: 3548}, + pos: position{line: 162, col: 11, offset: 3590}, name: "Digit", }, }, @@ -774,19 +754,19 @@ var g = &grammar{ }, { name: "TimeHour", - pos: position{line: 163, col: 1, offset: 3590}, + pos: position{line: 166, col: 1, offset: 3632}, expr: &actionExpr{ - pos: position{line: 164, col: 5, offset: 3606}, + pos: position{line: 167, col: 5, offset: 3648}, run: (*parser).callonTimeHour1, expr: &seqExpr{ - pos: position{line: 164, col: 5, offset: 3606}, + pos: position{line: 167, col: 5, offset: 3648}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 164, col: 5, offset: 3606}, + pos: position{line: 167, col: 5, offset: 3648}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 164, col: 11, offset: 3612}, + pos: position{line: 167, col: 11, offset: 3654}, name: "Digit", }, }, @@ -795,19 +775,19 @@ var g = &grammar{ }, { name: "TimeMinute", - pos: position{line: 168, col: 1, offset: 3654}, 
+ pos: position{line: 171, col: 1, offset: 3696}, expr: &actionExpr{ - pos: position{line: 169, col: 5, offset: 3672}, + pos: position{line: 172, col: 5, offset: 3714}, run: (*parser).callonTimeMinute1, expr: &seqExpr{ - pos: position{line: 169, col: 5, offset: 3672}, + pos: position{line: 172, col: 5, offset: 3714}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 169, col: 5, offset: 3672}, + pos: position{line: 172, col: 5, offset: 3714}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 169, col: 11, offset: 3678}, + pos: position{line: 172, col: 11, offset: 3720}, name: "Digit", }, }, @@ -816,19 +796,19 @@ var g = &grammar{ }, { name: "TimeSecond", - pos: position{line: 173, col: 1, offset: 3720}, + pos: position{line: 176, col: 1, offset: 3762}, expr: &actionExpr{ - pos: position{line: 174, col: 5, offset: 3738}, + pos: position{line: 177, col: 5, offset: 3780}, run: (*parser).callonTimeSecond1, expr: &seqExpr{ - pos: position{line: 174, col: 5, offset: 3738}, + pos: position{line: 177, col: 5, offset: 3780}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 174, col: 5, offset: 3738}, + pos: position{line: 177, col: 5, offset: 3780}, name: "Digit", }, &ruleRefExpr{ - pos: position{line: 174, col: 11, offset: 3744}, + pos: position{line: 177, col: 11, offset: 3786}, name: "Digit", }, }, @@ -837,35 +817,35 @@ var g = &grammar{ }, { name: "FullDate", - pos: position{line: 178, col: 1, offset: 3786}, + pos: position{line: 181, col: 1, offset: 3828}, expr: &actionExpr{ - pos: position{line: 179, col: 5, offset: 3802}, + pos: position{line: 182, col: 5, offset: 3844}, run: (*parser).callonFullDate1, expr: &seqExpr{ - pos: position{line: 179, col: 5, offset: 3802}, + pos: position{line: 182, col: 5, offset: 3844}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 179, col: 5, offset: 3802}, + pos: position{line: 182, col: 5, offset: 3844}, name: "TimeYear", }, &litMatcher{ - pos: position{line: 179, col: 14, offset: 3811}, + pos: position{line: 182, col: 14, 
offset: 3853}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 179, col: 18, offset: 3815}, + pos: position{line: 182, col: 18, offset: 3857}, name: "TimeMonth", }, &litMatcher{ - pos: position{line: 179, col: 28, offset: 3825}, + pos: position{line: 182, col: 28, offset: 3867}, val: "-", ignoreCase: false, want: "\"-\"", }, &ruleRefExpr{ - pos: position{line: 179, col: 32, offset: 3829}, + pos: position{line: 182, col: 32, offset: 3871}, name: "TimeDay", }, }, @@ -874,52 +854,52 @@ var g = &grammar{ }, { name: "FullTime", - pos: position{line: 183, col: 1, offset: 3873}, + pos: position{line: 186, col: 1, offset: 3915}, expr: &actionExpr{ - pos: position{line: 184, col: 5, offset: 3889}, + pos: position{line: 187, col: 5, offset: 3931}, run: (*parser).callonFullTime1, expr: &seqExpr{ - pos: position{line: 184, col: 5, offset: 3889}, + pos: position{line: 187, col: 5, offset: 3931}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 184, col: 5, offset: 3889}, + pos: position{line: 187, col: 5, offset: 3931}, name: "TimeHour", }, &litMatcher{ - pos: position{line: 184, col: 14, offset: 3898}, + pos: position{line: 187, col: 14, offset: 3940}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 184, col: 18, offset: 3902}, + pos: position{line: 187, col: 18, offset: 3944}, name: "TimeMinute", }, &litMatcher{ - pos: position{line: 184, col: 29, offset: 3913}, + pos: position{line: 187, col: 29, offset: 3955}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 184, col: 33, offset: 3917}, + pos: position{line: 187, col: 33, offset: 3959}, name: "TimeSecond", }, &zeroOrOneExpr{ - pos: position{line: 184, col: 44, offset: 3928}, + pos: position{line: 187, col: 44, offset: 3970}, expr: &seqExpr{ - pos: position{line: 184, col: 45, offset: 3929}, + pos: position{line: 187, col: 45, offset: 3971}, exprs: []any{ &litMatcher{ - pos: position{line: 184, col: 45, offset: 3929}, + 
pos: position{line: 187, col: 45, offset: 3971}, val: ".", ignoreCase: false, want: "\".\"", }, &oneOrMoreExpr{ - pos: position{line: 184, col: 49, offset: 3933}, + pos: position{line: 187, col: 49, offset: 3975}, expr: &ruleRefExpr{ - pos: position{line: 184, col: 49, offset: 3933}, + pos: position{line: 187, col: 49, offset: 3975}, name: "Digit", }, }, @@ -927,28 +907,28 @@ var g = &grammar{ }, }, &choiceExpr{ - pos: position{line: 184, col: 59, offset: 3943}, + pos: position{line: 187, col: 59, offset: 3985}, alternatives: []any{ &litMatcher{ - pos: position{line: 184, col: 59, offset: 3943}, + pos: position{line: 187, col: 59, offset: 3985}, val: "Z", ignoreCase: false, want: "\"Z\"", }, &seqExpr{ - pos: position{line: 184, col: 65, offset: 3949}, + pos: position{line: 187, col: 65, offset: 3991}, exprs: []any{ &choiceExpr{ - pos: position{line: 184, col: 66, offset: 3950}, + pos: position{line: 187, col: 66, offset: 3992}, alternatives: []any{ &litMatcher{ - pos: position{line: 184, col: 66, offset: 3950}, + pos: position{line: 187, col: 66, offset: 3992}, val: "+", ignoreCase: false, want: "\"+\"", }, &litMatcher{ - pos: position{line: 184, col: 72, offset: 3956}, + pos: position{line: 187, col: 72, offset: 3998}, val: "-", ignoreCase: false, want: "\"-\"", @@ -956,17 +936,17 @@ var g = &grammar{ }, }, &ruleRefExpr{ - pos: position{line: 184, col: 77, offset: 3961}, + pos: position{line: 187, col: 77, offset: 4003}, name: "TimeHour", }, &litMatcher{ - pos: position{line: 184, col: 86, offset: 3970}, + pos: position{line: 187, col: 86, offset: 4012}, val: ":", ignoreCase: false, want: "\":\"", }, &ruleRefExpr{ - pos: position{line: 184, col: 90, offset: 3974}, + pos: position{line: 187, col: 90, offset: 4016}, name: "TimeMinute", }, }, @@ -979,25 +959,25 @@ var g = &grammar{ }, { name: "DateTime", - pos: position{line: 188, col: 1, offset: 4022}, + pos: position{line: 191, col: 1, offset: 4064}, expr: &actionExpr{ - pos: position{line: 189, col: 5, offset: 
4035}, + pos: position{line: 192, col: 5, offset: 4077}, run: (*parser).callonDateTime1, expr: &seqExpr{ - pos: position{line: 189, col: 5, offset: 4035}, + pos: position{line: 192, col: 5, offset: 4077}, exprs: []any{ &ruleRefExpr{ - pos: position{line: 189, col: 5, offset: 4035}, + pos: position{line: 192, col: 5, offset: 4077}, name: "FullDate", }, &litMatcher{ - pos: position{line: 189, col: 14, offset: 4044}, + pos: position{line: 192, col: 14, offset: 4086}, val: "T", ignoreCase: false, want: "\"T\"", }, &ruleRefExpr{ - pos: position{line: 189, col: 18, offset: 4048}, + pos: position{line: 192, col: 18, offset: 4090}, name: "FullTime", }, }, @@ -1006,12 +986,12 @@ var g = &grammar{ }, { name: "Char", - pos: position{line: 197, col: 1, offset: 4214}, + pos: position{line: 200, col: 1, offset: 4256}, expr: &actionExpr{ - pos: position{line: 198, col: 5, offset: 4226}, + pos: position{line: 201, col: 5, offset: 4268}, run: (*parser).callonChar1, expr: &charClassMatcher{ - pos: position{line: 198, col: 5, offset: 4226}, + pos: position{line: 201, col: 5, offset: 4268}, val: "[A-Za-z]", ranges: []rune{'A', 'Z', 'a', 'z'}, ignoreCase: false, @@ -1021,26 +1001,26 @@ var g = &grammar{ }, { name: "String", - pos: position{line: 202, col: 1, offset: 4271}, + pos: position{line: 205, col: 1, offset: 4313}, expr: &actionExpr{ - pos: position{line: 203, col: 5, offset: 4285}, + pos: position{line: 206, col: 5, offset: 4327}, run: (*parser).callonString1, expr: &seqExpr{ - pos: position{line: 203, col: 5, offset: 4285}, + pos: position{line: 206, col: 5, offset: 4327}, exprs: []any{ &litMatcher{ - pos: position{line: 203, col: 5, offset: 4285}, + pos: position{line: 206, col: 5, offset: 4327}, val: "\"", ignoreCase: false, want: "\"\\\"\"", }, &labeledExpr{ - pos: position{line: 203, col: 9, offset: 4289}, + pos: position{line: 206, col: 9, offset: 4331}, label: "v", expr: &zeroOrMoreExpr{ - pos: position{line: 203, col: 11, offset: 4291}, + pos: position{line: 206, col: 
11, offset: 4333}, expr: &charClassMatcher{ - pos: position{line: 203, col: 11, offset: 4291}, + pos: position{line: 206, col: 11, offset: 4333}, val: "[^\"]", chars: []rune{'"'}, ignoreCase: false, @@ -1049,7 +1029,7 @@ var g = &grammar{ }, }, &litMatcher{ - pos: position{line: 203, col: 17, offset: 4297}, + pos: position{line: 206, col: 17, offset: 4339}, val: "\"", ignoreCase: false, want: "\"\\\"\"", @@ -1060,12 +1040,12 @@ var g = &grammar{ }, { name: "Digit", - pos: position{line: 207, col: 1, offset: 4332}, + pos: position{line: 210, col: 1, offset: 4374}, expr: &actionExpr{ - pos: position{line: 208, col: 5, offset: 4345}, + pos: position{line: 211, col: 5, offset: 4387}, run: (*parser).callonDigit1, expr: &charClassMatcher{ - pos: position{line: 208, col: 5, offset: 4345}, + pos: position{line: 211, col: 5, offset: 4387}, val: "[0-9]", ranges: []rune{'0', '9'}, ignoreCase: false, @@ -1075,41 +1055,34 @@ var g = &grammar{ }, { name: "_", - pos: position{line: 212, col: 1, offset: 4387}, - expr: &zeroOrMoreExpr{ - pos: position{line: 213, col: 5, offset: 4396}, - expr: &charClassMatcher{ - pos: position{line: 213, col: 5, offset: 4396}, - val: "[ \\t]", - chars: []rune{' ', '\t'}, - ignoreCase: false, - inverted: false, + pos: position{line: 215, col: 1, offset: 4429}, + expr: &actionExpr{ + pos: position{line: 216, col: 5, offset: 4438}, + run: (*parser).callon_1, + expr: &zeroOrMoreExpr{ + pos: position{line: 216, col: 5, offset: 4438}, + expr: &charClassMatcher{ + pos: position{line: 216, col: 5, offset: 4438}, + val: "[ \\t]", + chars: []rune{' ', '\t'}, + ignoreCase: false, + inverted: false, + }, }, }, }, }, } -func (c *current) onAST1(nodes any) (any, error) { - return buildAST(nodes, c.text, c.pos) +func (c *current) onAST1(n any) (any, error) { + return buildAST(n, c.text, c.pos) } func (p *parser) callonAST1() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onAST1(stack["nodes"]) -} - -func (c *current) onNodes1(head, 
tail any) (any, error) { - return buildNodes(head, tail) - -} - -func (p *parser) callonNodes1() (any, error) { - stack := p.vstack[len(p.vstack)-1] - _ = stack - return p.cur.onNodes1(stack["head"], stack["tail"]) + return p.cur.onAST1(stack["n"]) } func (c *current) onGroupNode1(k, v any) (any, error) { @@ -1409,6 +1382,17 @@ func (p *parser) callonDigit1() (any, error) { return p.cur.onDigit1() } +func (c *current) on_1() (any, error) { + return nil, nil + +} + +func (p *parser) callon_1() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.on_1() +} + var ( // errNoRule is returned when the grammar to parse has no rule. errNoRule = errors.New("grammar has no rule") diff --git a/services/search/pkg/query/kql/dictionary_test.go b/services/search/pkg/query/kql/dictionary_test.go index 7421f524541..a77d45f2c96 100644 --- a/services/search/pkg/query/kql/dictionary_test.go +++ b/services/search/pkg/query/kql/dictionary_test.go @@ -517,7 +517,7 @@ func TestParse(t *testing.T) { Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "Jane Smith"}, }, }, @@ -582,7 +582,7 @@ func TestParse(t *testing.T) { Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "Jane"}, }, }, @@ -606,7 +606,7 @@ func TestParse(t *testing.T) { Key: "author", Nodes: []ast.Node{ &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "Jane"}, }, }, @@ -845,7 +845,7 @@ func TestParse(t *testing.T) { } if diff := test.DiffAst(tt.expectedAst, parsedAST); diff != "" { - t.Fatalf("AST mismatch \nquery: '%s' \n(-want +got): %s", q, diff) + t.Fatalf("AST mismatch \nquery: '%s' \n(-expected +got): %s", q, diff) } }) } diff --git 
a/services/search/pkg/query/kql/factory.go b/services/search/pkg/query/kql/factory.go index bdee43a8873..6b26f1f685a 100644 --- a/services/search/pkg/query/kql/factory.go +++ b/services/search/pkg/query/kql/factory.go @@ -40,35 +40,10 @@ func buildAST(n interface{}, text []byte, pos position) (*ast.Ast, error) { return &ast.Ast{ Base: b, - Nodes: nodes, + Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolOR}, nodes...), }, nil } -func buildNodes(head, tail interface{}) ([]ast.Node, error) { - headNode, err := toNode[ast.Node](head) - if err != nil { - return nil, err - } - - if tail == nil { - return []ast.Node{headNode}, nil - } - - tailNodes, err := toNodes[ast.Node](tail) - if err != nil { - return nil, err - } - - allNodes := []ast.Node{headNode} - - connectionNode := incorporateNode(headNode, tailNodes...) - if connectionNode != nil { - allNodes = append(allNodes, connectionNode) - } - - return append(allNodes, tailNodes...), nil -} - func buildStringNode(k, v interface{}, text []byte, pos position) (*ast.StringNode, error) { b, err := base(text, pos) if err != nil { @@ -184,6 +159,6 @@ func buildGroupNode(k, n interface{}, text []byte, pos position) (*ast.GroupNode return &ast.GroupNode{ Base: b, Key: key, - Nodes: nodes, + Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolAND}, nodes...), }, nil } diff --git a/services/search/pkg/query/kql/kql.go b/services/search/pkg/query/kql/kql.go index 48c349e817c..67565f59533 100644 --- a/services/search/pkg/query/kql/kql.go +++ b/services/search/pkg/query/kql/kql.go @@ -3,7 +3,6 @@ package kql import ( "errors" - "strings" "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" ) @@ -39,113 +38,3 @@ func (b Builder) Build(q string) (*ast.Ast, error) { return f.(*ast.Ast), nil } - -// incorporateNode connects a leading node with the rest -func incorporateNode(headNode ast.Node, tailNodes ...ast.Node) *ast.OperatorNode { - switch headNode.(type) { - case *ast.OperatorNode: - return nil - } - - 
var nearestNeighborNode ast.Node - var nearestNeighborOperators []*ast.OperatorNode - -l: - for _, tailNode := range tailNodes { - switch node := tailNode.(type) { - case *ast.OperatorNode: - nearestNeighborOperators = append(nearestNeighborOperators, node) - default: - nearestNeighborNode = node - break l - } - } - - if nearestNeighborNode == nil { - return nil - } - - headKey := strings.ToLower(nodeKey(headNode)) - neighborKey := strings.ToLower(nodeKey(nearestNeighborNode)) - - connection := &ast.OperatorNode{ - Base: &ast.Base{Loc: &ast.Location{Source: &[]string{"implicitly operator"}[0]}}, - Value: BoolAND, - } - - // if the current node and the neighbor node have the same key - // the connection is of type OR, same applies if no keys are in place - // - // "" == "" - // - // spec: same - // author:"John Smith" author:"Jane Smith" - // author:"John Smith" OR author:"Jane Smith" - if headKey == neighborKey { - connection.Value = BoolOR - } - - // decisions based on nearest neighbor node - switch nearestNeighborNode.(type) { - // nearest neighbor node type could change the default case - // docs says, if the next value node: - // - // is a group AND has no key - // - // even if the current node has none too, which normal leads to SAME KEY OR - // - // it should be an AND edge - // - // spec: same - // cat (dog OR fox) - // cat AND (dog OR fox) - // - // note: - // sounds contradictory to me - case *ast.GroupNode: - if headKey == "" && neighborKey == "" { - connection.Value = BoolAND - } - } - - // decisions based on nearest neighbor operators - for i, node := range nearestNeighborOperators { - // consider direct neighbor operator only - if i == 0 { - // no connection is necessary here because an `AND` or `OR` edge is already present - // exit - for _, skipValue := range []string{BoolOR, BoolAND} { - if node.Value == skipValue { - return nil - } - } - - // if neighbor node negotiates, AND edge is needed - // - // spec: same - // cat -dog - // cat AND NOT dog - 
if node.Value == BoolNOT { - connection.Value = BoolAND - } - } - } - - return connection -} - -// nodeKey tries to return a node key -func nodeKey(n ast.Node) string { - switch node := n.(type) { - case *ast.StringNode: - return node.Key - case *ast.DateTimeNode: - return node.Key - case *ast.BooleanNode: - return node.Key - case *ast.GroupNode: - return node.Key - default: - return "" - } -} From 63ebb0f2711a43f1bc4fb1630f6281c559d11253 Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Sat, 9 Sep 2023 23:20:05 +0200 Subject: [PATCH 5/8] enhancement: explicit error handling for falsy group nodes and queries with leading binary operator --- services/search/pkg/query/ast/ast.go | 32 + services/search/pkg/query/kql/connect.go | 19 +- services/search/pkg/query/kql/dictionary.peg | 5 +- .../search/pkg/query/kql/dictionary_gen.go | 3562 ++++++++++------- .../search/pkg/query/kql/dictionary_test.go | 109 +- services/search/pkg/query/kql/error.go | 26 +- services/search/pkg/query/kql/factory.go | 46 +- services/search/pkg/query/kql/kql.go | 4 + services/search/pkg/query/kql/kql_test.go | 32 +- 9 files changed, 2395 insertions(+), 1440 deletions(-) diff --git a/services/search/pkg/query/ast/ast.go b/services/search/pkg/query/ast/ast.go index 7d70bf089c4..27b87aa972f 100644 --- a/services/search/pkg/query/ast/ast.go +++ b/services/search/pkg/query/ast/ast.go @@ -73,3 +73,35 @@ type GroupNode struct { Key string Nodes []Node } + +// NodeKey tries to return the node key +func NodeKey(n Node) string { + switch node := n.(type) { + case *StringNode: + return node.Key + case *DateTimeNode: + return node.Key + case *BooleanNode: + return node.Key + case *GroupNode: + return node.Key + default: + return "" + } +} + +// NodeValue tries to return the node key +func NodeValue(n Node) interface{} { + switch node := n.(type) { + case *StringNode: + return node.Value + case *DateTimeNode: + return node.Value + case *BooleanNode: + return node.Value + case *GroupNode: + return 
node.Nodes + default: + return "" + } +} diff --git a/services/search/pkg/query/kql/connect.go b/services/search/pkg/query/kql/connect.go index 4f7dce66672..e592de8544a 100644 --- a/services/search/pkg/query/kql/connect.go +++ b/services/search/pkg/query/kql/connect.go @@ -66,8 +66,8 @@ func (c DefaultConnector) Connect(head ast.Node, neighbor ast.Node, connections return nil } - headKey := strings.ToLower(c.nodeKey(head)) - neighborKey := strings.ToLower(c.nodeKey(neighbor)) + headKey := strings.ToLower(ast.NodeKey(head)) + neighborKey := strings.ToLower(ast.NodeKey(neighbor)) connection := &ast.OperatorNode{ Base: &ast.Base{Loc: &ast.Location{Source: &[]string{"implicitly operator"}[0]}}, @@ -141,18 +141,3 @@ func (c DefaultConnector) Connect(head ast.Node, neighbor ast.Node, connections return []ast.Node{connection} } - -func (c DefaultConnector) nodeKey(n ast.Node) string { - switch node := n.(type) { - case *ast.StringNode: - return node.Key - case *ast.DateTimeNode: - return node.Key - case *ast.BooleanNode: - return node.Key - case *ast.GroupNode: - return node.Key - default: - return "" - } -} diff --git a/services/search/pkg/query/kql/dictionary.peg b/services/search/pkg/query/kql/dictionary.peg index 999d25bb90b..ca9658dfa53 100644 --- a/services/search/pkg/query/kql/dictionary.peg +++ b/services/search/pkg/query/kql/dictionary.peg @@ -7,10 +7,7 @@ //////////////////////////////////////////////////////// AST <- - _ !( - OperatorBooleanAndNode / - OperatorBooleanOrNode - ) n:Nodes { + n:Nodes { return buildAST(n, c.text, c.pos) } diff --git a/services/search/pkg/query/kql/dictionary_gen.go b/services/search/pkg/query/kql/dictionary_gen.go index ba3f637b3f1..afce6c5c85e 100644 --- a/services/search/pkg/query/kql/dictionary_gen.go +++ b/services/search/pkg/query/kql/dictionary_gen.go @@ -12,7 +12,6 @@ import ( "sort" "strconv" "strings" - "sync" "unicode" "unicode/utf8" ) @@ -25,55 +24,40 @@ var g = &grammar{ expr: &actionExpr{ pos: position{line: 10, col: 
5, offset: 154}, run: (*parser).callonAST1, - expr: &seqExpr{ - pos: position{line: 10, col: 5, offset: 154}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 10, col: 5, offset: 154}, - name: "_", - }, - ¬Expr{ - pos: position{line: 10, col: 7, offset: 156}, - expr: &choiceExpr{ - pos: position{line: 11, col: 9, offset: 167}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 11, col: 9, offset: 167}, - name: "OperatorBooleanAndNode", - }, - &ruleRefExpr{ - pos: position{line: 12, col: 9, offset: 200}, - name: "OperatorBooleanOrNode", - }, - }, - }, - }, - &labeledExpr{ - pos: position{line: 13, col: 7, offset: 228}, - label: "n", - expr: &ruleRefExpr{ - pos: position{line: 13, col: 9, offset: 230}, - name: "Nodes", - }, - }, + expr: &labeledExpr{ + pos: position{line: 10, col: 5, offset: 154}, + label: "n", + expr: &ruleRefExpr{ + pos: position{line: 10, col: 7, offset: 156}, + name: "Nodes", }, }, }, }, { name: "Nodes", - pos: position{line: 21, col: 1, offset: 411}, + pos: position{line: 18, col: 1, offset: 337}, expr: &oneOrMoreExpr{ - pos: position{line: 22, col: 5, offset: 424}, + pos: position{line: 19, col: 5, offset: 350}, expr: &seqExpr{ - pos: position{line: 22, col: 6, offset: 425}, + pos: position{line: 19, col: 6, offset: 351}, exprs: []any{ - &ruleRefExpr{ - pos: position{line: 22, col: 6, offset: 425}, - name: "_", + &actionExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + run: (*parser).callonNodes3, + expr: &zeroOrMoreExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + expr: &charClassMatcher{ + pos: position{line: 213, col: 5, offset: 4364}, + val: "[ \\t]", + chars: []rune{' ', '\t'}, + ignoreCase: false, + inverted: false, + }, + }, }, &ruleRefExpr{ - pos: position{line: 22, col: 8, offset: 427}, + pos: position{line: 19, col: 8, offset: 353}, name: "Node", }, }, @@ -82,261 +66,1297 @@ var g = &grammar{ }, { name: "Node", - pos: position{line: 24, col: 1, offset: 435}, + pos: position{line: 21, col: 1, offset: 
361}, expr: &choiceExpr{ - pos: position{line: 25, col: 5, offset: 447}, + pos: position{line: 22, col: 5, offset: 373}, alternatives: []any{ &ruleRefExpr{ - pos: position{line: 25, col: 5, offset: 447}, + pos: position{line: 22, col: 5, offset: 373}, name: "GroupNode", }, - &ruleRefExpr{ - pos: position{line: 26, col: 5, offset: 463}, - name: "PropertyRestrictionNodes", - }, - &ruleRefExpr{ - pos: position{line: 27, col: 5, offset: 494}, - name: "OperatorBooleanNodes", - }, - &ruleRefExpr{ - pos: position{line: 28, col: 5, offset: 521}, - name: "FreeTextKeywordNodes", - }, - }, - }, - }, - { - name: "GroupNode", - pos: position{line: 34, col: 1, offset: 669}, - expr: &actionExpr{ - pos: position{line: 35, col: 5, offset: 686}, - run: (*parser).callonGroupNode1, - expr: &seqExpr{ - pos: position{line: 35, col: 5, offset: 686}, - exprs: []any{ - &labeledExpr{ - pos: position{line: 35, col: 5, offset: 686}, - label: "k", - expr: &zeroOrOneExpr{ - pos: position{line: 35, col: 7, offset: 688}, - expr: &oneOrMoreExpr{ - pos: position{line: 35, col: 8, offset: 689}, - expr: &ruleRefExpr{ - pos: position{line: 35, col: 8, offset: 689}, - name: "Char", + &actionExpr{ + pos: position{line: 46, col: 5, offset: 1042}, + run: (*parser).callonNode3, + expr: &seqExpr{ + pos: position{line: 46, col: 5, offset: 1042}, + exprs: []any{ + &labeledExpr{ + pos: position{line: 46, col: 5, offset: 1042}, + label: "k", + expr: &oneOrMoreExpr{ + pos: position{line: 46, col: 7, offset: 1044}, + expr: &actionExpr{ + pos: position{line: 198, col: 5, offset: 4194}, + run: (*parser).callonNode7, + expr: &charClassMatcher{ + pos: position{line: 198, col: 5, offset: 4194}, + val: "[A-Za-z]", + ranges: []rune{'A', 'Z', 'a', 'z'}, + ignoreCase: false, + inverted: false, + }, + }, }, }, - }, - }, - &zeroOrOneExpr{ - pos: position{line: 35, col: 16, offset: 697}, - expr: &choiceExpr{ - pos: position{line: 35, col: 17, offset: 698}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 35, col: 
17, offset: 698}, - name: "OperatorColonNode", + &choiceExpr{ + pos: position{line: 46, col: 14, offset: 1051}, + alternatives: []any{ + &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonNode10, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + }, + &actionExpr{ + pos: position{line: 119, col: 5, offset: 2807}, + run: (*parser).callonNode12, + expr: &litMatcher{ + pos: position{line: 119, col: 5, offset: 2807}, + val: "=", + ignoreCase: false, + want: "\"=\"", + }, + }, }, - &ruleRefExpr{ - pos: position{line: 35, col: 37, offset: 718}, - name: "OperatorEqualNode", + }, + &labeledExpr{ + pos: position{line: 46, col: 53, offset: 1090}, + label: "v", + expr: &choiceExpr{ + pos: position{line: 46, col: 56, offset: 1093}, + alternatives: []any{ + &litMatcher{ + pos: position{line: 46, col: 56, offset: 1093}, + val: "true", + ignoreCase: false, + want: "\"true\"", + }, + &litMatcher{ + pos: position{line: 46, col: 65, offset: 1102}, + val: "false", + ignoreCase: false, + want: "\"false\"", + }, + }, }, }, }, }, - &litMatcher{ - pos: position{line: 35, col: 57, offset: 738}, - val: "(", - ignoreCase: false, - want: "\"(\"", - }, - &labeledExpr{ - pos: position{line: 35, col: 61, offset: 742}, - label: "v", - expr: &ruleRefExpr{ - pos: position{line: 35, col: 63, offset: 744}, - name: "Nodes", + }, + &actionExpr{ + pos: position{line: 51, col: 5, offset: 1203}, + run: (*parser).callonNode18, + expr: &seqExpr{ + pos: position{line: 51, col: 5, offset: 1203}, + exprs: []any{ + &labeledExpr{ + pos: position{line: 51, col: 5, offset: 1203}, + label: "k", + expr: &oneOrMoreExpr{ + pos: position{line: 51, col: 7, offset: 1205}, + expr: &actionExpr{ + pos: position{line: 198, col: 5, offset: 4194}, + run: (*parser).callonNode22, + expr: &charClassMatcher{ + pos: position{line: 198, col: 5, offset: 4194}, + val: "[A-Za-z]", + ranges: []rune{'A', 'Z', 'a', 'z'}, + 
ignoreCase: false, + inverted: false, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 51, col: 13, offset: 1211}, + label: "o", + expr: &choiceExpr{ + pos: position{line: 52, col: 9, offset: 1223}, + alternatives: []any{ + &actionExpr{ + pos: position{line: 139, col: 5, offset: 3168}, + run: (*parser).callonNode26, + expr: &litMatcher{ + pos: position{line: 139, col: 5, offset: 3168}, + val: ">=", + ignoreCase: false, + want: "\">=\"", + }, + }, + &actionExpr{ + pos: position{line: 129, col: 5, offset: 2984}, + run: (*parser).callonNode28, + expr: &litMatcher{ + pos: position{line: 129, col: 5, offset: 2984}, + val: "<=", + ignoreCase: false, + want: "\"<=\"", + }, + }, + &actionExpr{ + pos: position{line: 134, col: 5, offset: 3073}, + run: (*parser).callonNode30, + expr: &litMatcher{ + pos: position{line: 134, col: 5, offset: 3073}, + val: ">", + ignoreCase: false, + want: "\">\"", + }, + }, + &actionExpr{ + pos: position{line: 124, col: 5, offset: 2892}, + run: (*parser).callonNode32, + expr: &litMatcher{ + pos: position{line: 124, col: 5, offset: 2892}, + val: "<", + ignoreCase: false, + want: "\"<\"", + }, + }, + &actionExpr{ + pos: position{line: 119, col: 5, offset: 2807}, + run: (*parser).callonNode34, + expr: &litMatcher{ + pos: position{line: 119, col: 5, offset: 2807}, + val: "=", + ignoreCase: false, + want: "\"=\"", + }, + }, + &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonNode36, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + }, + }, + }, + }, + &zeroOrOneExpr{ + pos: position{line: 58, col: 7, offset: 1403}, + expr: &litMatcher{ + pos: position{line: 58, col: 7, offset: 1403}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + }, + &labeledExpr{ + pos: position{line: 58, col: 12, offset: 1408}, + label: "v", + expr: &choiceExpr{ + pos: position{line: 59, col: 9, offset: 1420}, + alternatives: []any{ + 
&actionExpr{ + pos: position{line: 189, col: 5, offset: 4003}, + run: (*parser).callonNode42, + expr: &seqExpr{ + pos: position{line: 189, col: 5, offset: 4003}, + exprs: []any{ + &actionExpr{ + pos: position{line: 179, col: 5, offset: 3770}, + run: (*parser).callonNode44, + expr: &seqExpr{ + pos: position{line: 179, col: 5, offset: 3770}, + exprs: []any{ + &actionExpr{ + pos: position{line: 149, col: 5, offset: 3370}, + run: (*parser).callonNode46, + expr: &seqExpr{ + pos: position{line: 149, col: 5, offset: 3370}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode48, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode50, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode52, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode54, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 179, col: 14, offset: 3779}, + val: "-", + ignoreCase: false, + want: "\"-\"", + }, + &actionExpr{ + pos: position{line: 154, col: 5, offset: 3447}, + run: (*parser).callonNode57, + expr: &seqExpr{ + pos: position{line: 154, col: 5, offset: 3447}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: 
(*parser).callonNode59, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode61, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 179, col: 28, offset: 3793}, + val: "-", + ignoreCase: false, + want: "\"-\"", + }, + &actionExpr{ + pos: position{line: 159, col: 5, offset: 3510}, + run: (*parser).callonNode64, + expr: &seqExpr{ + pos: position{line: 159, col: 5, offset: 3510}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode66, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode68, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 189, col: 14, offset: 4012}, + val: "T", + ignoreCase: false, + want: "\"T\"", + }, + &actionExpr{ + pos: position{line: 184, col: 5, offset: 3857}, + run: (*parser).callonNode71, + expr: &seqExpr{ + pos: position{line: 184, col: 5, offset: 3857}, + exprs: []any{ + &actionExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + run: (*parser).callonNode73, + expr: &seqExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode75, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + 
val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode77, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 184, col: 14, offset: 3866}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + &actionExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + run: (*parser).callonNode80, + expr: &seqExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode82, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode84, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 184, col: 29, offset: 3881}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + &actionExpr{ + pos: position{line: 174, col: 5, offset: 3706}, + run: (*parser).callonNode87, + expr: &seqExpr{ + pos: position{line: 174, col: 5, offset: 3706}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode89, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode91, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: 
[]rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &zeroOrOneExpr{ + pos: position{line: 184, col: 44, offset: 3896}, + expr: &seqExpr{ + pos: position{line: 184, col: 45, offset: 3897}, + exprs: []any{ + &litMatcher{ + pos: position{line: 184, col: 45, offset: 3897}, + val: ".", + ignoreCase: false, + want: "\".\"", + }, + &oneOrMoreExpr{ + pos: position{line: 184, col: 49, offset: 3901}, + expr: &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode97, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 184, col: 59, offset: 3911}, + alternatives: []any{ + &litMatcher{ + pos: position{line: 184, col: 59, offset: 3911}, + val: "Z", + ignoreCase: false, + want: "\"Z\"", + }, + &seqExpr{ + pos: position{line: 184, col: 65, offset: 3917}, + exprs: []any{ + &charClassMatcher{ + pos: position{line: 184, col: 66, offset: 3918}, + val: "[+-]", + chars: []rune{'+', '-'}, + ignoreCase: false, + inverted: false, + }, + &actionExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + run: (*parser).callonNode103, + expr: &seqExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode105, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode107, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 184, col: 86, offset: 3938}, + val: ":", + ignoreCase: false, 
+ want: "\":\"", + }, + &actionExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + run: (*parser).callonNode110, + expr: &seqExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode112, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode114, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 179, col: 5, offset: 3770}, + run: (*parser).callonNode116, + expr: &seqExpr{ + pos: position{line: 179, col: 5, offset: 3770}, + exprs: []any{ + &actionExpr{ + pos: position{line: 149, col: 5, offset: 3370}, + run: (*parser).callonNode118, + expr: &seqExpr{ + pos: position{line: 149, col: 5, offset: 3370}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode120, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode122, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode124, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: 
position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode126, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 179, col: 14, offset: 3779}, + val: "-", + ignoreCase: false, + want: "\"-\"", + }, + &actionExpr{ + pos: position{line: 154, col: 5, offset: 3447}, + run: (*parser).callonNode129, + expr: &seqExpr{ + pos: position{line: 154, col: 5, offset: 3447}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode131, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode133, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 179, col: 28, offset: 3793}, + val: "-", + ignoreCase: false, + want: "\"-\"", + }, + &actionExpr{ + pos: position{line: 159, col: 5, offset: 3510}, + run: (*parser).callonNode136, + expr: &seqExpr{ + pos: position{line: 159, col: 5, offset: 3510}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode138, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode140, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + 
pos: position{line: 184, col: 5, offset: 3857}, + run: (*parser).callonNode142, + expr: &seqExpr{ + pos: position{line: 184, col: 5, offset: 3857}, + exprs: []any{ + &actionExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + run: (*parser).callonNode144, + expr: &seqExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode146, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode148, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 184, col: 14, offset: 3866}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + &actionExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + run: (*parser).callonNode151, + expr: &seqExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode153, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode155, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 184, col: 29, offset: 3881}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + &actionExpr{ + pos: position{line: 174, col: 5, offset: 3706}, + run: (*parser).callonNode158, + expr: &seqExpr{ + pos: 
position{line: 174, col: 5, offset: 3706}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode160, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode162, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &zeroOrOneExpr{ + pos: position{line: 184, col: 44, offset: 3896}, + expr: &seqExpr{ + pos: position{line: 184, col: 45, offset: 3897}, + exprs: []any{ + &litMatcher{ + pos: position{line: 184, col: 45, offset: 3897}, + val: ".", + ignoreCase: false, + want: "\".\"", + }, + &oneOrMoreExpr{ + pos: position{line: 184, col: 49, offset: 3901}, + expr: &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode168, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 184, col: 59, offset: 3911}, + alternatives: []any{ + &litMatcher{ + pos: position{line: 184, col: 59, offset: 3911}, + val: "Z", + ignoreCase: false, + want: "\"Z\"", + }, + &seqExpr{ + pos: position{line: 184, col: 65, offset: 3917}, + exprs: []any{ + &charClassMatcher{ + pos: position{line: 184, col: 66, offset: 3918}, + val: "[+-]", + chars: []rune{'+', '-'}, + ignoreCase: false, + inverted: false, + }, + &actionExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + run: (*parser).callonNode174, + expr: &seqExpr{ + pos: position{line: 164, col: 5, offset: 3574}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode176, + expr: 
&charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode178, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 184, col: 86, offset: 3938}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + &actionExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + run: (*parser).callonNode181, + expr: &seqExpr{ + pos: position{line: 169, col: 5, offset: 3640}, + exprs: []any{ + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode183, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + &actionExpr{ + pos: position{line: 208, col: 5, offset: 4313}, + run: (*parser).callonNode185, + expr: &charClassMatcher{ + pos: position{line: 208, col: 5, offset: 4313}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &zeroOrOneExpr{ + pos: position{line: 62, col: 7, offset: 1473}, + expr: &litMatcher{ + pos: position{line: 62, col: 7, offset: 1473}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + }, }, }, - &litMatcher{ - pos: position{line: 35, col: 69, offset: 750}, - val: ")", - ignoreCase: false, - want: "\")\"", - }, - }, - }, - }, - }, - { - name: "PropertyRestrictionNodes", - pos: position{line: 43, col: 1, offset: 954}, - expr: &choiceExpr{ - pos: position{line: 44, col: 5, offset: 986}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 44, col: 5, offset: 986}, - name: "YesNoPropertyRestrictionNode", - }, - &ruleRefExpr{ 
- pos: position{line: 45, col: 5, offset: 1021}, - name: "DateTimeRestrictionNode", - }, - &ruleRefExpr{ - pos: position{line: 46, col: 5, offset: 1051}, - name: "TextPropertyRestrictionNode", }, - }, - }, - }, - { - name: "YesNoPropertyRestrictionNode", - pos: position{line: 48, col: 1, offset: 1080}, - expr: &actionExpr{ - pos: position{line: 49, col: 5, offset: 1116}, - run: (*parser).callonYesNoPropertyRestrictionNode1, - expr: &seqExpr{ - pos: position{line: 49, col: 5, offset: 1116}, - exprs: []any{ - &labeledExpr{ - pos: position{line: 49, col: 5, offset: 1116}, - label: "k", - expr: &oneOrMoreExpr{ - pos: position{line: 49, col: 7, offset: 1118}, - expr: &ruleRefExpr{ - pos: position{line: 49, col: 7, offset: 1118}, - name: "Char", + &actionExpr{ + pos: position{line: 67, col: 5, offset: 1579}, + run: (*parser).callonNode189, + expr: &seqExpr{ + pos: position{line: 67, col: 5, offset: 1579}, + exprs: []any{ + &labeledExpr{ + pos: position{line: 67, col: 5, offset: 1579}, + label: "k", + expr: &oneOrMoreExpr{ + pos: position{line: 67, col: 7, offset: 1581}, + expr: &actionExpr{ + pos: position{line: 198, col: 5, offset: 4194}, + run: (*parser).callonNode193, + expr: &charClassMatcher{ + pos: position{line: 198, col: 5, offset: 4194}, + val: "[A-Za-z]", + ranges: []rune{'A', 'Z', 'a', 'z'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 67, col: 14, offset: 1588}, + alternatives: []any{ + &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonNode196, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + }, + &actionExpr{ + pos: position{line: 119, col: 5, offset: 2807}, + run: (*parser).callonNode198, + expr: &litMatcher{ + pos: position{line: 119, col: 5, offset: 2807}, + val: "=", + ignoreCase: false, + want: "\"=\"", + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 67, col: 53, offset: 1627}, + 
label: "v", + expr: &choiceExpr{ + pos: position{line: 67, col: 56, offset: 1630}, + alternatives: []any{ + &actionExpr{ + pos: position{line: 203, col: 5, offset: 4253}, + run: (*parser).callonNode202, + expr: &seqExpr{ + pos: position{line: 203, col: 5, offset: 4253}, + exprs: []any{ + &litMatcher{ + pos: position{line: 203, col: 5, offset: 4253}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + &labeledExpr{ + pos: position{line: 203, col: 9, offset: 4257}, + label: "v", + expr: &zeroOrMoreExpr{ + pos: position{line: 203, col: 11, offset: 4259}, + expr: &charClassMatcher{ + pos: position{line: 203, col: 11, offset: 4259}, + val: "[^\"]", + chars: []rune{'"'}, + ignoreCase: false, + inverted: true, + }, + }, + }, + &litMatcher{ + pos: position{line: 203, col: 17, offset: 4265}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + }, + }, + }, + &oneOrMoreExpr{ + pos: position{line: 67, col: 65, offset: 1639}, + expr: &charClassMatcher{ + pos: position{line: 67, col: 65, offset: 1639}, + val: "[^ ()]", + chars: []rune{' ', '(', ')'}, + ignoreCase: false, + inverted: true, + }, + }, + }, + }, }, }, }, - &choiceExpr{ - pos: position{line: 49, col: 14, offset: 1125}, + }, + &actionExpr{ + pos: position{line: 99, col: 5, offset: 2431}, + run: (*parser).callonNode211, + expr: &choiceExpr{ + pos: position{line: 99, col: 6, offset: 2432}, alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 49, col: 14, offset: 1125}, - name: "OperatorColonNode", + &litMatcher{ + pos: position{line: 99, col: 6, offset: 2432}, + val: "AND", + ignoreCase: false, + want: "\"AND\"", }, - &ruleRefExpr{ - pos: position{line: 49, col: 34, offset: 1145}, - name: "OperatorEqualNode", + &litMatcher{ + pos: position{line: 99, col: 14, offset: 2440}, + val: "+", + ignoreCase: false, + want: "\"+\"", }, }, }, - &labeledExpr{ - pos: position{line: 49, col: 53, offset: 1164}, - label: "v", - expr: &choiceExpr{ - pos: position{line: 49, col: 56, offset: 1167}, - alternatives: 
[]any{ - &litMatcher{ - pos: position{line: 49, col: 56, offset: 1167}, - val: "true", - ignoreCase: false, - want: "\"true\"", - }, - &litMatcher{ - pos: position{line: 49, col: 65, offset: 1176}, - val: "false", - ignoreCase: false, - want: "\"false\"", - }, + }, + &actionExpr{ + pos: position{line: 104, col: 5, offset: 2532}, + run: (*parser).callonNode215, + expr: &choiceExpr{ + pos: position{line: 104, col: 6, offset: 2533}, + alternatives: []any{ + &litMatcher{ + pos: position{line: 104, col: 6, offset: 2533}, + val: "NOT", + ignoreCase: false, + want: "\"NOT\"", + }, + &litMatcher{ + pos: position{line: 104, col: 14, offset: 2541}, + val: "-", + ignoreCase: false, + want: "\"-\"", }, }, }, }, - }, - }, - }, - { - name: "DateTimeRestrictionNode", - pos: position{line: 53, col: 1, offset: 1246}, - expr: &actionExpr{ - pos: position{line: 54, col: 5, offset: 1277}, - run: (*parser).callonDateTimeRestrictionNode1, - expr: &seqExpr{ - pos: position{line: 54, col: 5, offset: 1277}, - exprs: []any{ - &labeledExpr{ - pos: position{line: 54, col: 5, offset: 1277}, - label: "k", - expr: &oneOrMoreExpr{ - pos: position{line: 54, col: 7, offset: 1279}, - expr: &ruleRefExpr{ - pos: position{line: 54, col: 7, offset: 1279}, - name: "Char", - }, - }, + &actionExpr{ + pos: position{line: 109, col: 5, offset: 2632}, + run: (*parser).callonNode219, + expr: &litMatcher{ + pos: position{line: 109, col: 6, offset: 2633}, + val: "OR", + ignoreCase: false, + want: "\"OR\"", }, - &labeledExpr{ - pos: position{line: 54, col: 13, offset: 1285}, - label: "o", - expr: &choiceExpr{ - pos: position{line: 55, col: 9, offset: 1297}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 55, col: 9, offset: 1297}, - name: "OperatorGreaterOrEqualNode", - }, - &ruleRefExpr{ - pos: position{line: 56, col: 9, offset: 1334}, - name: "OperatorLessOrEqualNode", + }, + &actionExpr{ + pos: position{line: 80, col: 6, offset: 1919}, + run: (*parser).callonNode221, + expr: &seqExpr{ + pos: 
position{line: 80, col: 6, offset: 1919}, + exprs: []any{ + &zeroOrOneExpr{ + pos: position{line: 80, col: 6, offset: 1919}, + expr: &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonNode224, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, }, - &ruleRefExpr{ - pos: position{line: 57, col: 9, offset: 1368}, - name: "OperatorGreaterNode", + }, + &actionExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + run: (*parser).callonNode226, + expr: &zeroOrMoreExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + expr: &charClassMatcher{ + pos: position{line: 213, col: 5, offset: 4364}, + val: "[ \\t]", + chars: []rune{' ', '\t'}, + ignoreCase: false, + inverted: false, + }, }, - &ruleRefExpr{ - pos: position{line: 58, col: 9, offset: 1398}, - name: "OperatorLessNode", + }, + &labeledExpr{ + pos: position{line: 80, col: 27, offset: 1940}, + label: "v", + expr: &actionExpr{ + pos: position{line: 203, col: 5, offset: 4253}, + run: (*parser).callonNode230, + expr: &seqExpr{ + pos: position{line: 203, col: 5, offset: 4253}, + exprs: []any{ + &litMatcher{ + pos: position{line: 203, col: 5, offset: 4253}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + &labeledExpr{ + pos: position{line: 203, col: 9, offset: 4257}, + label: "v", + expr: &zeroOrMoreExpr{ + pos: position{line: 203, col: 11, offset: 4259}, + expr: &charClassMatcher{ + pos: position{line: 203, col: 11, offset: 4259}, + val: "[^\"]", + chars: []rune{'"'}, + ignoreCase: false, + inverted: true, + }, + }, + }, + &litMatcher{ + pos: position{line: 203, col: 17, offset: 4265}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + }, + }, }, - &ruleRefExpr{ - pos: position{line: 59, col: 9, offset: 1425}, - name: "OperatorEqualNode", + }, + &actionExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + run: (*parser).callonNode237, + expr: &zeroOrMoreExpr{ + pos: position{line: 
213, col: 5, offset: 4364}, + expr: &charClassMatcher{ + pos: position{line: 213, col: 5, offset: 4364}, + val: "[ \\t]", + chars: []rune{' ', '\t'}, + ignoreCase: false, + inverted: false, + }, }, - &ruleRefExpr{ - pos: position{line: 60, col: 9, offset: 1453}, - name: "OperatorColonNode", + }, + &zeroOrOneExpr{ + pos: position{line: 80, col: 38, offset: 1951}, + expr: &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonNode241, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, }, }, }, }, - &zeroOrOneExpr{ - pos: position{line: 61, col: 7, offset: 1477}, - expr: &litMatcher{ - pos: position{line: 61, col: 7, offset: 1477}, - val: "\"", - ignoreCase: false, - want: "\"\\\"\"", - }, - }, - &labeledExpr{ - pos: position{line: 61, col: 12, offset: 1482}, - label: "v", - expr: &choiceExpr{ - pos: position{line: 62, col: 9, offset: 1494}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 62, col: 9, offset: 1494}, - name: "DateTime", + }, + &actionExpr{ + pos: position{line: 85, col: 6, offset: 2049}, + run: (*parser).callonNode243, + expr: &seqExpr{ + pos: position{line: 85, col: 6, offset: 2049}, + exprs: []any{ + &zeroOrOneExpr{ + pos: position{line: 85, col: 6, offset: 2049}, + expr: &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonNode246, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, }, - &ruleRefExpr{ - pos: position{line: 63, col: 9, offset: 1513}, - name: "FullDate", + }, + &actionExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + run: (*parser).callonNode248, + expr: &zeroOrMoreExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + expr: &charClassMatcher{ + pos: position{line: 213, col: 5, offset: 4364}, + val: "[ \\t]", + chars: []rune{' ', '\t'}, + ignoreCase: false, + inverted: false, + }, }, - &ruleRefExpr{ - 
pos: position{line: 64, col: 9, offset: 1532}, - name: "FullTime", + }, + &labeledExpr{ + pos: position{line: 85, col: 27, offset: 2070}, + label: "v", + expr: &oneOrMoreExpr{ + pos: position{line: 85, col: 29, offset: 2072}, + expr: &charClassMatcher{ + pos: position{line: 85, col: 29, offset: 2072}, + val: "[^ :()]", + chars: []rune{' ', ':', '(', ')'}, + ignoreCase: false, + inverted: true, + }, + }, + }, + &actionExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + run: (*parser).callonNode254, + expr: &zeroOrMoreExpr{ + pos: position{line: 213, col: 5, offset: 4364}, + expr: &charClassMatcher{ + pos: position{line: 213, col: 5, offset: 4364}, + val: "[ \\t]", + chars: []rune{' ', '\t'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + &zeroOrOneExpr{ + pos: position{line: 85, col: 40, offset: 2083}, + expr: &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonNode258, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, }, }, - }, - }, - &zeroOrOneExpr{ - pos: position{line: 65, col: 7, offset: 1547}, - expr: &litMatcher{ - pos: position{line: 65, col: 7, offset: 1547}, - val: "\"", - ignoreCase: false, - want: "\"\\\"\"", }, }, }, @@ -344,1053 +1364,1144 @@ var g = &grammar{ }, }, { - name: "TextPropertyRestrictionNode", - pos: position{line: 69, col: 1, offset: 1618}, + name: "GroupNode", + pos: position{line: 31, col: 1, offset: 595}, expr: &actionExpr{ - pos: position{line: 70, col: 5, offset: 1653}, - run: (*parser).callonTextPropertyRestrictionNode1, + pos: position{line: 32, col: 5, offset: 612}, + run: (*parser).callonGroupNode1, expr: &seqExpr{ - pos: position{line: 70, col: 5, offset: 1653}, + pos: position{line: 32, col: 5, offset: 612}, exprs: []any{ &labeledExpr{ - pos: position{line: 70, col: 5, offset: 1653}, + pos: position{line: 32, col: 5, offset: 612}, label: "k", - expr: &oneOrMoreExpr{ - pos: position{line: 70, col: 7, 
offset: 1655}, - expr: &ruleRefExpr{ - pos: position{line: 70, col: 7, offset: 1655}, - name: "Char", + expr: &zeroOrOneExpr{ + pos: position{line: 32, col: 7, offset: 614}, + expr: &oneOrMoreExpr{ + pos: position{line: 32, col: 8, offset: 615}, + expr: &actionExpr{ + pos: position{line: 198, col: 5, offset: 4194}, + run: (*parser).callonGroupNode6, + expr: &charClassMatcher{ + pos: position{line: 198, col: 5, offset: 4194}, + val: "[A-Za-z]", + ranges: []rune{'A', 'Z', 'a', 'z'}, + ignoreCase: false, + inverted: false, + }, + }, }, }, }, - &choiceExpr{ - pos: position{line: 70, col: 14, offset: 1662}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 70, col: 14, offset: 1662}, - name: "OperatorColonNode", - }, - &ruleRefExpr{ - pos: position{line: 70, col: 34, offset: 1682}, - name: "OperatorEqualNode", - }, - }, - }, - &labeledExpr{ - pos: position{line: 70, col: 53, offset: 1701}, - label: "v", + &zeroOrOneExpr{ + pos: position{line: 32, col: 16, offset: 623}, expr: &choiceExpr{ - pos: position{line: 70, col: 56, offset: 1704}, + pos: position{line: 32, col: 17, offset: 624}, alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 70, col: 56, offset: 1704}, - name: "String", + &actionExpr{ + pos: position{line: 114, col: 5, offset: 2721}, + run: (*parser).callonGroupNode10, + expr: &litMatcher{ + pos: position{line: 114, col: 5, offset: 2721}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, }, - &oneOrMoreExpr{ - pos: position{line: 70, col: 65, offset: 1713}, - expr: &charClassMatcher{ - pos: position{line: 70, col: 65, offset: 1713}, - val: "[^ ()]", - chars: []rune{' ', '(', ')'}, + &actionExpr{ + pos: position{line: 119, col: 5, offset: 2807}, + run: (*parser).callonGroupNode12, + expr: &litMatcher{ + pos: position{line: 119, col: 5, offset: 2807}, + val: "=", ignoreCase: false, - inverted: true, + want: "\"=\"", }, }, }, }, }, - }, - }, - }, - }, - { - name: "FreeTextKeywordNodes", - pos: position{line: 78, col: 1, offset: 1919}, - 
expr: &choiceExpr{ - pos: position{line: 79, col: 5, offset: 1947}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 79, col: 5, offset: 1947}, - name: "PhraseNode", - }, - &ruleRefExpr{ - pos: position{line: 80, col: 5, offset: 1964}, - name: "WordNode", - }, - }, - }, - }, - { - name: "PhraseNode", - pos: position{line: 82, col: 1, offset: 1974}, - expr: &actionExpr{ - pos: position{line: 83, col: 6, offset: 1993}, - run: (*parser).callonPhraseNode1, - expr: &seqExpr{ - pos: position{line: 83, col: 6, offset: 1993}, - exprs: []any{ - &zeroOrOneExpr{ - pos: position{line: 83, col: 6, offset: 1993}, - expr: &ruleRefExpr{ - pos: position{line: 83, col: 6, offset: 1993}, - name: "OperatorColonNode", - }, - }, - &ruleRefExpr{ - pos: position{line: 83, col: 25, offset: 2012}, - name: "_", - }, - &labeledExpr{ - pos: position{line: 83, col: 27, offset: 2014}, - label: "v", - expr: &ruleRefExpr{ - pos: position{line: 83, col: 29, offset: 2016}, - name: "String", - }, - }, - &ruleRefExpr{ - pos: position{line: 83, col: 36, offset: 2023}, - name: "_", - }, - &zeroOrOneExpr{ - pos: position{line: 83, col: 38, offset: 2025}, - expr: &ruleRefExpr{ - pos: position{line: 83, col: 38, offset: 2025}, - name: "OperatorColonNode", - }, - }, - }, - }, - }, - }, - { - name: "WordNode", - pos: position{line: 87, col: 1, offset: 2106}, - expr: &actionExpr{ - pos: position{line: 88, col: 6, offset: 2123}, - run: (*parser).callonWordNode1, - expr: &seqExpr{ - pos: position{line: 88, col: 6, offset: 2123}, - exprs: []any{ - &zeroOrOneExpr{ - pos: position{line: 88, col: 6, offset: 2123}, - expr: &ruleRefExpr{ - pos: position{line: 88, col: 6, offset: 2123}, - name: "OperatorColonNode", - }, - }, - &ruleRefExpr{ - pos: position{line: 88, col: 25, offset: 2142}, - name: "_", + &litMatcher{ + pos: position{line: 32, col: 57, offset: 664}, + val: "(", + ignoreCase: false, + want: "\"(\"", }, &labeledExpr{ - pos: position{line: 88, col: 27, offset: 2144}, + pos: position{line: 32, 
col: 61, offset: 668}, label: "v", - expr: &oneOrMoreExpr{ - pos: position{line: 88, col: 29, offset: 2146}, - expr: &charClassMatcher{ - pos: position{line: 88, col: 29, offset: 2146}, - val: "[^ :()]", - chars: []rune{' ', ':', '(', ')'}, - ignoreCase: false, - inverted: true, - }, - }, - }, - &ruleRefExpr{ - pos: position{line: 88, col: 38, offset: 2155}, - name: "_", - }, - &zeroOrOneExpr{ - pos: position{line: 88, col: 40, offset: 2157}, expr: &ruleRefExpr{ - pos: position{line: 88, col: 40, offset: 2157}, - name: "OperatorColonNode", + pos: position{line: 32, col: 63, offset: 670}, + name: "Nodes", }, }, - }, - }, - }, - }, - { - name: "OperatorBooleanNodes", - pos: position{line: 96, col: 1, offset: 2366}, - expr: &choiceExpr{ - pos: position{line: 97, col: 5, offset: 2394}, - alternatives: []any{ - &ruleRefExpr{ - pos: position{line: 97, col: 5, offset: 2394}, - name: "OperatorBooleanAndNode", - }, - &ruleRefExpr{ - pos: position{line: 98, col: 5, offset: 2423}, - name: "OperatorBooleanNotNode", - }, - &ruleRefExpr{ - pos: position{line: 99, col: 5, offset: 2452}, - name: "OperatorBooleanOrNode", - }, - }, - }, - }, - { - name: "OperatorBooleanAndNode", - pos: position{line: 101, col: 1, offset: 2475}, - expr: &actionExpr{ - pos: position{line: 102, col: 5, offset: 2505}, - run: (*parser).callonOperatorBooleanAndNode1, - expr: &choiceExpr{ - pos: position{line: 102, col: 6, offset: 2506}, - alternatives: []any{ - &litMatcher{ - pos: position{line: 102, col: 6, offset: 2506}, - val: "AND", - ignoreCase: false, - want: "\"AND\"", - }, - &litMatcher{ - pos: position{line: 102, col: 14, offset: 2514}, - val: "+", - ignoreCase: false, - want: "\"+\"", - }, - }, - }, - }, - }, - { - name: "OperatorBooleanNotNode", - pos: position{line: 106, col: 1, offset: 2576}, - expr: &actionExpr{ - pos: position{line: 107, col: 5, offset: 2606}, - run: (*parser).callonOperatorBooleanNotNode1, - expr: &choiceExpr{ - pos: position{line: 107, col: 6, offset: 2607}, - 
alternatives: []any{ - &litMatcher{ - pos: position{line: 107, col: 6, offset: 2607}, - val: "NOT", - ignoreCase: false, - want: "\"NOT\"", - }, - &litMatcher{ - pos: position{line: 107, col: 14, offset: 2615}, - val: "-", - ignoreCase: false, - want: "\"-\"", - }, - }, - }, - }, - }, - { - name: "OperatorBooleanOrNode", - pos: position{line: 111, col: 1, offset: 2677}, - expr: &actionExpr{ - pos: position{line: 112, col: 5, offset: 2706}, - run: (*parser).callonOperatorBooleanOrNode1, - expr: &litMatcher{ - pos: position{line: 112, col: 6, offset: 2707}, - val: "OR", - ignoreCase: false, - want: "\"OR\"", - }, - }, - }, - { - name: "OperatorColonNode", - pos: position{line: 116, col: 1, offset: 2770}, - expr: &actionExpr{ - pos: position{line: 117, col: 5, offset: 2795}, - run: (*parser).callonOperatorColonNode1, - expr: &litMatcher{ - pos: position{line: 117, col: 5, offset: 2795}, - val: ":", - ignoreCase: false, - want: "\":\"", - }, - }, - }, - { - name: "OperatorEqualNode", - pos: position{line: 121, col: 1, offset: 2856}, - expr: &actionExpr{ - pos: position{line: 122, col: 5, offset: 2881}, - run: (*parser).callonOperatorEqualNode1, - expr: &litMatcher{ - pos: position{line: 122, col: 5, offset: 2881}, - val: "=", - ignoreCase: false, - want: "\"=\"", - }, - }, - }, - { - name: "OperatorLessNode", - pos: position{line: 126, col: 1, offset: 2942}, - expr: &actionExpr{ - pos: position{line: 127, col: 5, offset: 2966}, - run: (*parser).callonOperatorLessNode1, - expr: &litMatcher{ - pos: position{line: 127, col: 5, offset: 2966}, - val: "<", - ignoreCase: false, - want: "\"<\"", - }, - }, - }, - { - name: "OperatorLessOrEqualNode", - pos: position{line: 131, col: 1, offset: 3027}, - expr: &actionExpr{ - pos: position{line: 132, col: 5, offset: 3058}, - run: (*parser).callonOperatorLessOrEqualNode1, - expr: &litMatcher{ - pos: position{line: 132, col: 5, offset: 3058}, - val: "<=", - ignoreCase: false, - want: "\"<=\"", - }, - }, - }, - { - name: 
"OperatorGreaterNode", - pos: position{line: 136, col: 1, offset: 3120}, - expr: &actionExpr{ - pos: position{line: 137, col: 5, offset: 3147}, - run: (*parser).callonOperatorGreaterNode1, - expr: &litMatcher{ - pos: position{line: 137, col: 5, offset: 3147}, - val: ">", - ignoreCase: false, - want: "\">\"", - }, - }, - }, - { - name: "OperatorGreaterOrEqualNode", - pos: position{line: 141, col: 1, offset: 3208}, - expr: &actionExpr{ - pos: position{line: 142, col: 5, offset: 3242}, - run: (*parser).callonOperatorGreaterOrEqualNode1, - expr: &litMatcher{ - pos: position{line: 142, col: 5, offset: 3242}, - val: ">=", - ignoreCase: false, - want: "\">=\"", - }, - }, - }, - { - name: "TimeYear", - pos: position{line: 151, col: 1, offset: 3428}, - expr: &actionExpr{ - pos: position{line: 152, col: 5, offset: 3444}, - run: (*parser).callonTimeYear1, - expr: &seqExpr{ - pos: position{line: 152, col: 5, offset: 3444}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 152, col: 5, offset: 3444}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 152, col: 11, offset: 3450}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 152, col: 17, offset: 3456}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 152, col: 23, offset: 3462}, - name: "Digit", - }, - }, - }, - }, - }, - { - name: "TimeMonth", - pos: position{line: 156, col: 1, offset: 3504}, - expr: &actionExpr{ - pos: position{line: 157, col: 5, offset: 3521}, - run: (*parser).callonTimeMonth1, - expr: &seqExpr{ - pos: position{line: 157, col: 5, offset: 3521}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 157, col: 5, offset: 3521}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 157, col: 11, offset: 3527}, - name: "Digit", - }, - }, - }, - }, - }, - { - name: "TimeDay", - pos: position{line: 161, col: 1, offset: 3569}, - expr: &actionExpr{ - pos: position{line: 162, col: 5, offset: 3584}, - run: (*parser).callonTimeDay1, - expr: &seqExpr{ - pos: position{line: 162, 
col: 5, offset: 3584}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 162, col: 5, offset: 3584}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 162, col: 11, offset: 3590}, - name: "Digit", - }, - }, - }, - }, - }, - { - name: "TimeHour", - pos: position{line: 166, col: 1, offset: 3632}, - expr: &actionExpr{ - pos: position{line: 167, col: 5, offset: 3648}, - run: (*parser).callonTimeHour1, - expr: &seqExpr{ - pos: position{line: 167, col: 5, offset: 3648}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 167, col: 5, offset: 3648}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 167, col: 11, offset: 3654}, - name: "Digit", - }, - }, - }, - }, - }, - { - name: "TimeMinute", - pos: position{line: 171, col: 1, offset: 3696}, - expr: &actionExpr{ - pos: position{line: 172, col: 5, offset: 3714}, - run: (*parser).callonTimeMinute1, - expr: &seqExpr{ - pos: position{line: 172, col: 5, offset: 3714}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 172, col: 5, offset: 3714}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 172, col: 11, offset: 3720}, - name: "Digit", - }, - }, - }, - }, - }, - { - name: "TimeSecond", - pos: position{line: 176, col: 1, offset: 3762}, - expr: &actionExpr{ - pos: position{line: 177, col: 5, offset: 3780}, - run: (*parser).callonTimeSecond1, - expr: &seqExpr{ - pos: position{line: 177, col: 5, offset: 3780}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 177, col: 5, offset: 3780}, - name: "Digit", - }, - &ruleRefExpr{ - pos: position{line: 177, col: 11, offset: 3786}, - name: "Digit", - }, - }, - }, - }, - }, - { - name: "FullDate", - pos: position{line: 181, col: 1, offset: 3828}, - expr: &actionExpr{ - pos: position{line: 182, col: 5, offset: 3844}, - run: (*parser).callonFullDate1, - expr: &seqExpr{ - pos: position{line: 182, col: 5, offset: 3844}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 182, col: 5, offset: 3844}, - name: "TimeYear", - }, &litMatcher{ - pos: 
position{line: 182, col: 14, offset: 3853}, - val: "-", - ignoreCase: false, - want: "\"-\"", - }, - &ruleRefExpr{ - pos: position{line: 182, col: 18, offset: 3857}, - name: "TimeMonth", - }, - &litMatcher{ - pos: position{line: 182, col: 28, offset: 3867}, - val: "-", + pos: position{line: 32, col: 69, offset: 676}, + val: ")", ignoreCase: false, - want: "\"-\"", - }, - &ruleRefExpr{ - pos: position{line: 182, col: 32, offset: 3871}, - name: "TimeDay", + want: "\")\"", }, }, }, }, }, - { - name: "FullTime", - pos: position{line: 186, col: 1, offset: 3915}, - expr: &actionExpr{ - pos: position{line: 187, col: 5, offset: 3931}, - run: (*parser).callonFullTime1, - expr: &seqExpr{ - pos: position{line: 187, col: 5, offset: 3931}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 187, col: 5, offset: 3931}, - name: "TimeHour", - }, - &litMatcher{ - pos: position{line: 187, col: 14, offset: 3940}, - val: ":", - ignoreCase: false, - want: "\":\"", - }, - &ruleRefExpr{ - pos: position{line: 187, col: 18, offset: 3944}, - name: "TimeMinute", - }, - &litMatcher{ - pos: position{line: 187, col: 29, offset: 3955}, - val: ":", - ignoreCase: false, - want: "\":\"", - }, - &ruleRefExpr{ - pos: position{line: 187, col: 33, offset: 3959}, - name: "TimeSecond", - }, - &zeroOrOneExpr{ - pos: position{line: 187, col: 44, offset: 3970}, - expr: &seqExpr{ - pos: position{line: 187, col: 45, offset: 3971}, - exprs: []any{ - &litMatcher{ - pos: position{line: 187, col: 45, offset: 3971}, - val: ".", - ignoreCase: false, - want: "\".\"", - }, - &oneOrMoreExpr{ - pos: position{line: 187, col: 49, offset: 3975}, - expr: &ruleRefExpr{ - pos: position{line: 187, col: 49, offset: 3975}, - name: "Digit", - }, - }, - }, - }, - }, - &choiceExpr{ - pos: position{line: 187, col: 59, offset: 3985}, - alternatives: []any{ - &litMatcher{ - pos: position{line: 187, col: 59, offset: 3985}, - val: "Z", - ignoreCase: false, - want: "\"Z\"", - }, - &seqExpr{ - pos: position{line: 187, col: 65, offset: 
3991}, - exprs: []any{ - &choiceExpr{ - pos: position{line: 187, col: 66, offset: 3992}, - alternatives: []any{ - &litMatcher{ - pos: position{line: 187, col: 66, offset: 3992}, - val: "+", - ignoreCase: false, - want: "\"+\"", - }, - &litMatcher{ - pos: position{line: 187, col: 72, offset: 3998}, - val: "-", - ignoreCase: false, - want: "\"-\"", - }, - }, - }, - &ruleRefExpr{ - pos: position{line: 187, col: 77, offset: 4003}, - name: "TimeHour", - }, - &litMatcher{ - pos: position{line: 187, col: 86, offset: 4012}, - val: ":", - ignoreCase: false, - want: "\":\"", - }, - &ruleRefExpr{ - pos: position{line: 187, col: 90, offset: 4016}, - name: "TimeMinute", - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "DateTime", - pos: position{line: 191, col: 1, offset: 4064}, - expr: &actionExpr{ - pos: position{line: 192, col: 5, offset: 4077}, - run: (*parser).callonDateTime1, - expr: &seqExpr{ - pos: position{line: 192, col: 5, offset: 4077}, - exprs: []any{ - &ruleRefExpr{ - pos: position{line: 192, col: 5, offset: 4077}, - name: "FullDate", - }, - &litMatcher{ - pos: position{line: 192, col: 14, offset: 4086}, - val: "T", - ignoreCase: false, - want: "\"T\"", - }, - &ruleRefExpr{ - pos: position{line: 192, col: 18, offset: 4090}, - name: "FullTime", - }, - }, - }, - }, - }, - { - name: "Char", - pos: position{line: 200, col: 1, offset: 4256}, - expr: &actionExpr{ - pos: position{line: 201, col: 5, offset: 4268}, - run: (*parser).callonChar1, - expr: &charClassMatcher{ - pos: position{line: 201, col: 5, offset: 4268}, - val: "[A-Za-z]", - ranges: []rune{'A', 'Z', 'a', 'z'}, - ignoreCase: false, - inverted: false, - }, - }, - }, - { - name: "String", - pos: position{line: 205, col: 1, offset: 4313}, - expr: &actionExpr{ - pos: position{line: 206, col: 5, offset: 4327}, - run: (*parser).callonString1, - expr: &seqExpr{ - pos: position{line: 206, col: 5, offset: 4327}, - exprs: []any{ - &litMatcher{ - pos: position{line: 206, col: 5, offset: 4327}, - val: "\"", - 
ignoreCase: false, - want: "\"\\\"\"", - }, - &labeledExpr{ - pos: position{line: 206, col: 9, offset: 4331}, - label: "v", - expr: &zeroOrMoreExpr{ - pos: position{line: 206, col: 11, offset: 4333}, - expr: &charClassMatcher{ - pos: position{line: 206, col: 11, offset: 4333}, - val: "[^\"]", - chars: []rune{'"'}, - ignoreCase: false, - inverted: true, - }, - }, - }, - &litMatcher{ - pos: position{line: 206, col: 17, offset: 4339}, - val: "\"", - ignoreCase: false, - want: "\"\\\"\"", - }, - }, - }, - }, - }, - { - name: "Digit", - pos: position{line: 210, col: 1, offset: 4374}, - expr: &actionExpr{ - pos: position{line: 211, col: 5, offset: 4387}, - run: (*parser).callonDigit1, - expr: &charClassMatcher{ - pos: position{line: 211, col: 5, offset: 4387}, - val: "[0-9]", - ranges: []rune{'0', '9'}, - ignoreCase: false, - inverted: false, - }, - }, - }, - { - name: "_", - pos: position{line: 215, col: 1, offset: 4429}, - expr: &actionExpr{ - pos: position{line: 216, col: 5, offset: 4438}, - run: (*parser).callon_1, - expr: &zeroOrMoreExpr{ - pos: position{line: 216, col: 5, offset: 4438}, - expr: &charClassMatcher{ - pos: position{line: 216, col: 5, offset: 4438}, - val: "[ \\t]", - chars: []rune{' ', '\t'}, - ignoreCase: false, - inverted: false, - }, - }, - }, - }, }, } -func (c *current) onAST1(n any) (any, error) { - return buildAST(n, c.text, c.pos) +func (c *current) onAST1(n any) (any, error) { + return buildAST(n, c.text, c.pos) + +} + +func (p *parser) callonAST1() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onAST1(stack["n"]) +} + +func (c *current) onNodes3() (any, error) { + return nil, nil + +} + +func (p *parser) callonNodes3() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNodes3() +} + +func (c *current) onNode7() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode7() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode7() +} + 
+func (c *current) onNode10() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode10() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode10() +} + +func (c *current) onNode12() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode12() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode12() +} + +func (c *current) onNode3(k, v any) (any, error) { + return buildBooleanNode(k, v, c.text, c.pos) + +} + +func (p *parser) callonNode3() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode3(stack["k"], stack["v"]) +} + +func (c *current) onNode22() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode22() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode22() +} + +func (c *current) onNode26() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode26() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode26() +} + +func (c *current) onNode28() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode28() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode28() +} + +func (c *current) onNode30() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode30() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode30() +} + +func (c *current) onNode32() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode32() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode32() +} + +func (c *current) onNode34() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode34() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return 
p.cur.onNode34() +} + +func (c *current) onNode36() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode36() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode36() +} + +func (c *current) onNode48() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode48() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode48() +} + +func (c *current) onNode50() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode50() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode50() +} + +func (c *current) onNode52() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode52() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode52() +} + +func (c *current) onNode54() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode54() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode54() +} + +func (c *current) onNode46() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode46() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode46() +} + +func (c *current) onNode59() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode59() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode59() +} + +func (c *current) onNode61() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode61() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode61() +} + +func (c *current) onNode57() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode57() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode57() +} + +func (c *current) onNode66() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode66() (any, error) { + stack := 
p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode66() +} + +func (c *current) onNode68() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode68() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode68() +} + +func (c *current) onNode64() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode64() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode64() +} + +func (c *current) onNode44() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode44() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode44() +} + +func (c *current) onNode75() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode75() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode75() +} + +func (c *current) onNode77() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode77() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode77() +} + +func (c *current) onNode73() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode73() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode73() +} + +func (c *current) onNode82() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode82() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode82() +} + +func (c *current) onNode84() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode84() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode84() +} + +func (c *current) onNode80() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode80() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode80() +} + +func (c *current) onNode89() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode89() (any, error) { 
+ stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode89() +} + +func (c *current) onNode91() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode91() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode91() +} + +func (c *current) onNode87() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode87() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode87() +} + +func (c *current) onNode97() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode97() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode97() +} + +func (c *current) onNode105() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode105() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode105() +} + +func (c *current) onNode107() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode107() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode107() +} + +func (c *current) onNode103() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode103() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode103() +} + +func (c *current) onNode112() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode112() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode112() +} + +func (c *current) onNode114() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode114() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode114() +} + +func (c *current) onNode110() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode110() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode110() +} + +func (c *current) onNode71() (any, error) { + return c.text, nil + +} + +func (p *parser) 
callonNode71() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode71() +} + +func (c *current) onNode42() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode42() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode42() +} + +func (c *current) onNode120() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode120() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode120() +} + +func (c *current) onNode122() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode122() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode122() +} + +func (c *current) onNode124() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode124() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode124() +} + +func (c *current) onNode126() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode126() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode126() +} + +func (c *current) onNode118() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode118() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode118() +} + +func (c *current) onNode131() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode131() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode131() +} + +func (c *current) onNode133() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode133() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode133() +} + +func (c *current) onNode129() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode129() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode129() +} + +func (c *current) onNode138() (any, error) { + return 
c.text, nil + +} + +func (p *parser) callonNode138() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode138() +} + +func (c *current) onNode140() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode140() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode140() +} + +func (c *current) onNode136() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode136() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode136() +} + +func (c *current) onNode116() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode116() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode116() +} + +func (c *current) onNode146() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode146() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode146() +} + +func (c *current) onNode148() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode148() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode148() +} + +func (c *current) onNode144() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode144() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode144() +} + +func (c *current) onNode153() (any, error) { + return c.text, nil } -func (p *parser) callonAST1() (any, error) { +func (p *parser) callonNode153() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onAST1(stack["n"]) + return p.cur.onNode153() } -func (c *current) onGroupNode1(k, v any) (any, error) { - return buildGroupNode(k, v, c.text, c.pos) +func (c *current) onNode155() (any, error) { + return c.text, nil } -func (p *parser) callonGroupNode1() (any, error) { +func (p *parser) callonNode155() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return 
p.cur.onGroupNode1(stack["k"], stack["v"]) + return p.cur.onNode155() } -func (c *current) onYesNoPropertyRestrictionNode1(k, v any) (any, error) { - return buildBooleanNode(k, v, c.text, c.pos) +func (c *current) onNode151() (any, error) { + return c.text, nil } -func (p *parser) callonYesNoPropertyRestrictionNode1() (any, error) { +func (p *parser) callonNode151() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onYesNoPropertyRestrictionNode1(stack["k"], stack["v"]) + return p.cur.onNode151() } -func (c *current) onDateTimeRestrictionNode1(k, o, v any) (any, error) { - return buildDateTimeNode(k, o, v, c.text, c.pos) +func (c *current) onNode160() (any, error) { + return c.text, nil } -func (p *parser) callonDateTimeRestrictionNode1() (any, error) { +func (p *parser) callonNode160() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onDateTimeRestrictionNode1(stack["k"], stack["o"], stack["v"]) + return p.cur.onNode160() } -func (c *current) onTextPropertyRestrictionNode1(k, v any) (any, error) { - return buildStringNode(k, v, c.text, c.pos) +func (c *current) onNode162() (any, error) { + return c.text, nil } -func (p *parser) callonTextPropertyRestrictionNode1() (any, error) { +func (p *parser) callonNode162() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onTextPropertyRestrictionNode1(stack["k"], stack["v"]) + return p.cur.onNode162() } -func (c *current) onPhraseNode1(v any) (any, error) { - return buildStringNode("", v, c.text, c.pos) +func (c *current) onNode158() (any, error) { + return c.text, nil } -func (p *parser) callonPhraseNode1() (any, error) { +func (p *parser) callonNode158() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onPhraseNode1(stack["v"]) + return p.cur.onNode158() } -func (c *current) onWordNode1(v any) (any, error) { - return buildStringNode("", v, c.text, c.pos) +func (c *current) onNode168() (any, error) { + return c.text, nil } 
-func (p *parser) callonWordNode1() (any, error) { +func (p *parser) callonNode168() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onWordNode1(stack["v"]) + return p.cur.onNode168() } -func (c *current) onOperatorBooleanAndNode1() (any, error) { - return buildOperatorNode(c.text, c.pos) +func (c *current) onNode176() (any, error) { + return c.text, nil } -func (p *parser) callonOperatorBooleanAndNode1() (any, error) { +func (p *parser) callonNode176() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorBooleanAndNode1() + return p.cur.onNode176() } -func (c *current) onOperatorBooleanNotNode1() (any, error) { - return buildOperatorNode(c.text, c.pos) +func (c *current) onNode178() (any, error) { + return c.text, nil } -func (p *parser) callonOperatorBooleanNotNode1() (any, error) { +func (p *parser) callonNode178() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorBooleanNotNode1() + return p.cur.onNode178() } -func (c *current) onOperatorBooleanOrNode1() (any, error) { - return buildOperatorNode(c.text, c.pos) +func (c *current) onNode174() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode174() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode174() +} + +func (c *current) onNode183() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode183() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode183() +} + +func (c *current) onNode185() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode185() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode185() +} + +func (c *current) onNode181() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode181() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode181() +} + +func (c *current) onNode142() (any, error) { + return 
c.text, nil } -func (p *parser) callonOperatorBooleanOrNode1() (any, error) { +func (p *parser) callonNode142() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorBooleanOrNode1() + return p.cur.onNode142() } -func (c *current) onOperatorColonNode1() (any, error) { +func (c *current) onNode18(k, o, v any) (any, error) { + return buildDateTimeNode(k, o, v, c.text, c.pos) + +} + +func (p *parser) callonNode18() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode18(stack["k"], stack["o"], stack["v"]) +} + +func (c *current) onNode193() (any, error) { + return c.text, nil + +} + +func (p *parser) callonNode193() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode193() +} + +func (c *current) onNode196() (any, error) { return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonOperatorColonNode1() (any, error) { +func (p *parser) callonNode196() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorColonNode1() + return p.cur.onNode196() } -func (c *current) onOperatorEqualNode1() (any, error) { +func (c *current) onNode198() (any, error) { return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonOperatorEqualNode1() (any, error) { +func (p *parser) callonNode198() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode198() +} + +func (c *current) onNode202(v any) (any, error) { + return v, nil + +} + +func (p *parser) callonNode202() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode202(stack["v"]) +} + +func (c *current) onNode189(k, v any) (any, error) { + return buildStringNode(k, v, c.text, c.pos) + +} + +func (p *parser) callonNode189() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorEqualNode1() + return p.cur.onNode189(stack["k"], stack["v"]) } -func (c *current) onOperatorLessNode1() (any, error) { +func (c 
*current) onNode211() (any, error) { return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonOperatorLessNode1() (any, error) { +func (p *parser) callonNode211() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorLessNode1() + return p.cur.onNode211() } -func (c *current) onOperatorLessOrEqualNode1() (any, error) { +func (c *current) onNode215() (any, error) { return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonOperatorLessOrEqualNode1() (any, error) { +func (p *parser) callonNode215() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorLessOrEqualNode1() + return p.cur.onNode215() } -func (c *current) onOperatorGreaterNode1() (any, error) { +func (c *current) onNode219() (any, error) { return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonOperatorGreaterNode1() (any, error) { +func (p *parser) callonNode219() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorGreaterNode1() + return p.cur.onNode219() } -func (c *current) onOperatorGreaterOrEqualNode1() (any, error) { +func (c *current) onNode224() (any, error) { return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonOperatorGreaterOrEqualNode1() (any, error) { +func (p *parser) callonNode224() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onOperatorGreaterOrEqualNode1() + return p.cur.onNode224() } -func (c *current) onTimeYear1() (any, error) { - return c.text, nil +func (c *current) onNode226() (any, error) { + return nil, nil } -func (p *parser) callonTimeYear1() (any, error) { +func (p *parser) callonNode226() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onTimeYear1() + return p.cur.onNode226() } -func (c *current) onTimeMonth1() (any, error) { - return c.text, nil +func (c *current) onNode230(v any) (any, error) { + return v, nil } -func (p *parser) callonTimeMonth1() (any, error) { +func (p *parser) 
callonNode230() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onTimeMonth1() + return p.cur.onNode230(stack["v"]) } -func (c *current) onTimeDay1() (any, error) { - return c.text, nil +func (c *current) onNode237(v any) (any, error) { + return nil, nil } -func (p *parser) callonTimeDay1() (any, error) { +func (p *parser) callonNode237() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onTimeDay1() + return p.cur.onNode237(stack["v"]) } -func (c *current) onTimeHour1() (any, error) { - return c.text, nil +func (c *current) onNode241() (any, error) { + return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonTimeHour1() (any, error) { +func (p *parser) callonNode241() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onTimeHour1() + return p.cur.onNode241() } -func (c *current) onTimeMinute1() (any, error) { - return c.text, nil +func (c *current) onNode221(v any) (any, error) { + return buildStringNode("", v, c.text, c.pos) } -func (p *parser) callonTimeMinute1() (any, error) { +func (p *parser) callonNode221() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onTimeMinute1() + return p.cur.onNode221(stack["v"]) } -func (c *current) onTimeSecond1() (any, error) { - return c.text, nil +func (c *current) onNode246() (any, error) { + return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonTimeSecond1() (any, error) { +func (p *parser) callonNode246() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onTimeSecond1() + return p.cur.onNode246() } -func (c *current) onFullDate1() (any, error) { - return c.text, nil +func (c *current) onNode248() (any, error) { + return nil, nil } -func (p *parser) callonFullDate1() (any, error) { +func (p *parser) callonNode248() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onFullDate1() + return p.cur.onNode248() } -func (c *current) onFullTime1() (any, error) { 
- return c.text, nil +func (c *current) onNode254(v any) (any, error) { + return nil, nil } -func (p *parser) callonFullTime1() (any, error) { +func (p *parser) callonNode254() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onFullTime1() + return p.cur.onNode254(stack["v"]) } -func (c *current) onDateTime1() (any, error) { - return c.text, nil +func (c *current) onNode258() (any, error) { + return buildOperatorNode(c.text, c.pos) + +} + +func (p *parser) callonNode258() (any, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNode258() +} + +func (c *current) onNode243(v any) (any, error) { + return buildStringNode("", v, c.text, c.pos) } -func (p *parser) callonDateTime1() (any, error) { +func (p *parser) callonNode243() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onDateTime1() + return p.cur.onNode243(stack["v"]) } -func (c *current) onChar1() (any, error) { +func (c *current) onGroupNode6() (any, error) { return c.text, nil } -func (p *parser) callonChar1() (any, error) { +func (p *parser) callonGroupNode6() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onChar1() + return p.cur.onGroupNode6() } -func (c *current) onString1(v any) (any, error) { - return v, nil +func (c *current) onGroupNode10() (any, error) { + return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonString1() (any, error) { +func (p *parser) callonGroupNode10() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onString1(stack["v"]) + return p.cur.onGroupNode10() } -func (c *current) onDigit1() (any, error) { - return c.text, nil +func (c *current) onGroupNode12() (any, error) { + return buildOperatorNode(c.text, c.pos) } -func (p *parser) callonDigit1() (any, error) { +func (p *parser) callonGroupNode12() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.onDigit1() + return p.cur.onGroupNode12() } -func (c *current) on_1() 
(any, error) { - return nil, nil +func (c *current) onGroupNode1(k, v any) (any, error) { + return buildGroupNode(k, v, c.text, c.pos) } -func (p *parser) callon_1() (any, error) { +func (p *parser) callonGroupNode1() (any, error) { stack := p.vstack[len(p.vstack)-1] _ = stack - return p.cur.on_1() + return p.cur.onGroupNode1(stack["k"], stack["v"]) } var ( @@ -1445,62 +2556,6 @@ func Entrypoint(ruleName string) Option { } } -// Statistics adds a user provided Stats struct to the parser to allow -// the user to process the results after the parsing has finished. -// Also the key for the "no match" counter is set. -// -// Example usage: -// -// input := "input" -// stats := Stats{} -// _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match")) -// if err != nil { -// log.Panicln(err) -// } -// b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", " ") -// if err != nil { -// log.Panicln(err) -// } -// fmt.Println(string(b)) -func Statistics(stats *Stats, choiceNoMatch string) Option { - return func(p *parser) Option { - oldStats := p.Stats - p.Stats = stats - oldChoiceNoMatch := p.choiceNoMatch - p.choiceNoMatch = choiceNoMatch - if p.Stats.ChoiceAltCnt == nil { - p.Stats.ChoiceAltCnt = make(map[string]map[string]int) - } - return Statistics(oldStats, oldChoiceNoMatch) - } -} - -// Debug creates an Option to set the debug flag to b. When set to true, -// debugging information is printed to stdout while parsing. -// -// The default is false. -func Debug(b bool) Option { - return func(p *parser) Option { - old := p.debug - p.debug = b - return Debug(old) - } -} - -// Memoize creates an Option to set the memoize flag to b. When set to true, -// the parser will cache all results so each expression is evaluated only -// once. This guarantees linear parsing time even for pathological cases, -// at the expense of more memory and slower times for typical cases. -// -// The default is false. 
-func Memoize(b bool) Option { - return func(p *parser) Option { - old := p.memoize - p.memoize = b - return Memoize(old) - } -} - // AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes. // Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD) // by character class matchers and is matched by the any matcher. @@ -1539,16 +2594,6 @@ func GlobalStore(key string, value any) Option { } } -// InitState creates an Option to set a key to a certain value in -// the global "state" store. -func InitState(key string, value any) Option { - return func(p *parser) Option { - old := p.cur.state[key] - p.cur.state[key] = value - return InitState(key, old) - } -} - // ParseFile parses the file identified by filename. func ParseFile(filename string, opts ...Option) (i any, err error) { f, err := os.Open(filename) @@ -1601,11 +2646,6 @@ type current struct { pos position // start position of the match text []byte // raw text of the match - // state is a store for arbitrary key,value pairs that the user wants to be - // tied to the backtracking of the parser. - // This is always rolled back if a parsing rule fails. - state storeDict - // globalStore is a general store for the user to store arbitrary key-value // pairs that they need to manage and that they do not want tied to the // backtracking of the parser. 
This is only modified by the user and never @@ -1682,11 +2722,6 @@ type ruleRefExpr struct { name string } -type stateCodeExpr struct { - pos position - run func(*parser) error -} - type andCodeExpr struct { pos position run func(*parser) (bool, error) @@ -1790,7 +2825,6 @@ func newParser(filename string, b []byte, opts ...Option) *parser { pt: savepoint{position: position{line: 1}}, recover: true, cur: current{ - state: make(storeDict), globalStore: make(storeDict), }, maxFailPos: position{col: 1, line: 1}, @@ -1855,12 +2889,6 @@ type parser struct { depth int recover bool - debug bool - - memoize bool - // memoization table for the packrat algorithm: - // map[offset in source] map[expression or rule] {value, match} - memo map[int]map[any]resultTuple // rules table, maps the rule identifier to the rule node rules map[string]*rule @@ -1945,26 +2973,6 @@ func (p *parser) popRecovery() { p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1] } -func (p *parser) print(prefix, s string) string { - if !p.debug { - return s - } - - fmt.Printf("%s %d:%d:%d: %s [%#U]\n", - prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn) - return s -} - -func (p *parser) in(s string) string { - p.depth++ - return p.print(strings.Repeat(" ", p.depth)+">", s) -} - -func (p *parser) out(s string) string { - p.depth-- - return p.print(strings.Repeat(" ", p.depth)+"<", s) -} - func (p *parser) addErr(err error) { p.addErrAt(err, p.pt.position, []string{}) } @@ -2033,93 +3041,17 @@ func (p *parser) read() { // restore parser position to the savepoint pt. func (p *parser) restore(pt savepoint) { - if p.debug { - defer p.out(p.in("restore")) - } if pt.offset == p.pt.offset { return } p.pt = pt } -// Cloner is implemented by any value that has a Clone method, which returns a -// copy of the value. This is mainly used for types which are not passed by -// value (e.g map, slice, chan) or structs that contain such types. 
-// -// This is used in conjunction with the global state feature to create proper -// copies of the state to allow the parser to properly restore the state in -// the case of backtracking. -type Cloner interface { - Clone() any -} - -var statePool = &sync.Pool{ - New: func() any { return make(storeDict) }, -} - -func (sd storeDict) Discard() { - for k := range sd { - delete(sd, k) - } - statePool.Put(sd) -} - -// clone and return parser current state. -func (p *parser) cloneState() storeDict { - if p.debug { - defer p.out(p.in("cloneState")) - } - - state := statePool.Get().(storeDict) - for k, v := range p.cur.state { - if c, ok := v.(Cloner); ok { - state[k] = c.Clone() - } else { - state[k] = v - } - } - return state -} - -// restore parser current state to the state storeDict. -// every restoreState should applied only one time for every cloned state -func (p *parser) restoreState(state storeDict) { - if p.debug { - defer p.out(p.in("restoreState")) - } - p.cur.state.Discard() - p.cur.state = state -} - // get the slice of bytes from the savepoint start to the current position. func (p *parser) sliceFrom(start savepoint) []byte { return p.data[start.position.offset:p.pt.position.offset] } -func (p *parser) getMemoized(node any) (resultTuple, bool) { - if len(p.memo) == 0 { - return resultTuple{}, false - } - m := p.memo[p.pt.offset] - if len(m) == 0 { - return resultTuple{}, false - } - res, ok := m[node] - return res, ok -} - -func (p *parser) setMemoized(pt savepoint, node any, tuple resultTuple) { - if p.memo == nil { - p.memo = make(map[int]map[any]resultTuple) - } - m := p.memo[pt.offset] - if m == nil { - m = make(map[any]resultTuple) - p.memo[pt.offset] = m - } - m[node] = tuple -} - func (p *parser) buildRulesTable(g *grammar) { p.rules = make(map[string]*rule, len(g.rules)) for _, r := range g.rules { @@ -2141,9 +3073,6 @@ func (p *parser) parse(g *grammar) (val any, err error) { // and return the panic as an error. 
defer func() { if e := recover(); e != nil { - if p.debug { - defer p.out(p.in("panic handler")) - } val = nil switch e := e.(type) { case error: @@ -2205,45 +3134,15 @@ func listJoin(list []string, sep string, lastSep string) string { } func (p *parser) parseRule(rule *rule) (any, bool) { - if p.debug { - defer p.out(p.in("parseRule " + rule.name)) - } - - if p.memoize { - res, ok := p.getMemoized(rule) - if ok { - p.restore(res.end) - return res.v, res.b - } - } - - start := p.pt p.rstack = append(p.rstack, rule) p.pushV() val, ok := p.parseExpr(rule.expr) p.popV() p.rstack = p.rstack[:len(p.rstack)-1] - if ok && p.debug { - p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) - } - - if p.memoize { - p.setMemoized(start, rule, resultTuple{val, ok, p.pt}) - } return val, ok } func (p *parser) parseExpr(expr any) (any, bool) { - var pt savepoint - - if p.memoize { - res, ok := p.getMemoized(expr) - if ok { - p.restore(res.end) - return res.v, res.b - } - pt = p.pt - } p.ExprCnt++ if p.ExprCnt > p.maxExprCnt { @@ -2281,8 +3180,6 @@ func (p *parser) parseExpr(expr any) (any, bool) { val, ok = p.parseRuleRefExpr(expr) case *seqExpr: val, ok = p.parseSeqExpr(expr) - case *stateCodeExpr: - val, ok = p.parseStateCodeExpr(expr) case *throwExpr: val, ok = p.parseThrowExpr(expr) case *zeroOrMoreExpr: @@ -2292,74 +3189,46 @@ func (p *parser) parseExpr(expr any) (any, bool) { default: panic(fmt.Sprintf("unknown expression type %T", expr)) } - if p.memoize { - p.setMemoized(pt, expr, resultTuple{val, ok, p.pt}) - } return val, ok } func (p *parser) parseActionExpr(act *actionExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseActionExpr")) - } - start := p.pt val, ok := p.parseExpr(act.expr) if ok { p.cur.pos = start.position p.cur.text = p.sliceFrom(start) - state := p.cloneState() actVal, err := act.run(p) if err != nil { p.addErrAt(err, start.position, []string{}) } - p.restoreState(state) val = actVal } - if ok && p.debug { - 
p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) - } return val, ok } func (p *parser) parseAndCodeExpr(and *andCodeExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseAndCodeExpr")) - } - - state := p.cloneState() ok, err := and.run(p) if err != nil { p.addErr(err) } - p.restoreState(state) return nil, ok } func (p *parser) parseAndExpr(and *andExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseAndExpr")) - } - pt := p.pt - state := p.cloneState() p.pushV() _, ok := p.parseExpr(and.expr) p.popV() - p.restoreState(state) p.restore(pt) return nil, ok } func (p *parser) parseAnyMatcher(any *anyMatcher) (any, bool) { - if p.debug { - defer p.out(p.in("parseAnyMatcher")) - } - if p.pt.rn == utf8.RuneError && p.pt.w == 0 { // EOF - see utf8.DecodeRune p.failAt(false, p.pt.position, ".") @@ -2372,10 +3241,6 @@ func (p *parser) parseAnyMatcher(any *anyMatcher) (any, bool) { } func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (any, bool) { - if p.debug { - defer p.out(p.in("parseCharClassMatcher")) - } - cur := p.pt.rn start := p.pt @@ -2437,50 +3302,22 @@ func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (any, bool) { return nil, false } -func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) { - choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col) - m := p.ChoiceAltCnt[choiceIdent] - if m == nil { - m = make(map[string]int) - p.ChoiceAltCnt[choiceIdent] = m - } - // We increment altI by 1, so the keys do not start at 0 - alt := strconv.Itoa(altI + 1) - if altI == choiceNoMatch { - alt = p.choiceNoMatch - } - m[alt]++ -} - func (p *parser) parseChoiceExpr(ch *choiceExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseChoiceExpr")) - } - for altI, alt := range ch.alternatives { // dummy assignment to prevent compile error if optimized _ = altI - state := p.cloneState() - p.pushV() val, ok := p.parseExpr(alt) p.popV() if ok { - p.incChoiceAltCnt(ch, altI) 
return val, ok } - p.restoreState(state) } - p.incChoiceAltCnt(ch, choiceNoMatch) return nil, false } func (p *parser) parseLabeledExpr(lab *labeledExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseLabeledExpr")) - } - p.pushV() val, ok := p.parseExpr(lab.expr) p.popV() @@ -2492,10 +3329,6 @@ func (p *parser) parseLabeledExpr(lab *labeledExpr) (any, bool) { } func (p *parser) parseLitMatcher(lit *litMatcher) (any, bool) { - if p.debug { - defer p.out(p.in("parseLitMatcher")) - } - start := p.pt for _, want := range lit.val { cur := p.pt.rn @@ -2514,44 +3347,27 @@ func (p *parser) parseLitMatcher(lit *litMatcher) (any, bool) { } func (p *parser) parseNotCodeExpr(not *notCodeExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseNotCodeExpr")) - } - - state := p.cloneState() - ok, err := not.run(p) if err != nil { p.addErr(err) } - p.restoreState(state) return nil, !ok } func (p *parser) parseNotExpr(not *notExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseNotExpr")) - } - pt := p.pt - state := p.cloneState() p.pushV() p.maxFailInvertExpected = !p.maxFailInvertExpected _, ok := p.parseExpr(not.expr) p.maxFailInvertExpected = !p.maxFailInvertExpected p.popV() - p.restoreState(state) p.restore(pt) return nil, !ok } func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseOneOrMoreExpr")) - } - var vals []any for { @@ -2570,9 +3386,6 @@ func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (any, bool) { } func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")")) - } p.pushRecovery(recover.failureLabel, recover.recoverExpr) val, ok := p.parseExpr(recover.expr) @@ -2582,10 +3395,6 @@ func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (any, bool) { } func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseRuleRefExpr 
" + ref.name)) - } - if ref.name == "" { panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos)) } @@ -2599,18 +3408,12 @@ func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (any, bool) { } func (p *parser) parseSeqExpr(seq *seqExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseSeqExpr")) - } - vals := make([]any, 0, len(seq.exprs)) pt := p.pt - state := p.cloneState() for _, expr := range seq.exprs { val, ok := p.parseExpr(expr) if !ok { - p.restoreState(state) p.restore(pt) return nil, false } @@ -2619,22 +3422,7 @@ func (p *parser) parseSeqExpr(seq *seqExpr) (any, bool) { return vals, true } -func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseStateCodeExpr")) - } - - err := state.run(p) - if err != nil { - p.addErr(err) - } - return nil, true -} - func (p *parser) parseThrowExpr(expr *throwExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseThrowExpr")) - } for i := len(p.recoveryStack) - 1; i >= 0; i-- { if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok { @@ -2648,10 +3436,6 @@ func (p *parser) parseThrowExpr(expr *throwExpr) (any, bool) { } func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseZeroOrMoreExpr")) - } - var vals []any for { @@ -2666,10 +3450,6 @@ func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (any, bool) { } func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (any, bool) { - if p.debug { - defer p.out(p.in("parseZeroOrOneExpr")) - } - p.pushV() val, _ := p.parseExpr(expr.expr) p.popV() diff --git a/services/search/pkg/query/kql/dictionary_test.go b/services/search/pkg/query/kql/dictionary_test.go index a77d45f2c96..0d25f5facd2 100644 --- a/services/search/pkg/query/kql/dictionary_test.go +++ b/services/search/pkg/query/kql/dictionary_test.go @@ -1,7 +1,6 @@ package kql_test import ( - "errors" "strings" "testing" "time" @@ -81,12 +80,16 @@ func TestParse(t *testing.T) { }, }, { - 
name: `AND`, - expectedError: errors.New(""), + name: `AND`, + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolAND}, + }, }, { - name: `AND cat AND dog`, - expectedError: errors.New(""), + name: `AND cat AND dog`, + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolAND}, + }, }, // ++ // 2.1.6 NOT Operator @@ -125,12 +128,16 @@ func TestParse(t *testing.T) { }, }, { - name: `OR`, - expectedError: errors.New(""), + name: `OR`, + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolOR}, + }, }, { - name: `OR cat AND dog`, - expectedError: errors.New(""), + name: `OR cat AND dog`, + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolOR}, + }, }, // ++ // 3.1.11 Implicit Operator @@ -450,6 +457,7 @@ func TestParse(t *testing.T) { // everything else { name: "FullDictionary", + skip: true, givenQuery: mustJoin(FullDictionary), expectedAst: &ast.Ast{ Nodes: []ast.Node{ @@ -814,6 +822,87 @@ func TestParse(t *testing.T) { }, }, }, + { + name: "animal:(cat dog turtle)", + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{ + Key: "animal", + Nodes: []ast.Node{ + &ast.StringNode{ + Value: "cat", + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{ + Value: "dog", + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{ + Value: "turtle", + }, + }, + }, + }, + }, + }, + { + name: "(cat dog turtle)", + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{ + Nodes: []ast.Node{ + &ast.StringNode{ + Value: "cat", + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{ + Value: "dog", + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{ + Value: "turtle", + }, + }, + }, + }, + }, + }, + { + name: "animal:(mammal:cat mammal:dog reptile:turtle)", + expectedError: kql.NamedGroupInvalidNodesError{ + Node: &ast.StringNode{Key: "mammal", Value: "cat"}, + }, + }, + { + 
name: "animal:(cat mammal:dog turtle)", + expectedError: kql.NamedGroupInvalidNodesError{ + Node: &ast.StringNode{Key: "mammal", Value: "dog"}, + }, + }, + { + name: "animal:(AND cat)", + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolAND}, + }, + }, + { + name: "animal:(OR cat)", + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolOR}, + }, + }, + { + name: "(AND cat)", + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolAND}, + }, + }, + { + name: "(OR cat)", + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolOR}, + }, + }, } assert := tAssert.New(t) @@ -836,7 +925,7 @@ func TestParse(t *testing.T) { if tt.expectedError != nil { if tt.expectedError.Error() != "" { - assert.Equal(err, tt.expectedError) + assert.Equal(err.Error(), tt.expectedError.Error()) } else { assert.NotNil(err) } diff --git a/services/search/pkg/query/kql/error.go b/services/search/pkg/query/kql/error.go index 7ca2947db01..c12cbfe9d00 100644 --- a/services/search/pkg/query/kql/error.go +++ b/services/search/pkg/query/kql/error.go @@ -1,10 +1,30 @@ package kql +import ( + "fmt" + + "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" +) + // StartsWithBinaryOperatorError records an error and the operation that caused it. type StartsWithBinaryOperatorError struct { - Op string + Node *ast.OperatorNode +} + +func (e StartsWithBinaryOperatorError) Error() string { + return "the expression can't begin from a binary operator: '" + e.Node.Value + "'" +} + +// NamedGroupInvalidNodesError records an error and the operation that caused it. 
+type NamedGroupInvalidNodesError struct { + Node ast.Node } -func (e *StartsWithBinaryOperatorError) Error() string { - return "the expression can't begin from a binary operator: '" + e.Op + "'" +func (e NamedGroupInvalidNodesError) Error() string { + return fmt.Errorf( + "'%T' - '%v' - '%v' is not valid", + e.Node, + ast.NodeKey(e.Node), + ast.NodeValue(e.Node), + ).Error() } diff --git a/services/search/pkg/query/kql/factory.go b/services/search/pkg/query/kql/factory.go index 6b26f1f685a..05827184830 100644 --- a/services/search/pkg/query/kql/factory.go +++ b/services/search/pkg/query/kql/factory.go @@ -38,9 +38,23 @@ func buildAST(n interface{}, text []byte, pos position) (*ast.Ast, error) { return nil, err } + if len(nodes) == 0 { + return nil, nil + } + + nodes = connectNodes(DefaultConnector{sameKeyOPValue: BoolOR}, nodes...) + + switch node := nodes[0].(type) { + case *ast.OperatorNode: + switch node.Value { + case BoolAND, BoolOR: + return nil, StartsWithBinaryOperatorError{Node: node} + } + } + return &ast.Ast{ Base: b, - Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolOR}, nodes...), + Nodes: nodes, }, nil } @@ -156,9 +170,33 @@ func buildGroupNode(k, n interface{}, text []byte, pos position) (*ast.GroupNode return nil, err } - return &ast.GroupNode{ + nodes = connectNodes(DefaultConnector{sameKeyOPValue: BoolAND}, nodes...) 
+ + gn := &ast.GroupNode{ Base: b, Key: key, - Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolAND}, nodes...), - }, nil + Nodes: nodes, + } + + if len(nodes) == 0 { + return gn, nil + } + + switch node := nodes[0].(type) { + case *ast.OperatorNode: + switch node.Value { + case BoolAND, BoolOR: + return nil, StartsWithBinaryOperatorError{Node: node} + } + } + + if key != "" { + for _, node := range nodes { + if ast.NodeKey(node) != "" { + return nil, NamedGroupInvalidNodesError{Node: node} + } + } + } + + return gn, nil } diff --git a/services/search/pkg/query/kql/kql.go b/services/search/pkg/query/kql/kql.go index 67565f59533..8719223b8d2 100644 --- a/services/search/pkg/query/kql/kql.go +++ b/services/search/pkg/query/kql/kql.go @@ -31,6 +31,10 @@ func (b Builder) Build(q string) (*ast.Ast, error) { var parserError *parserError switch { case errors.As(listError, &parserError): + if parserError.Inner != nil { + return nil, parserError.Inner + } + return nil, listError } } diff --git a/services/search/pkg/query/kql/kql_test.go b/services/search/pkg/query/kql/kql_test.go index 0e8e871221f..de050a47413 100644 --- a/services/search/pkg/query/kql/kql_test.go +++ b/services/search/pkg/query/kql/kql_test.go @@ -5,23 +5,26 @@ import ( tAssert "github.com/stretchr/testify/assert" + "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" "github.com/owncloud/ocis/v2/services/search/pkg/query/kql" ) func TestNewAST(t *testing.T) { tests := []struct { - name string - givenQuery string - shouldError bool + name string + givenQuery string + expectedError error }{ { name: "success", givenQuery: "foo:bar", }, { - name: "error", - givenQuery: kql.BoolAND, - shouldError: true, + name: "error", + givenQuery: kql.BoolAND, + expectedError: kql.StartsWithBinaryOperatorError{ + Node: &ast.OperatorNode{Value: kql.BoolAND}, + }, }, } @@ -32,13 +35,20 @@ func TestNewAST(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := 
kql.Builder{}.Build(tt.givenQuery) - if tt.shouldError { - assert.NotNil(err) + if tt.expectedError != nil { + if tt.expectedError.Error() != "" { + assert.Equal(err.Error(), tt.expectedError.Error()) + } else { + assert.NotNil(err) + } + assert.Nil(got) - } else { - assert.Nil(err) - assert.NotNil(got) + + return } + + assert.Nil(err) + assert.NotNil(got) }) } } From 76b83df2e1dc346c17d9024146703a15b66dd436 Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Sat, 9 Sep 2023 23:28:59 +0200 Subject: [PATCH 6/8] enhancement: use optimized grammar for kql parser and toolify pigeon --- go.mod | 1 + go.sum | 3 + ocis-pkg/tools/tools.go | 3 +- services/search/Makefile | 2 +- services/search/pkg/query/kql/connect.go | 19 +- services/search/pkg/query/kql/gen.go | 2 +- vendor/github.com/mna/pigeon/.editorconfig | 6 + vendor/github.com/mna/pigeon/.gitattributes | 2 + vendor/github.com/mna/pigeon/.gitignore | 22 + vendor/github.com/mna/pigeon/.travis.yml | 8 + vendor/github.com/mna/pigeon/CONTRIBUTING.md | 33 + vendor/github.com/mna/pigeon/LICENSE | 12 + vendor/github.com/mna/pigeon/Makefile | 198 + vendor/github.com/mna/pigeon/README.md | 148 + vendor/github.com/mna/pigeon/TODO | 3 + vendor/github.com/mna/pigeon/ast/ast.go | 662 + .../github.com/mna/pigeon/ast/ast_optimize.go | 469 + vendor/github.com/mna/pigeon/ast/ast_walk.go | 87 + .../github.com/mna/pigeon/builder/builder.go | 817 ++ .../pigeon/builder/generated_static_code.go | 1450 ++ .../generated_static_code_range_table.go | 21 + .../mna/pigeon/builder/static_code.go | 1466 ++ .../pigeon/builder/static_code_range_table.go | 24 + vendor/github.com/mna/pigeon/doc.go | 594 + vendor/github.com/mna/pigeon/main.go | 294 + vendor/github.com/mna/pigeon/pigeon.go | 4526 +++++++ .../github.com/mna/pigeon/reserved_words.go | 71 + .../github.com/mna/pigeon/unicode_classes.go | 200 + 
.../x/mod/internal/lazyregexp/lazyre.go | 78 + vendor/golang.org/x/mod/module/module.go | 841 ++ vendor/golang.org/x/mod/module/pseudo.go | 250 + .../x/tools/go/ast/astutil/enclosing.go | 636 + .../x/tools/go/ast/astutil/imports.go | 485 + .../x/tools/go/ast/astutil/rewrite.go | 488 + .../golang.org/x/tools/go/ast/astutil/util.go | 18 + vendor/golang.org/x/tools/imports/forward.go | 77 + .../x/tools/internal/fastwalk/fastwalk.go | 196 + .../internal/fastwalk/fastwalk_darwin.go | 119 + .../fastwalk/fastwalk_dirent_fileno.go | 14 + .../internal/fastwalk/fastwalk_dirent_ino.go | 15 + .../fastwalk/fastwalk_dirent_namlen_bsd.go | 14 + .../fastwalk/fastwalk_dirent_namlen_linux.go | 29 + .../internal/fastwalk/fastwalk_portable.go | 38 + .../tools/internal/fastwalk/fastwalk_unix.go | 153 + .../x/tools/internal/gopathwalk/walk.go | 260 + .../x/tools/internal/imports/fix.go | 1766 +++ .../x/tools/internal/imports/imports.go | 356 + .../x/tools/internal/imports/mod.go | 724 + .../x/tools/internal/imports/mod_cache.go | 236 + .../x/tools/internal/imports/sortimports.go | 297 + .../x/tools/internal/imports/zstdlib.go | 11115 ++++++++++++++++ vendor/modules.txt | 12 + 52 files changed, 29347 insertions(+), 13 deletions(-) create mode 100644 vendor/github.com/mna/pigeon/.editorconfig create mode 100644 vendor/github.com/mna/pigeon/.gitattributes create mode 100644 vendor/github.com/mna/pigeon/.gitignore create mode 100644 vendor/github.com/mna/pigeon/.travis.yml create mode 100644 vendor/github.com/mna/pigeon/CONTRIBUTING.md create mode 100644 vendor/github.com/mna/pigeon/LICENSE create mode 100644 vendor/github.com/mna/pigeon/Makefile create mode 100644 vendor/github.com/mna/pigeon/README.md create mode 100644 vendor/github.com/mna/pigeon/TODO create mode 100644 vendor/github.com/mna/pigeon/ast/ast.go create mode 100644 vendor/github.com/mna/pigeon/ast/ast_optimize.go create mode 100644 
vendor/github.com/mna/pigeon/ast/ast_walk.go create mode 100644 vendor/github.com/mna/pigeon/builder/builder.go create mode 100644 vendor/github.com/mna/pigeon/builder/generated_static_code.go create mode 100644 vendor/github.com/mna/pigeon/builder/generated_static_code_range_table.go create mode 100644 vendor/github.com/mna/pigeon/builder/static_code.go create mode 100644 vendor/github.com/mna/pigeon/builder/static_code_range_table.go create mode 100644 vendor/github.com/mna/pigeon/doc.go create mode 100644 vendor/github.com/mna/pigeon/main.go create mode 100644 vendor/github.com/mna/pigeon/pigeon.go create mode 100644 vendor/github.com/mna/pigeon/reserved_words.go create mode 100644 vendor/github.com/mna/pigeon/unicode_classes.go create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go create mode 100644 vendor/golang.org/x/mod/module/module.go create mode 100644 vendor/golang.org/x/mod/module/pseudo.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 vendor/golang.org/x/tools/imports/forward.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go create mode 100644 
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go create mode 100644 vendor/golang.org/x/tools/internal/imports/fix.go create mode 100644 vendor/golang.org/x/tools/internal/imports/imports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod_cache.go create mode 100644 vendor/golang.org/x/tools/internal/imports/sortimports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/zstdlib.go diff --git a/go.mod b/go.mod index f3063d6d56a..f33dbff79c2 100644 --- a/go.mod +++ b/go.mod @@ -55,6 +55,7 @@ require ( github.com/libregraph/idm v0.4.1-0.20230221143410-3503963047a5 github.com/libregraph/lico v0.60.1-0.20230811070109-1d4140be554d github.com/mitchellh/mapstructure v1.5.0 + github.com/mna/pigeon v1.1.0 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/nats-io/nats-server/v2 v2.9.21 github.com/oklog/run v1.1.0 diff --git a/go.sum b/go.sum index 6b9a060e0b7..cce6458535e 100644 --- a/go.sum +++ b/go.sum @@ -1700,6 +1700,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mna/pigeon v1.1.0 h1:EjlvVbkGnNGemf8OrjeJX0nH8orujY/HkJgzJtd7kxc= +github.com/mna/pigeon v1.1.0/go.mod h1:rkFeDZ0gc+YbnrXPw0q2RlI0QRuKBBPu67fgYIyGRNg= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -2530,6 +2532,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190830223141-573d9926052a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/ocis-pkg/tools/tools.go b/ocis-pkg/tools/tools.go index d95a9c44c93..db1ffc72bf6 100644 --- a/ocis-pkg/tools/tools.go +++ b/ocis-pkg/tools/tools.go @@ -1,7 +1,8 @@ -// +build tools +//go:build tools package tools import ( + _ "github.com/mna/pigeon" _ "github.com/onsi/ginkgo/ginkgo" ) diff --git a/services/search/Makefile b/services/search/Makefile index b5405676632..aa008626937 100644 --- a/services/search/Makefile +++ b/services/search/Makefile @@ -29,7 +29,7 @@ ci-go-generate: $(PIGEON) $(MOCKERY) # CI runs ci-node-generate automatically be $(MOCKERY) --dir pkg/content --output pkg/content/mocks --case underscore --name Extractor $(MOCKERY) --dir pkg/content --output pkg/content/mocks --case underscore --name Retriever $(MOCKERY) --dir pkg/search --output pkg/search/mocks --case underscore --name Searcher - $(PIGEON) -o pkg/query/kql/dictionary_gen.go pkg/query/kql/dictionary.peg + $(PIGEON) -optimize-grammar 
-optimize-parser -o pkg/query/kql/dictionary_gen.go pkg/query/kql/dictionary.peg .PHONY: ci-node-generate ci-node-generate: diff --git a/services/search/pkg/query/kql/connect.go b/services/search/pkg/query/kql/connect.go index e592de8544a..fa0ef00829f 100644 --- a/services/search/pkg/query/kql/connect.go +++ b/services/search/pkg/query/kql/connect.go @@ -13,13 +13,12 @@ func connectNodes(c Connector, nodes ...ast.Node) []ast.Node { for i := range nodes { ri := len(nodes) - 1 - i head := nodes[ri] - pair := []ast.Node{head} - if connectionNodes := connectNode(c, pair[0], connectedNodes...); len(connectionNodes) >= 1 { - pair = append(pair, connectionNodes...) + if connectionNodes := connectNode(c, head, connectedNodes...); len(connectionNodes) > 0 { + connectedNodes = append(connectionNodes, connectedNodes...) } - connectedNodes = append(pair, connectedNodes...) + connectedNodes = append([]ast.Node{head}, connectedNodes...) } return connectedNodes @@ -83,8 +82,8 @@ func (c DefaultConnector) Connect(head ast.Node, neighbor ast.Node, connections // author:"John Smith" author:"Jane Smith" // author:"John Smith" OR author:"Jane Smith" // - // nodes inside of group nodes are handled differently, - // if no explicit operator give, it uses OR + // nodes inside of group node are handled differently, + // if no explicit operator given, it uses OR // // spec: same // author:"John Smith" AND author:"Jane Smith" @@ -95,12 +94,12 @@ func (c DefaultConnector) Connect(head ast.Node, neighbor ast.Node, connections // decisions based on nearest neighbor node switch neighbor.(type) { - // nearest neighbor node type could change the default case + // nearest neighbor node type can change the default case // docs says, if the next value node: // - // is a group AND has no key + // is a group and has no key // - // even if the current node has none too, which normal leads to SAME KEY OR + // and the head node has no key // // it should be an AND edge // @@ -128,7 +127,7 @@ func (c 
DefaultConnector) Connect(head ast.Node, neighbor ast.Node, connections } } - // if neighbor node negotiates, AND edge is needed + // if neighbor node negotiates, an AND edge is needed // // spec: same // cat -dog diff --git a/services/search/pkg/query/kql/gen.go b/services/search/pkg/query/kql/gen.go index 4c406d1218b..b2b3fd9ef35 100644 --- a/services/search/pkg/query/kql/gen.go +++ b/services/search/pkg/query/kql/gen.go @@ -1,3 +1,3 @@ package kql -//go:generate go run github.com/mna/pigeon -o dictionary_gen.go dictionary.peg +//go:generate go run github.com/mna/pigeon -optimize-grammar -optimize-parser -o dictionary_gen.go dictionary.peg diff --git a/vendor/github.com/mna/pigeon/.editorconfig b/vendor/github.com/mna/pigeon/.editorconfig new file mode 100644 index 00000000000..7d709fb603c --- /dev/null +++ b/vendor/github.com/mna/pigeon/.editorconfig @@ -0,0 +1,6 @@ +# See http://editorconfig.org + +# In Go files we indent with tabs but still +# set indent_size to control the GitHub web viewer. 
+[*.go] +indent_size=4 diff --git a/vendor/github.com/mna/pigeon/.gitattributes b/vendor/github.com/mna/pigeon/.gitattributes new file mode 100644 index 00000000000..58ef8638ad3 --- /dev/null +++ b/vendor/github.com/mna/pigeon/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf +*.peg text eol=lf diff --git a/vendor/github.com/mna/pigeon/.gitignore b/vendor/github.com/mna/pigeon/.gitignore new file mode 100644 index 00000000000..080ebe3139c --- /dev/null +++ b/vendor/github.com/mna/pigeon/.gitignore @@ -0,0 +1,22 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +pigeon +bin/ +bootstrap/cmd/bootstrap-pigeon/bootstrap-pigeon +bootstrap/cmd/bootstrap-build/bootstrap-build +bootstrap/cmd/pegscan/pegscan +bootstrap/cmd/pegparse/pegparse + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Temporary and swap files +*.swp +*.swo +*~ diff --git a/vendor/github.com/mna/pigeon/.travis.yml b/vendor/github.com/mna/pigeon/.travis.yml new file mode 100644 index 00000000000..cd710ded309 --- /dev/null +++ b/vendor/github.com/mna/pigeon/.travis.yml @@ -0,0 +1,8 @@ +language: go + +script: make test + +go: + - 1.11.x + - 1.12.x + - tip diff --git a/vendor/github.com/mna/pigeon/CONTRIBUTING.md b/vendor/github.com/mna/pigeon/CONTRIBUTING.md new file mode 100644 index 00000000000..9d39240e527 --- /dev/null +++ b/vendor/github.com/mna/pigeon/CONTRIBUTING.md @@ -0,0 +1,33 @@ +# Contributing to pigeon + +There are various ways to help support this open source project: + +* if you use pigeon and find it useful, talk about it - that's probably the most basic way to help any open-source project: getting the word out that it exists and that it can be useful +* if you use pigeon and find bugs, please [file an issue][0] +* if something is poorly documented, or doesn't work as documented, this is also a 
bug, please [file an issue][0] +* if you can fix the issue (whether it is documentation- or code-related), then [submit a pull-request][1] - but read on to see what should be done to get it merged +* if you would like to see some new feature/behaviour being implemented, please first [open an issue][0] to discuss it because features are less likely to get merged compared to bug fixes + +## Submitting a pull request + +Assuming you already have a copy of the repository (either via `go get`, a github fork, a clone, etc.), you will also need `make` to regenerate all tools and files generated when a dependency changes. I use GNU make version 4.1, other versions of make may work too but haven't been tested. + +Run `make` in the root directory of the repository. That will create the bootstrap builder, the bootstrap parser, and the final parser, along with some generated Go files. Once `make` is run successfully, run `go test ./...` in the root directory to make sure all tests pass. + +Once this is done and tests pass, you can start implementing the bug fix (or the new feature provided **it has already been discussed and agreed in a github issue** first). + +For a bug fix, the best way to proceed is to first write a test that proves the bug, then write the code that fixes the bug and makes the test pass. All other tests should still pass too (unless it relied on the buggy behaviour, in which case existing tests must be fixed). + +For a new feature, it must be thoroughly tested. New code without new test(s) is unlikely to get merged. + +Respect the coding style of the repository, which means essentially to respect the [coding guidelines of the Go community][2]. Use `gofmt` to format your code, and `goimports` to add and format the list of imported packages (or do it manually, but in a `goimports`-style). + +Once all code is done and tests pass, regenerate the whole tree with `make`, run `make lint` to make sure the code is correct, and run tests again. 
You are now ready to submit the pull request. + +## Licensing + +All pull requests that get merged will be made available under the BSD 3-Clause license (see the LICENSE file for details), as the rest of the pigeon repository. Do not submit pull requests if you do not want your contributions to be made available under those terms. + +[0]: https://github.com/mna/pigeon/issues/new +[1]: https://github.com/mna/pigeon/pulls +[2]: https://github.com/golang/go/wiki/CodeReviewComments diff --git a/vendor/github.com/mna/pigeon/LICENSE b/vendor/github.com/mna/pigeon/LICENSE new file mode 100644 index 00000000000..2c684aaf65c --- /dev/null +++ b/vendor/github.com/mna/pigeon/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2015, Martin Angers & Contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mna/pigeon/Makefile b/vendor/github.com/mna/pigeon/Makefile new file mode 100644 index 00000000000..ddf0f506ba0 --- /dev/null +++ b/vendor/github.com/mna/pigeon/Makefile @@ -0,0 +1,198 @@ +SHELL = /bin/bash + +# force the use of go modules +export GO111MODULE = on + +# directories and source code lists +ROOT = . +ROOT_SRC = $(ROOT)/*.go +BINDIR = ./bin +EXAMPLES_DIR = $(ROOT)/examples +TEST_DIR = $(ROOT)/test + +# builder and ast packages +BUILDER_DIR = $(ROOT)/builder +BUILDER_SRC = $(BUILDER_DIR)/*.go +AST_DIR = $(ROOT)/ast +AST_SRC = $(AST_DIR)/*.go + +# bootstrap tools variables +BOOTSTRAP_DIR = $(ROOT)/bootstrap +BOOTSTRAP_SRC = $(BOOTSTRAP_DIR)/*.go +BOOTSTRAPBUILD_DIR = $(BOOTSTRAP_DIR)/cmd/bootstrap-build +BOOTSTRAPBUILD_SRC = $(BOOTSTRAPBUILD_DIR)/*.go +BOOTSTRAPPIGEON_DIR = $(BOOTSTRAP_DIR)/cmd/bootstrap-pigeon +BOOTSTRAPPIGEON_SRC = $(BOOTSTRAPPIGEON_DIR)/*.go +STATICCODEGENERATOR_DIR = $(BOOTSTRAP_DIR)/cmd/static_code_generator +STATICCODEGENERATOR_SRC = $(STATICCODEGENERATOR_DIR)/*.go + +# grammar variables +GRAMMAR_DIR = $(ROOT)/grammar +BOOTSTRAP_GRAMMAR = $(GRAMMAR_DIR)/bootstrap.peg +PIGEON_GRAMMAR = $(GRAMMAR_DIR)/pigeon.peg + +TEST_GENERATED_SRC = $(patsubst %.peg,%.go,$(shell echo ./{examples,test}/**/*.peg)) + +all: $(BUILDER_DIR)/generated_static_code.go $(BINDIR)/static_code_generator \ + $(BUILDER_DIR)/generated_static_code_range_table.go \ + $(BINDIR)/bootstrap-build 
$(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go \ + $(BINDIR)/bootstrap-pigeon $(ROOT)/pigeon.go $(BINDIR)/pigeon \ + $(TEST_GENERATED_SRC) + +$(BINDIR)/static_code_generator: $(STATICCODEGENERATOR_SRC) + go build -o $@ $(STATICCODEGENERATOR_DIR) + +$(BINDIR)/bootstrap-build: $(BOOTSTRAPBUILD_SRC) $(BOOTSTRAP_SRC) $(BUILDER_SRC) \ + $(AST_SRC) + go build -o $@ $(BOOTSTRAPBUILD_DIR) + +$(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go: $(BINDIR)/bootstrap-build \ + $(BOOTSTRAP_GRAMMAR) + $(BINDIR)/bootstrap-build $(BOOTSTRAP_GRAMMAR) > $@ + +$(BINDIR)/bootstrap-pigeon: $(BOOTSTRAPPIGEON_SRC) \ + $(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go + go build -o $@ $(BOOTSTRAPPIGEON_DIR) + +$(ROOT)/pigeon.go: $(BINDIR)/bootstrap-pigeon $(PIGEON_GRAMMAR) + $(BINDIR)/bootstrap-pigeon $(PIGEON_GRAMMAR) > $@ + +$(BINDIR)/pigeon: $(ROOT_SRC) $(ROOT)/pigeon.go + go build -o $@ $(ROOT) + +$(BUILDER_DIR)/generated_static_code.go: $(BUILDER_DIR)/static_code.go $(BINDIR)/static_code_generator + $(BINDIR)/static_code_generator $(BUILDER_DIR)/static_code.go $@ staticCode + +$(BUILDER_DIR)/generated_static_code_range_table.go: $(BUILDER_DIR)/static_code_range_table.go $(BINDIR)/static_code_generator + $(BINDIR)/static_code_generator $(BUILDER_DIR)/static_code_range_table.go $@ rangeTable0 + +$(BOOTSTRAP_GRAMMAR): +$(PIGEON_GRAMMAR): + +# surely there's a better way to define the examples and test targets +$(EXAMPLES_DIR)/json/json.go: $(EXAMPLES_DIR)/json/json.peg $(EXAMPLES_DIR)/json/optimized/json.go $(EXAMPLES_DIR)/json/optimized-grammar/json.go $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(EXAMPLES_DIR)/json/optimized/json.go: $(EXAMPLES_DIR)/json/json.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-parser -optimize-basic-latin $< > $@ + +$(EXAMPLES_DIR)/json/optimized-grammar/json.go: $(EXAMPLES_DIR)/json/json.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-grammar $< > $@ + +$(EXAMPLES_DIR)/calculator/calculator.go: $(EXAMPLES_DIR)/calculator/calculator.peg 
$(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(EXAMPLES_DIR)/indentation/indentation.go: $(EXAMPLES_DIR)/indentation/indentation.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/andnot/andnot.go: $(TEST_DIR)/andnot/andnot.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/predicates/predicates.go: $(TEST_DIR)/predicates/predicates.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/issue_1/issue_1.go: $(TEST_DIR)/issue_1/issue_1.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/linear/linear.go: $(TEST_DIR)/linear/linear.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/issue_18/issue_18.go: $(TEST_DIR)/issue_18/issue_18.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/runeerror/runeerror.go: $(TEST_DIR)/runeerror/runeerror.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/errorpos/errorpos.go: $(TEST_DIR)/errorpos/errorpos.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/global_store/global_store.go: $(TEST_DIR)/global_store/global_store.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/goto/goto.go: $(TEST_DIR)/goto/goto.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/goto_state/goto_state.go: $(TEST_DIR)/goto_state/goto_state.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/max_expr_cnt/maxexpr.go: $(TEST_DIR)/max_expr_cnt/maxexpr.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/labeled_failures/labeled_failures.go: $(TEST_DIR)/labeled_failures/labeled_failures.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/thrownrecover/thrownrecover.go: $(TEST_DIR)/thrownrecover/thrownrecover.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/alternate_entrypoint/altentry.go: $(TEST_DIR)/alternate_entrypoint/altentry.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint 
-optimize-grammar -alternate-entrypoints Entry2,Entry3,C $< > $@ + +$(TEST_DIR)/state/state.go: $(TEST_DIR)/state/state.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-grammar $< > $@ + +$(TEST_DIR)/stateclone/stateclone.go: $(TEST_DIR)/stateclone/stateclone.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/statereadonly/statereadonly.go: $(TEST_DIR)/statereadonly/statereadonly.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/staterestore/staterestore.go: $(TEST_DIR)/staterestore/staterestore.peg $(TEST_DIR)/staterestore/standard/staterestore.go $(TEST_DIR)/staterestore/optimized/staterestore.go $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/staterestore/standard/staterestore.go: $(TEST_DIR)/staterestore/staterestore.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/staterestore/optimized/staterestore.go: $(TEST_DIR)/staterestore/staterestore.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-grammar -optimize-parser -alternate-entrypoints TestAnd,TestNot $< > $@ + +$(TEST_DIR)/emptystate/emptystate.go: $(TEST_DIR)/emptystate/emptystate.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/issue_65/issue_65.go: $(TEST_DIR)/issue_65/issue_65.peg $(TEST_DIR)/issue_65/optimized/issue_65.go $(TEST_DIR)/issue_65/optimized-grammar/issue_65.go $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +$(TEST_DIR)/issue_65/optimized/issue_65.go: $(TEST_DIR)/issue_65/issue_65.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-parser -optimize-basic-latin $< > $@ + +$(TEST_DIR)/issue_65/optimized-grammar/issue_65.go: $(TEST_DIR)/issue_65/issue_65.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-grammar $< > $@ + +$(TEST_DIR)/issue_70/issue_70.go: $(TEST_DIR)/issue_70/issue_70.peg $(TEST_DIR)/issue_70/optimized/issue_70.go $(TEST_DIR)/issue_70/optimized-grammar/issue_70.go $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + 
+$(TEST_DIR)/issue_70/optimized/issue_70.go: $(TEST_DIR)/issue_70/issue_70.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-parser -optimize-basic-latin $< > $@ + +$(TEST_DIR)/issue_70/optimized-grammar/issue_70.go: $(TEST_DIR)/issue_70/issue_70.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint -optimize-grammar $< > $@ + +$(TEST_DIR)/issue_70b/issue_70b.go: $(TEST_DIR)/issue_70b/issue_70b.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint --optimize-grammar $< > $@ + +$(TEST_DIR)/issue_80/issue_80.go: $(TEST_DIR)/issue_80/issue_80.peg $(BINDIR)/pigeon + $(BINDIR)/pigeon -nolint $< > $@ + +lint: + golint ./... + go vet ./... + +gometalinter: + gometalinter ./... + +cmp: + @boot=$$(mktemp) && $(BINDIR)/bootstrap-pigeon $(PIGEON_GRAMMAR) > $$boot && \ + official=$$(mktemp) && $(BINDIR)/pigeon $(PIGEON_GRAMMAR) > $$official && \ + cmp $$boot $$official && \ + unlink $$boot && \ + unlink $$official + +test: + go test -v ./... + +clean: + rm -f $(BUILDER_DIR)/generated_static_code.go $(BUILDER_DIR)/generated_static_code_range_table.go + rm -f $(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go $(ROOT)/pigeon.go $(TEST_GENERATED_SRC) $(EXAMPLES_DIR)/json/optimized/json.go $(EXAMPLES_DIR)/json/optimized-grammar/json.go $(TEST_DIR)/staterestore/optimized/staterestore.go $(TEST_DIR)/staterestore/standard/staterestore.go $(TEST_DIR)/issue_65/optimized/issue_65.go $(TEST_DIR)/issue_65/optimized-grammar/issue_65.go + rm -rf $(BINDIR) + +.PHONY: all clean lint gometalinter cmp test + diff --git a/vendor/github.com/mna/pigeon/README.md b/vendor/github.com/mna/pigeon/README.md new file mode 100644 index 00000000000..06ff5f18e80 --- /dev/null +++ b/vendor/github.com/mna/pigeon/README.md @@ -0,0 +1,148 @@ +# pigeon - a PEG parser generator for Go + +[![GoDoc](https://godoc.org/github.com/mna/pigeon?status.png)](https://godoc.org/github.com/mna/pigeon) +[![build 
status](https://secure.travis-ci.org/mna/pigeon.png?branch=master)](http://travis-ci.org/mna/pigeon) +[![GoReportCard](https://goreportcard.com/badge/github.com/mna/pigeon)](https://goreportcard.com/report/github.com/mna/pigeon) +[![Software License](https://img.shields.io/badge/license-BSD-blue.svg)](LICENSE) + +The pigeon command generates parsers based on a [parsing expression grammar (PEG)][0]. Its grammar and syntax is inspired by the [PEG.js project][1], while the implementation is loosely based on the [parsing expression grammar for C# 3.0][2] article. It parses Unicode text encoded in UTF-8. + +See the [godoc page][3] for detailed usage. Also have a look at the [Pigeon Wiki](https://github.com/mna/pigeon/wiki) for additional information about Pigeon and PEG in general. + +## Releases + +* v1.0.0 is the tagged release of the original implementation. +* Work has started on v2.0.0 with some planned breaking changes. + +Github user [@mna][6] created the package in April 2015, and [@breml][5] is the package's maintainer as of May 2017. + +### Breaking Changes since v1.0.0 + +* Removed support for Go < v1.11 to support go modules for dependency tracking. + +* Removed support for Go < v1.9 due to the requirement [golang.org/x/tools/imports](https://godoc.org/golang.org/x/tools/imports), which was updated to reflect changes in recent versions of Go. This is in compliance with the [Go Release Policy](https://golang.org/doc/devel/release.html#policy) respectively the [Go Release Maintenance](https://github.com/golang/go/wiki/Go-Release-Cycle#release-maintenance), which states support for each major release until there are two newer major releases. + +## Installation + +Provided you have Go correctly installed with the $GOPATH and $GOBIN environment variables set, run: + +``` +$ go get -u github.com/mna/pigeon +``` + +This will install or update the package, and the `pigeon` command will be installed in your $GOBIN directory. 
Neither this package nor the parsers generated by this command require any third-party dependency, unless such a dependency is used in the code blocks of the grammar. + +## Basic usage + +``` +$ pigeon [options] [PEG_GRAMMAR_FILE] +``` + +By default, the input grammar is read from `stdin` and the generated code is printed to `stdout`. You may save it in a file using the `-o` flag. + +## Example + +Given the following grammar: + +``` +{ +// part of the initializer code block omitted for brevity + +var ops = map[string]func(int, int) int { + "+": func(l, r int) int { + return l + r + }, + "-": func(l, r int) int { + return l - r + }, + "*": func(l, r int) int { + return l * r + }, + "/": func(l, r int) int { + return l / r + }, +} + +func toIfaceSlice(v interface{}) []interface{} { + if v == nil { + return nil + } + return v.([]interface{}) +} + +func eval(first, rest interface{}) int { + l := first.(int) + restSl := toIfaceSlice(rest) + for _, v := range restSl { + restExpr := toIfaceSlice(v) + r := restExpr[3].(int) + op := restExpr[1].(string) + l = ops[op](l, r) + } + return l +} +} + + +Input <- expr:Expr EOF { + return expr, nil +} + +Expr <- _ first:Term rest:( _ AddOp _ Term )* _ { + return eval(first, rest), nil +} + +Term <- first:Factor rest:( _ MulOp _ Factor )* { + return eval(first, rest), nil +} + +Factor <- '(' expr:Expr ')' { + return expr, nil +} / integer:Integer { + return integer, nil +} + +AddOp <- ( '+' / '-' ) { + return string(c.text), nil +} + +MulOp <- ( '*' / '/' ) { + return string(c.text), nil +} + +Integer <- '-'? [0-9]+ { + return strconv.Atoi(string(c.text)) +} + +_ "whitespace" <- [ \n\t\r]* + +EOF <- !. +``` + +The generated parser can parse simple arithmetic operations, e.g.: + +``` +18 + 3 - 27 * (-18 / -3) + +=> -141 +``` + +More examples can be found in the `examples/` subdirectory. + +See the [godoc page][3] for detailed usage. + +## Contributing + +See the CONTRIBUTING.md file. + +## License + +The [BSD 3-Clause license][4]. 
See the LICENSE file. + +[0]: http://en.wikipedia.org/wiki/Parsing_expression_grammar +[1]: http://pegjs.org/ +[2]: http://www.codeproject.com/Articles/29713/Parsing-Expression-Grammar-Support-for-C-Part +[3]: https://godoc.org/github.com/mna/pigeon +[4]: http://opensource.org/licenses/BSD-3-Clause +[5]: https://github.com/breml +[6]: https://github.com/mna diff --git a/vendor/github.com/mna/pigeon/TODO b/vendor/github.com/mna/pigeon/TODO new file mode 100644 index 00000000000..75a1f2145f5 --- /dev/null +++ b/vendor/github.com/mna/pigeon/TODO @@ -0,0 +1,3 @@ +- refactor implementation as a VM to avoid stack overflow in pathological cases (and maybe better performance): in branch wip-vm +? options like current receiver name read directly from the grammar file +? type annotations for generated code functions diff --git a/vendor/github.com/mna/pigeon/ast/ast.go b/vendor/github.com/mna/pigeon/ast/ast.go new file mode 100644 index 00000000000..c34b7d1767a --- /dev/null +++ b/vendor/github.com/mna/pigeon/ast/ast.go @@ -0,0 +1,662 @@ +// Package ast defines the abstract syntax tree for the PEG grammar. +// +// The parser generator's PEG grammar generates a tree using this package +// that is then converted by the builder to the simplified AST used in +// the generated parser. +package ast + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// Pos represents a position in a source file. +type Pos struct { + Filename string + Line int + Col int + Off int +} + +// String returns the textual representation of a position. +func (p Pos) String() string { + if p.Filename != "" { + return fmt.Sprintf("%s:%d:%d (%d)", p.Filename, p.Line, p.Col, p.Off) + } + return fmt.Sprintf("%d:%d (%d)", p.Line, p.Col, p.Off) +} + +// Grammar is the top-level node of the AST for the PEG grammar. 
+type Grammar struct { + p Pos + Init *CodeBlock + Rules []*Rule +} + +// NewGrammar creates a new grammar at the specified position. +func NewGrammar(p Pos) *Grammar { + return &Grammar{p: p} +} + +// Pos returns the starting position of the node. +func (g *Grammar) Pos() Pos { return g.p } + +// String returns the textual representation of a node. +func (g *Grammar) String() string { + var buf bytes.Buffer + + buf.WriteString(fmt.Sprintf("%s: %T{Init: %v, Rules: [\n", + g.p, g, g.Init)) + for _, r := range g.Rules { + buf.WriteString(fmt.Sprintf("%s,\n", r)) + } + buf.WriteString("]}") + return buf.String() +} + +// Rule represents a rule in the PEG grammar. It has a name, an optional +// display name to be used in error messages, and an expression. +type Rule struct { + p Pos + Name *Identifier + DisplayName *StringLit + Expr Expression +} + +// NewRule creates a rule with at the specified position and with the +// specified name as identifier. +func NewRule(p Pos, name *Identifier) *Rule { + return &Rule{p: p, Name: name} +} + +// Pos returns the starting position of the node. +func (r *Rule) Pos() Pos { return r.p } + +// String returns the textual representation of a node. +func (r *Rule) String() string { + return fmt.Sprintf("%s: %T{Name: %v, DisplayName: %v, Expr: %v}", + r.p, r, r.Name, r.DisplayName, r.Expr) +} + +// Expression is the interface implemented by all expression types. +type Expression interface { + Pos() Pos +} + +// ChoiceExpr is an ordered sequence of expressions. The parser tries to +// match any of the alternatives in sequence and stops at the first one +// that matches. +type ChoiceExpr struct { + p Pos + Alternatives []Expression +} + +// NewChoiceExpr creates a choice expression at the specified position. +func NewChoiceExpr(p Pos) *ChoiceExpr { + return &ChoiceExpr{p: p} +} + +// Pos returns the starting position of the node. +func (c *ChoiceExpr) Pos() Pos { return c.p } + +// String returns the textual representation of a node. 
+func (c *ChoiceExpr) String() string { + var buf bytes.Buffer + + buf.WriteString(fmt.Sprintf("%s: %T{Alternatives: [\n", c.p, c)) + for _, e := range c.Alternatives { + buf.WriteString(fmt.Sprintf("%s,\n", e)) + } + buf.WriteString("]}") + return buf.String() +} + +// FailureLabel is an identifier, which can by thrown and recovered in a grammar +type FailureLabel string + +// RecoveryExpr is an ordered sequence of expressions. The parser tries to +// match any of the alternatives in sequence and stops at the first one +// that matches. +type RecoveryExpr struct { + p Pos + Expr Expression + RecoverExpr Expression + Labels []FailureLabel +} + +// NewRecoveryExpr creates a choice expression at the specified position. +func NewRecoveryExpr(p Pos) *RecoveryExpr { + return &RecoveryExpr{p: p} +} + +// Pos returns the starting position of the node. +func (r *RecoveryExpr) Pos() Pos { return r.p } + +// String returns the textual representation of a node. +func (r *RecoveryExpr) String() string { + var buf bytes.Buffer + + buf.WriteString(fmt.Sprintf("%s: %T{Expr: %v, RecoverExpr: %v", r.p, r, r.Expr, r.RecoverExpr)) + buf.WriteString(fmt.Sprintf(", Labels: [\n")) + for _, e := range r.Labels { + buf.WriteString(fmt.Sprintf("%s,\n", e)) + } + buf.WriteString("]}") + return buf.String() +} + +// ActionExpr is an expression that has an associated block of code to +// execute when the expression matches. +type ActionExpr struct { + p Pos + Expr Expression + Code *CodeBlock + FuncIx int +} + +// NewActionExpr creates a new action expression at the specified position. +func NewActionExpr(p Pos) *ActionExpr { + return &ActionExpr{p: p} +} + +// Pos returns the starting position of the node. +func (a *ActionExpr) Pos() Pos { return a.p } + +// String returns the textual representation of a node. 
+func (a *ActionExpr) String() string { + return fmt.Sprintf("%s: %T{Expr: %v, Code: %v}", a.p, a, a.Expr, a.Code) +} + +// ThrowExpr is an expression that throws an FailureLabel to be catched by a +// RecoveryChoiceExpr. +type ThrowExpr struct { + p Pos + Label string +} + +// NewThrowExpr creates a new throw expression at the specified position. +func NewThrowExpr(p Pos) *ThrowExpr { + return &ThrowExpr{p: p} +} + +// Pos returns the starting position of the node. +func (t *ThrowExpr) Pos() Pos { return t.p } + +// String returns the textual representation of a node. +func (t *ThrowExpr) String() string { + return fmt.Sprintf("%s: %T{Label: %v}", t.p, t, t.Label) +} + +// SeqExpr is an ordered sequence of expressions, all of which must match +// if the SeqExpr is to be a match itself. +type SeqExpr struct { + p Pos + Exprs []Expression +} + +// NewSeqExpr creates a new sequence expression at the specified position. +func NewSeqExpr(p Pos) *SeqExpr { + return &SeqExpr{p: p} +} + +// Pos returns the starting position of the node. +func (s *SeqExpr) Pos() Pos { return s.p } + +// String returns the textual representation of a node. +func (s *SeqExpr) String() string { + var buf bytes.Buffer + + buf.WriteString(fmt.Sprintf("%s: %T{Exprs: [\n", s.p, s)) + for _, e := range s.Exprs { + buf.WriteString(fmt.Sprintf("%s,\n", e)) + } + buf.WriteString("]}") + return buf.String() +} + +// LabeledExpr is an expression that has an associated label. Code blocks +// can access the value of the expression using that label, that becomes +// a local variable in the code. +type LabeledExpr struct { + p Pos + Label *Identifier + Expr Expression +} + +// NewLabeledExpr creates a new labeled expression at the specified position. +func NewLabeledExpr(p Pos) *LabeledExpr { + return &LabeledExpr{p: p} +} + +// Pos returns the starting position of the node. +func (l *LabeledExpr) Pos() Pos { return l.p } + +// String returns the textual representation of a node. 
+func (l *LabeledExpr) String() string { + return fmt.Sprintf("%s: %T{Label: %v, Expr: %v}", l.p, l, l.Label, l.Expr) +} + +// AndExpr is a zero-length matcher that is considered a match if the +// expression it contains is a match. +type AndExpr struct { + p Pos + Expr Expression +} + +// NewAndExpr creates a new and (&) expression at the specified position. +func NewAndExpr(p Pos) *AndExpr { + return &AndExpr{p: p} +} + +// Pos returns the starting position of the node. +func (a *AndExpr) Pos() Pos { return a.p } + +// String returns the textual representation of a node. +func (a *AndExpr) String() string { + return fmt.Sprintf("%s: %T{Expr: %v}", a.p, a, a.Expr) +} + +// NotExpr is a zero-length matcher that is considered a match if the +// expression it contains is not a match. +type NotExpr struct { + p Pos + Expr Expression +} + +// NewNotExpr creates a new not (!) expression at the specified position. +func NewNotExpr(p Pos) *NotExpr { + return &NotExpr{p: p} +} + +// Pos returns the starting position of the node. +func (n *NotExpr) Pos() Pos { return n.p } + +// String returns the textual representation of a node. +func (n *NotExpr) String() string { + return fmt.Sprintf("%s: %T{Expr: %v}", n.p, n, n.Expr) +} + +// ZeroOrOneExpr is an expression that can be matched zero or one time. +type ZeroOrOneExpr struct { + p Pos + Expr Expression +} + +// NewZeroOrOneExpr creates a new zero or one expression at the specified +// position. +func NewZeroOrOneExpr(p Pos) *ZeroOrOneExpr { + return &ZeroOrOneExpr{p: p} +} + +// Pos returns the starting position of the node. +func (z *ZeroOrOneExpr) Pos() Pos { return z.p } + +// String returns the textual representation of a node. +func (z *ZeroOrOneExpr) String() string { + return fmt.Sprintf("%s: %T{Expr: %v}", z.p, z, z.Expr) +} + +// ZeroOrMoreExpr is an expression that can be matched zero or more times. 
+type ZeroOrMoreExpr struct { + p Pos + Expr Expression +} + +// NewZeroOrMoreExpr creates a new zero or more expression at the specified +// position. +func NewZeroOrMoreExpr(p Pos) *ZeroOrMoreExpr { + return &ZeroOrMoreExpr{p: p} +} + +// Pos returns the starting position of the node. +func (z *ZeroOrMoreExpr) Pos() Pos { return z.p } + +// String returns the textual representation of a node. +func (z *ZeroOrMoreExpr) String() string { + return fmt.Sprintf("%s: %T{Expr: %v}", z.p, z, z.Expr) +} + +// OneOrMoreExpr is an expression that can be matched one or more times. +type OneOrMoreExpr struct { + p Pos + Expr Expression +} + +// NewOneOrMoreExpr creates a new one or more expression at the specified +// position. +func NewOneOrMoreExpr(p Pos) *OneOrMoreExpr { + return &OneOrMoreExpr{p: p} +} + +// Pos returns the starting position of the node. +func (o *OneOrMoreExpr) Pos() Pos { return o.p } + +// String returns the textual representation of a node. +func (o *OneOrMoreExpr) String() string { + return fmt.Sprintf("%s: %T{Expr: %v}", o.p, o, o.Expr) +} + +// RuleRefExpr is an expression that references a rule by name. +type RuleRefExpr struct { + p Pos + Name *Identifier +} + +// NewRuleRefExpr creates a new rule reference expression at the specified +// position. +func NewRuleRefExpr(p Pos) *RuleRefExpr { + return &RuleRefExpr{p: p} +} + +// Pos returns the starting position of the node. +func (r *RuleRefExpr) Pos() Pos { return r.p } + +// String returns the textual representation of a node. +func (r *RuleRefExpr) String() string { + return fmt.Sprintf("%s: %T{Name: %v}", r.p, r, r.Name) +} + +// StateCodeExpr is an expression which can modify the internal state of the parser. +type StateCodeExpr struct { + p Pos + Code *CodeBlock + FuncIx int +} + +// NewStateCodeExpr creates a new state (#) code expression at the specified +// position. 
+func NewStateCodeExpr(p Pos) *StateCodeExpr { + return &StateCodeExpr{p: p} +} + +// Pos returns the starting position of the node. +func (s *StateCodeExpr) Pos() Pos { return s.p } + +// String returns the textual representation of a node. +func (s *StateCodeExpr) String() string { + return fmt.Sprintf("%s: %T{Code: %v}", s.p, s, s.Code) +} + +// AndCodeExpr is a zero-length matcher that is considered a match if the +// code block returns true. +type AndCodeExpr struct { + p Pos + Code *CodeBlock + FuncIx int +} + +// NewAndCodeExpr creates a new and (&) code expression at the specified +// position. +func NewAndCodeExpr(p Pos) *AndCodeExpr { + return &AndCodeExpr{p: p} +} + +// Pos returns the starting position of the node. +func (a *AndCodeExpr) Pos() Pos { return a.p } + +// String returns the textual representation of a node. +func (a *AndCodeExpr) String() string { + return fmt.Sprintf("%s: %T{Code: %v}", a.p, a, a.Code) +} + +// NotCodeExpr is a zero-length matcher that is considered a match if the +// code block returns false. +type NotCodeExpr struct { + p Pos + Code *CodeBlock + FuncIx int +} + +// NewNotCodeExpr creates a new not (!) code expression at the specified +// position. +func NewNotCodeExpr(p Pos) *NotCodeExpr { + return &NotCodeExpr{p: p} +} + +// Pos returns the starting position of the node. +func (n *NotCodeExpr) Pos() Pos { return n.p } + +// String returns the textual representation of a node. +func (n *NotCodeExpr) String() string { + return fmt.Sprintf("%s: %T{Code: %v}", n.p, n, n.Code) +} + +// LitMatcher is a string literal matcher. The value to match may be a +// double-quoted string, a single-quoted single character, or a back-tick +// quoted raw string. +type LitMatcher struct { + posValue // can be str, rstr or char + IgnoreCase bool +} + +// NewLitMatcher creates a new literal matcher at the specified position and +// with the specified value. 
+func NewLitMatcher(p Pos, v string) *LitMatcher { + return &LitMatcher{posValue: posValue{p: p, Val: v}} +} + +// Pos returns the starting position of the node. +func (l *LitMatcher) Pos() Pos { return l.p } + +// String returns the textual representation of a node. +func (l *LitMatcher) String() string { + return fmt.Sprintf("%s: %T{Val: %q, IgnoreCase: %t}", l.p, l, l.Val, l.IgnoreCase) +} + +// CharClassMatcher is a character class matcher. The value to match must +// be one of the specified characters, in a range of characters, or in the +// Unicode classes of characters. +type CharClassMatcher struct { + posValue + IgnoreCase bool + Inverted bool + Chars []rune + Ranges []rune // pairs of low/high range + UnicodeClasses []string +} + +// NewCharClassMatcher creates a new character class matcher at the specified +// position and with the specified raw value. It parses the raw value into +// the list of characters, ranges and Unicode classes. +func NewCharClassMatcher(p Pos, raw string) *CharClassMatcher { + c := &CharClassMatcher{posValue: posValue{p: p, Val: raw}} + c.parse() + return c +} + +func (c *CharClassMatcher) parse() { + raw := c.Val + c.IgnoreCase = strings.HasSuffix(raw, "i") + if c.IgnoreCase { + raw = raw[:len(raw)-1] + } + + // "unquote" the character classes + raw = raw[1 : len(raw)-1] + if len(raw) == 0 { + return + } + + c.Inverted = raw[0] == '^' + if c.Inverted { + raw = raw[1:] + if len(raw) == 0 { + return + } + } + + // content of char class is necessarily valid, so escapes are correct + r := strings.NewReader(raw) + var chars []rune + var buf bytes.Buffer +outer: + for { + rn, _, err := r.ReadRune() + if err != nil { + break outer + } + + consumeN := 0 + switch rn { + case '\\': + rn, _, _ := r.ReadRune() + switch rn { + case ']': + chars = append(chars, rn) + continue + + case 'p': + rn, _, _ := r.ReadRune() + if rn == '{' { + buf.Reset() + for { + rn, _, _ := r.ReadRune() + if rn == '}' { + break + } + buf.WriteRune(rn) + } + 
c.UnicodeClasses = append(c.UnicodeClasses, buf.String()) + } else { + c.UnicodeClasses = append(c.UnicodeClasses, string(rn)) + } + continue + + case 'x': + consumeN = 2 + case 'u': + consumeN = 4 + case 'U': + consumeN = 8 + case '0', '1', '2', '3', '4', '5', '6', '7': + consumeN = 2 + } + + buf.Reset() + buf.WriteRune(rn) + for i := 0; i < consumeN; i++ { + rn, _, _ := r.ReadRune() + buf.WriteRune(rn) + } + rn, _, _, _ = strconv.UnquoteChar("\\"+buf.String(), 0) + chars = append(chars, rn) + + default: + chars = append(chars, rn) + } + } + + // extract ranges and chars + inRange, wasRange := false, false + for i, r := range chars { + if inRange { + c.Ranges = append(c.Ranges, r) + inRange = false + wasRange = true + continue + } + + if r == '-' && !wasRange && len(c.Chars) > 0 && i < len(chars)-1 { + inRange = true + wasRange = false + // start of range is the last Char added + c.Ranges = append(c.Ranges, c.Chars[len(c.Chars)-1]) + c.Chars = c.Chars[:len(c.Chars)-1] + continue + } + wasRange = false + c.Chars = append(c.Chars, r) + } +} + +// Pos returns the starting position of the node. +func (c *CharClassMatcher) Pos() Pos { return c.p } + +// String returns the textual representation of a node. +func (c *CharClassMatcher) String() string { + return fmt.Sprintf("%s: %T{Val: %q, IgnoreCase: %t, Inverted: %t}", + c.p, c, c.Val, c.IgnoreCase, c.Inverted) +} + +// AnyMatcher is a matcher that matches any character except end-of-file. +type AnyMatcher struct { + posValue +} + +// NewAnyMatcher creates a new any matcher at the specified position. The +// value is provided for completeness' sake, but it is always the dot. +func NewAnyMatcher(p Pos, v string) *AnyMatcher { + return &AnyMatcher{posValue{p, v}} +} + +// Pos returns the starting position of the node. +func (a *AnyMatcher) Pos() Pos { return a.p } + +// String returns the textual representation of a node. 
+func (a *AnyMatcher) String() string { + return fmt.Sprintf("%s: %T{Val: %q}", a.p, a, a.Val) +} + +// CodeBlock represents a code block. +type CodeBlock struct { + posValue +} + +// NewCodeBlock creates a new code block at the specified position and with +// the specified value. The value includes the outer braces. +func NewCodeBlock(p Pos, code string) *CodeBlock { + return &CodeBlock{posValue{p, code}} +} + +// Pos returns the starting position of the node. +func (c *CodeBlock) Pos() Pos { return c.p } + +// String returns the textual representation of a node. +func (c *CodeBlock) String() string { + return fmt.Sprintf("%s: %T{Val: %q}", c.p, c, c.Val) +} + +// Identifier represents an identifier. +type Identifier struct { + posValue +} + +// NewIdentifier creates a new identifier at the specified position and +// with the specified name. +func NewIdentifier(p Pos, name string) *Identifier { + return &Identifier{posValue{p: p, Val: name}} +} + +// Pos returns the starting position of the node. +func (i *Identifier) Pos() Pos { return i.p } + +// String returns the textual representation of a node. +func (i *Identifier) String() string { + return fmt.Sprintf("%s: %T{Val: %q}", i.p, i, i.Val) +} + +// StringLit represents a string literal. +type StringLit struct { + posValue +} + +// NewStringLit creates a new string literal at the specified position and +// with the specified value. +func NewStringLit(p Pos, val string) *StringLit { + return &StringLit{posValue{p: p, Val: val}} +} + +// Pos returns the starting position of the node. +func (s *StringLit) Pos() Pos { return s.p } + +// String returns the textual representation of a node. 
+func (s *StringLit) String() string { + return fmt.Sprintf("%s: %T{Val: %q}", s.p, s, s.Val) +} + +type posValue struct { + p Pos + Val string +} diff --git a/vendor/github.com/mna/pigeon/ast/ast_optimize.go b/vendor/github.com/mna/pigeon/ast/ast_optimize.go new file mode 100644 index 00000000000..844286881f4 --- /dev/null +++ b/vendor/github.com/mna/pigeon/ast/ast_optimize.go @@ -0,0 +1,469 @@ +package ast + +import ( + "bytes" + "strconv" + "strings" +) + +type grammarOptimizer struct { + rule string + protectedRules map[string]struct{} + rules map[string]*Rule + ruleUsesRules map[string]map[string]struct{} + ruleUsedByRules map[string]map[string]struct{} + visitor func(expr Expression) Visitor + optimized bool +} + +func newGrammarOptimizer(protectedRules []string) *grammarOptimizer { + pr := make(map[string]struct{}, len(protectedRules)) + for _, nm := range protectedRules { + pr[nm] = struct{}{} + } + + r := grammarOptimizer{ + protectedRules: pr, + rules: make(map[string]*Rule), + ruleUsesRules: make(map[string]map[string]struct{}), + ruleUsedByRules: make(map[string]map[string]struct{}), + } + r.visitor = r.init + return &r +} + +// Visit is a generic Visitor to be used with Walk +// The actual function, which should be used during Walk +// is held in ruleRefOptimizer.visitor +func (r *grammarOptimizer) Visit(expr Expression) Visitor { + return r.visitor(expr) +} + +// init is a Visitor, which is used with the Walk function +// The purpose of this function is to initialize the reference +// maps rules, ruleUsesRules and ruleUsedByRules. 
+func (r *grammarOptimizer) init(expr Expression) Visitor { + switch expr := expr.(type) { + case *Rule: + // Keep track of current rule, which is processed + r.rule = expr.Name.Val + r.rules[expr.Name.Val] = expr + case *RuleRefExpr: + // Fill ruleUsesRules and ruleUsedByRules for every RuleRefExpr + set(r.ruleUsesRules, r.rule, expr.Name.Val) + set(r.ruleUsedByRules, expr.Name.Val, r.rule) + } + return r +} + +// Add element to map of maps, initialize the inner map +// if necessary. +func set(m map[string]map[string]struct{}, src, dst string) { + if _, ok := m[src]; !ok { + m[src] = make(map[string]struct{}) + } + m[src][dst] = struct{}{} +} + +// optimize is a Visitor, which is used with the Walk function +// The purpose of this function is to perform the actual optimizations. +// See Optimize for a detailed list of the performed optimizations. +func (r *grammarOptimizer) optimize(expr0 Expression) Visitor { + switch expr := expr0.(type) { + case *ActionExpr: + expr.Expr = r.optimizeRule(expr.Expr) + case *AndExpr: + expr.Expr = r.optimizeRule(expr.Expr) + case *ChoiceExpr: + expr.Alternatives = r.optimizeRules(expr.Alternatives) + + // Optimize choice nested in choice + for i := 0; i < len(expr.Alternatives); i++ { + if choice, ok := expr.Alternatives[i].(*ChoiceExpr); ok { + r.optimized = true + if i+1 < len(expr.Alternatives) { + expr.Alternatives = append(expr.Alternatives[:i], append(choice.Alternatives, expr.Alternatives[i+1:]...)...) + } else { + expr.Alternatives = append(expr.Alternatives[:i], choice.Alternatives...) 
+ } + } + + // Combine sequence of single char LitMatcher to CharClassMatcher + if i > 0 { + l0, lok0 := expr.Alternatives[i-1].(*LitMatcher) + l1, lok1 := expr.Alternatives[i].(*LitMatcher) + c0, cok0 := expr.Alternatives[i-1].(*CharClassMatcher) + c1, cok1 := expr.Alternatives[i].(*CharClassMatcher) + + combined := false + + switch { + // Combine two LitMatcher to CharClassMatcher + // "a" / "b" => [ab] + case lok0 && lok1 && len([]rune(l0.Val)) == 1 && len([]rune(l1.Val)) == 1 && l0.IgnoreCase == l1.IgnoreCase: + combined = true + cm := CharClassMatcher{ + Chars: append([]rune(l0.Val), []rune(l1.Val)...), + IgnoreCase: l0.IgnoreCase, + posValue: l0.posValue, + } + expr.Alternatives[i-1] = &cm + + // Combine LitMatcher with CharClassMatcher + // "a" / [bc] => [abc] + case lok0 && cok1 && len([]rune(l0.Val)) == 1 && l0.IgnoreCase == c1.IgnoreCase && !c1.Inverted: + combined = true + c1.Chars = append(c1.Chars, []rune(l0.Val)...) + expr.Alternatives[i-1] = c1 + + // Combine CharClassMatcher with LitMatcher + // [ab] / "c" => [abc] + case cok0 && lok1 && len([]rune(l1.Val)) == 1 && c0.IgnoreCase == l1.IgnoreCase && !c0.Inverted: + combined = true + c0.Chars = append(c0.Chars, []rune(l1.Val)...) + + // Combine CharClassMatcher with CharClassMatcher + // [ab] / [cd] => [abcd] + case cok0 && cok1 && c0.IgnoreCase == c1.IgnoreCase && c0.Inverted == c1.Inverted: + combined = true + c0.Chars = append(c0.Chars, c1.Chars...) + c0.Ranges = append(c0.Ranges, c1.Ranges...) + c0.UnicodeClasses = append(c0.UnicodeClasses, c1.UnicodeClasses...) + } + + // If one of the optimizations was applied, remove the second element from Alternatives + if combined { + r.optimized = true + if i+1 < len(expr.Alternatives) { + expr.Alternatives = append(expr.Alternatives[:i], expr.Alternatives[i+1:]...) + } else { + expr.Alternatives = expr.Alternatives[:i] + } + } + } + } + + case *Grammar: + // Reset optimized at the start of each Walk. 
+ r.optimized = false + for i := 0; i < len(expr.Rules); i++ { + rule := expr.Rules[i] + // Remove Rule, if it is no longer used by any other Rule and it is not the first Rule. + _, used := r.ruleUsedByRules[rule.Name.Val] + _, protected := r.protectedRules[rule.Name.Val] + if !used && !protected { + expr.Rules = append(expr.Rules[:i], expr.Rules[i+1:]...) + // Compensate for the removed item + i-- + + for k, v := range r.ruleUsedByRules { + for kk := range v { + if kk == rule.Name.Val { + delete(r.ruleUsedByRules[k], kk) + if len(r.ruleUsedByRules[k]) == 0 { + delete(r.ruleUsedByRules, k) + } + } + } + } + + r.optimized = true + continue + } + } + case *LabeledExpr: + expr.Expr = r.optimizeRule(expr.Expr) + case *NotExpr: + expr.Expr = r.optimizeRule(expr.Expr) + case *OneOrMoreExpr: + expr.Expr = r.optimizeRule(expr.Expr) + case *Rule: + r.rule = expr.Name.Val + expr.Expr = r.optimizeRule(expr.Expr) + case *SeqExpr: + expr.Exprs = r.optimizeRules(expr.Exprs) + + for i := 0; i < len(expr.Exprs); i++ { + // Optimize nested sequences + if seq, ok := expr.Exprs[i].(*SeqExpr); ok { + r.optimized = true + if i+1 < len(expr.Exprs) { + expr.Exprs = append(expr.Exprs[:i], append(seq.Exprs, expr.Exprs[i+1:]...)...) + } else { + expr.Exprs = append(expr.Exprs[:i], seq.Exprs...) + } + } + + // Combine sequence of LitMatcher + if i > 0 { + l0, ok0 := expr.Exprs[i-1].(*LitMatcher) + l1, ok1 := expr.Exprs[i].(*LitMatcher) + if ok0 && ok1 && l0.IgnoreCase == l1.IgnoreCase { + r.optimized = true + l0.Val += l1.Val + expr.Exprs[i-1] = l0 + if i+1 < len(expr.Exprs) { + expr.Exprs = append(expr.Exprs[:i], expr.Exprs[i+1:]...) 
+ } else { + expr.Exprs = expr.Exprs[:i] + } + } + } + } + + case *ZeroOrMoreExpr: + expr.Expr = r.optimizeRule(expr.Expr) + case *ZeroOrOneExpr: + expr.Expr = r.optimizeRule(expr.Expr) + } + return r +} + +func (r *grammarOptimizer) optimizeRules(exprs []Expression) []Expression { + for i := 0; i < len(exprs); i++ { + exprs[i] = r.optimizeRule(exprs[i]) + } + return exprs +} + +func (r *grammarOptimizer) optimizeRule(expr Expression) Expression { + // Optimize RuleRefExpr + if ruleRef, ok := expr.(*RuleRefExpr); ok { + if _, ok := r.ruleUsesRules[ruleRef.Name.Val]; !ok { + r.optimized = true + delete(r.ruleUsedByRules[ruleRef.Name.Val], r.rule) + if len(r.ruleUsedByRules[ruleRef.Name.Val]) == 0 { + delete(r.ruleUsedByRules, ruleRef.Name.Val) + } + delete(r.ruleUsesRules[r.rule], ruleRef.Name.Val) + if len(r.ruleUsesRules[r.rule]) == 0 { + delete(r.ruleUsesRules, r.rule) + } + // TODO: Check if reference exists, otherwise raise an error, which reference is missing! + return cloneExpr(r.rules[ruleRef.Name.Val].Expr) + } + } + + // Remove Choices with only one Alternative left + if choice, ok := expr.(*ChoiceExpr); ok { + if len(choice.Alternatives) == 1 { + r.optimized = true + return choice.Alternatives[0] + } + } + + // Remove Sequence with only one Expression + if seq, ok := expr.(*SeqExpr); ok { + if len(seq.Exprs) == 1 { + r.optimized = true + return seq.Exprs[0] + } + } + + return expr +} + +// cloneExpr takes an Expression and deep clones it (including all children) +// This is necessary because referenced Rules are denormalized and therefore +// have to become independent from their original Expression +func cloneExpr(expr Expression) Expression { + switch expr := expr.(type) { + case *ActionExpr: + return &ActionExpr{ + Code: expr.Code, + Expr: cloneExpr(expr.Expr), + FuncIx: expr.FuncIx, + p: expr.p, + } + case *AndExpr: + return &AndExpr{ + Expr: cloneExpr(expr.Expr), + p: expr.p, + } + case *AndCodeExpr: + return &AndCodeExpr{ + Code: expr.Code, + 
FuncIx: expr.FuncIx, + p: expr.p, + } + case *CharClassMatcher: + return &CharClassMatcher{ + Chars: append([]rune{}, expr.Chars...), + IgnoreCase: expr.IgnoreCase, + Inverted: expr.Inverted, + posValue: expr.posValue, + Ranges: append([]rune{}, expr.Ranges...), + UnicodeClasses: append([]string{}, expr.UnicodeClasses...), + } + case *ChoiceExpr: + alts := make([]Expression, 0, len(expr.Alternatives)) + for i := 0; i < len(expr.Alternatives); i++ { + alts = append(alts, cloneExpr(expr.Alternatives[i])) + } + return &ChoiceExpr{ + Alternatives: alts, + p: expr.p, + } + case *LabeledExpr: + return &LabeledExpr{ + Expr: cloneExpr(expr.Expr), + Label: expr.Label, + p: expr.p, + } + case *NotExpr: + return &NotExpr{ + Expr: cloneExpr(expr.Expr), + p: expr.p, + } + case *NotCodeExpr: + return &NotCodeExpr{ + Code: expr.Code, + FuncIx: expr.FuncIx, + p: expr.p, + } + case *OneOrMoreExpr: + return &OneOrMoreExpr{ + Expr: cloneExpr(expr.Expr), + p: expr.p, + } + case *SeqExpr: + exprs := make([]Expression, 0, len(expr.Exprs)) + for i := 0; i < len(expr.Exprs); i++ { + exprs = append(exprs, cloneExpr(expr.Exprs[i])) + } + return &SeqExpr{ + Exprs: exprs, + p: expr.p, + } + case *StateCodeExpr: + return &StateCodeExpr{ + p: expr.p, + Code: expr.Code, + FuncIx: expr.FuncIx, + } + case *ZeroOrMoreExpr: + return &ZeroOrMoreExpr{ + Expr: cloneExpr(expr.Expr), + p: expr.p, + } + case *ZeroOrOneExpr: + return &ZeroOrOneExpr{ + Expr: cloneExpr(expr.Expr), + p: expr.p, + } + } + return expr +} + +// cleanupCharClassMatcher is a Visitor, which is used with the Walk function +// The purpose of this function is to cleanup the redundancies created by the +// optimize Visitor. 
This includes to remove redundant entries in Chars, Ranges +// and UnicodeClasses of the given CharClassMatcher as well as regenerating the +// correct content for the Val field (string representation of the CharClassMatcher) +func (r *grammarOptimizer) cleanupCharClassMatcher(expr0 Expression) Visitor { + // We are only interested in nodes of type *CharClassMatcher + if chr, ok := expr0.(*CharClassMatcher); ok { + // Remove redundancies in Chars + chars := make([]rune, 0, len(chr.Chars)) + charsMap := make(map[rune]struct{}) + for _, c := range chr.Chars { + if _, ok := charsMap[c]; !ok { + charsMap[c] = struct{}{} + chars = append(chars, c) + } + } + if len(chars) > 0 { + chr.Chars = chars + } else { + chr.Chars = nil + } + + // Remove redundancies in Ranges + ranges := make([]rune, 0, len(chr.Ranges)) + rangesMap := make(map[string]struct{}) + for i := 0; i < len(chr.Ranges); i += 2 { + rangeKey := string(chr.Ranges[i]) + "-" + string(chr.Ranges[i+1]) + if _, ok := rangesMap[rangeKey]; !ok { + rangesMap[rangeKey] = struct{}{} + ranges = append(ranges, chr.Ranges[i], chr.Ranges[i+1]) + } + } + if len(ranges) > 0 { + chr.Ranges = ranges + } else { + chr.Ranges = nil + } + + // Remove redundancies in UnicodeClasses + unicodeClasses := make([]string, 0, len(chr.UnicodeClasses)) + unicodeClassesMap := make(map[string]struct{}) + for _, u := range chr.UnicodeClasses { + if _, ok := unicodeClassesMap[u]; !ok { + unicodeClassesMap[u] = struct{}{} + unicodeClasses = append(unicodeClasses, u) + } + } + if len(unicodeClasses) > 0 { + chr.UnicodeClasses = unicodeClasses + } else { + chr.UnicodeClasses = nil + } + + // Regenerate the content for Val + var val bytes.Buffer + val.WriteString("[") + if chr.Inverted { + val.WriteString("^") + } + for _, c := range chr.Chars { + val.WriteString(escapeRune(c)) + } + for i := 0; i < len(chr.Ranges); i += 2 { + val.WriteString(escapeRune(chr.Ranges[i])) + val.WriteString("-") + val.WriteString(escapeRune(chr.Ranges[i+1])) + } + for 
_, u := range chr.UnicodeClasses { + val.WriteString("\\p" + u) + } + val.WriteString("]") + if chr.IgnoreCase { + val.WriteString("i") + } + chr.posValue.Val = val.String() + } + return r +} + +func escapeRune(r rune) string { + return strings.Trim(strconv.QuoteRune(r), `'`) +} + +// Optimize walks a given grammar and optimizes the grammar in regards +// of parsing performance. This is done with several optimizations: +// * removal of unreferenced rules +// * replace rule references with a copy of the referenced Rule, if the +// referenced rule it self has no references. +// * resolve nested choice expressions +// * resolve choice expressions with only one alternative +// * resolve nested sequences expression +// * resolve sequence expressions with only one element +// * combine character class matcher and literal matcher, where possible +func Optimize(g *Grammar, alternateEntrypoints ...string) { + entrypoints := alternateEntrypoints + if len(g.Rules) > 0 { + entrypoints = append(entrypoints, g.Rules[0].Name.Val) + } + + r := newGrammarOptimizer(entrypoints) + Walk(r, g) + + r.visitor = r.optimize + r.optimized = true + for r.optimized { + Walk(r, g) + } + + r.visitor = r.cleanupCharClassMatcher + Walk(r, g) +} diff --git a/vendor/github.com/mna/pigeon/ast/ast_walk.go b/vendor/github.com/mna/pigeon/ast/ast_walk.go new file mode 100644 index 00000000000..862d7d8dd3a --- /dev/null +++ b/vendor/github.com/mna/pigeon/ast/ast_walk.go @@ -0,0 +1,87 @@ +package ast + +import "fmt" + +// A Visitor implements a Visit method, which is invoked for each Expression +// encountered by Walk. +// If the result visitor w is not nil, Walk visits each of the children +// of Expression with the visitor w, followed by a call of w.Visit(nil). +type Visitor interface { + Visit(expr Expression) (w Visitor) +} + +// Walk traverses an AST in depth-first order: It starts by calling +// v.Visit(expr); Expression must not be nil. 
If the visitor w returned by +// v.Visit(expr) is not nil, Walk is invoked recursively with visitor +// w for each of the non-nil children of Expression, followed by a call of +// w.Visit(nil). +// +func Walk(v Visitor, expr Expression) { + if v = v.Visit(expr); v == nil { + return + } + + switch expr := expr.(type) { + case *ActionExpr: + Walk(v, expr.Expr) + case *AndCodeExpr: + // Nothing to do + case *AndExpr: + Walk(v, expr.Expr) + case *AnyMatcher: + // Nothing to do + case *CharClassMatcher: + // Nothing to do + case *ChoiceExpr: + for _, e := range expr.Alternatives { + Walk(v, e) + } + case *Grammar: + for _, e := range expr.Rules { + Walk(v, e) + } + case *LabeledExpr: + Walk(v, expr.Expr) + case *LitMatcher: + // Nothing to do + case *NotCodeExpr: + // Nothing to do + case *NotExpr: + Walk(v, expr.Expr) + case *OneOrMoreExpr: + Walk(v, expr.Expr) + case *Rule: + Walk(v, expr.Expr) + case *RuleRefExpr: + // Nothing to do + case *SeqExpr: + for _, e := range expr.Exprs { + Walk(v, e) + } + case *StateCodeExpr: + // Nothing to do + case *ZeroOrMoreExpr: + Walk(v, expr.Expr) + case *ZeroOrOneExpr: + Walk(v, expr.Expr) + default: + panic(fmt.Sprintf("unknown expression type %T", expr)) + } +} + +type inspector func(Expression) bool + +func (f inspector) Visit(expr Expression) Visitor { + if f(expr) { + return f + } + return nil +} + +// Inspect traverses an AST in depth-first order: It starts by calling +// f(expr); expr must not be nil. If f returns true, Inspect invokes f +// recursively for each of the non-nil children of expr, followed by a +// call of f(nil). 
+func Inspect(expr Expression, f func(Expression) bool) { + Walk(inspector(f), expr) +} diff --git a/vendor/github.com/mna/pigeon/builder/builder.go b/vendor/github.com/mna/pigeon/builder/builder.go new file mode 100644 index 00000000000..a40f612db43 --- /dev/null +++ b/vendor/github.com/mna/pigeon/builder/builder.go @@ -0,0 +1,817 @@ +// Package builder generates the parser code for a given grammar. It makes +// no attempt to verify the correctness of the grammar. +package builder + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "text/template" + "unicode" + + "regexp" + + "github.com/mna/pigeon/ast" +) + +const codeGeneratedComment = "// Code generated by pigeon; DO NOT EDIT.\n\n" + +// generated function templates +var ( + onFuncTemplate = `func (%s *current) %s(%s) (interface{}, error) { +%s +} +` + onPredFuncTemplate = `func (%s *current) %s(%s) (bool, error) { +%s +} +` + onStateFuncTemplate = `func (%s *current) %s(%s) (error) { +%s +} +` + callFuncTemplate = `func (p *parser) call%s() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.%[1]s(%s) +} +` + callPredFuncTemplate = `func (p *parser) call%s() (bool, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.%[1]s(%s) +} +` + callStateFuncTemplate = `func (p *parser) call%s() error { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.%[1]s(%s) +} +` +) + +// Option is a function that can set an option on the builder. It returns +// the previous setting as an Option. +type Option func(*builder) Option + +// ReceiverName returns an option that specifies the receiver name to +// use for the current struct (which is the struct on which all code blocks +// except the initializer are generated). 
+func ReceiverName(nm string) Option { + return func(b *builder) Option { + prev := b.recvName + b.recvName = nm + return ReceiverName(prev) + } +} + +// Optimize returns an option that specifies the optimize option +// If optimize is true, the Debug and Memoize code is completely +// removed from the resulting parser +func Optimize(optimize bool) Option { + return func(b *builder) Option { + prev := b.optimize + b.optimize = optimize + return Optimize(prev) + } +} + +// Nolint returns an option that specifies the nolint option +// If nolint is true, special '// nolint: ...' comments are added +// to the generated parser to suppress warnings by gometalinter. +func Nolint(nolint bool) Option { + return func(b *builder) Option { + prev := b.nolint + b.nolint = nolint + return Optimize(prev) + } +} + +// BasicLatinLookupTable returns an option that specifies the basicLatinLookup option +// If basicLatinLookup is true, a lookup slice for the first 128 chars of +// the Unicode table (Basic Latin) is generated for each CharClassMatcher +// to increase the character matching. +func BasicLatinLookupTable(basicLatinLookupTable bool) Option { + return func(b *builder) Option { + prev := b.basicLatinLookupTable + b.basicLatinLookupTable = basicLatinLookupTable + return BasicLatinLookupTable(prev) + } +} + +// BuildParser builds the PEG parser using the provider grammar. The code is +// written to the specified w. 
+func BuildParser(w io.Writer, g *ast.Grammar, opts ...Option) error { + b := &builder{w: w, recvName: "c"} + b.setOptions(opts) + return b.buildParser(g) +} + +type builder struct { + w io.Writer + err error + + // options + recvName string + optimize bool + basicLatinLookupTable bool + globalState bool + nolint bool + + ruleName string + exprIndex int + argsStack [][]string + + rangeTable bool +} + +func (b *builder) setOptions(opts []Option) { + for _, opt := range opts { + opt(b) + } +} + +func (b *builder) buildParser(g *ast.Grammar) error { + b.writeInit(g.Init) + b.writeGrammar(g) + + for _, rule := range g.Rules { + b.writeRuleCode(rule) + } + b.writeStaticCode() + + return b.err +} + +func (b *builder) writeInit(init *ast.CodeBlock) { + if init == nil { + return + } + + // remove opening and closing braces + val := codeGeneratedComment + init.Val[1:len(init.Val)-1] + b.writelnf("%s", val) +} + +func (b *builder) writeGrammar(g *ast.Grammar) { + // transform the ast grammar to the self-contained, no dependency version + // of the parser-generator grammar. 
+ b.writelnf("var g = &grammar {") + b.writelnf("\trules: []*rule{") + for _, r := range g.Rules { + b.writeRule(r) + } + b.writelnf("\t},") + b.writelnf("}") +} + +func (b *builder) writeRule(r *ast.Rule) { + if r == nil || r.Name == nil { + return + } + + b.exprIndex = 0 + b.ruleName = r.Name.Val + + b.writelnf("{") + b.writelnf("\tname: %q,", r.Name.Val) + if r.DisplayName != nil && r.DisplayName.Val != "" { + b.writelnf("\tdisplayName: %q,", r.DisplayName.Val) + } + pos := r.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writef("\texpr: ") + b.writeExpr(r.Expr) + b.writelnf("},") +} + +func (b *builder) writeExpr(expr ast.Expression) { + b.exprIndex++ + switch expr := expr.(type) { + case *ast.ActionExpr: + b.writeActionExpr(expr) + case *ast.AndCodeExpr: + b.writeAndCodeExpr(expr) + case *ast.AndExpr: + b.writeAndExpr(expr) + case *ast.AnyMatcher: + b.writeAnyMatcher(expr) + case *ast.CharClassMatcher: + b.writeCharClassMatcher(expr) + case *ast.ChoiceExpr: + b.writeChoiceExpr(expr) + case *ast.LabeledExpr: + b.writeLabeledExpr(expr) + case *ast.LitMatcher: + b.writeLitMatcher(expr) + case *ast.NotCodeExpr: + b.writeNotCodeExpr(expr) + case *ast.NotExpr: + b.writeNotExpr(expr) + case *ast.OneOrMoreExpr: + b.writeOneOrMoreExpr(expr) + case *ast.RecoveryExpr: + b.writeRecoveryExpr(expr) + case *ast.RuleRefExpr: + b.writeRuleRefExpr(expr) + case *ast.SeqExpr: + b.writeSeqExpr(expr) + case *ast.StateCodeExpr: + b.writeStateCodeExpr(expr) + case *ast.ThrowExpr: + b.writeThrowExpr(expr) + case *ast.ZeroOrMoreExpr: + b.writeZeroOrMoreExpr(expr) + case *ast.ZeroOrOneExpr: + b.writeZeroOrOneExpr(expr) + default: + b.err = fmt.Errorf("builder: unknown expression type %T", expr) + } +} + +func (b *builder) writeActionExpr(act *ast.ActionExpr) { + if act == nil { + b.writelnf("nil,") + return + } + if act.FuncIx == 0 { + act.FuncIx = b.exprIndex + } + b.writelnf("&actionExpr{") + pos := act.Pos() + b.writelnf("\tpos: 
position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writelnf("\trun: (*parser).call%s,", b.funcName(act.FuncIx)) + b.writef("\texpr: ") + b.writeExpr(act.Expr) + b.writelnf("},") +} + +func (b *builder) writeAndCodeExpr(and *ast.AndCodeExpr) { + if and == nil { + b.writelnf("nil,") + return + } + b.writelnf("&andCodeExpr{") + pos := and.Pos() + if and.FuncIx == 0 { + and.FuncIx = b.exprIndex + } + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writelnf("\trun: (*parser).call%s,", b.funcName(and.FuncIx)) + b.writelnf("},") +} + +func (b *builder) writeAndExpr(and *ast.AndExpr) { + if and == nil { + b.writelnf("nil,") + return + } + b.writelnf("&andExpr{") + pos := and.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writef("\texpr: ") + b.writeExpr(and.Expr) + b.writelnf("},") +} + +func (b *builder) writeAnyMatcher(any *ast.AnyMatcher) { + if any == nil { + b.writelnf("nil,") + return + } + b.writelnf("&anyMatcher{") + pos := any.Pos() + b.writelnf("\tline: %d, col: %d, offset: %d,", pos.Line, pos.Col, pos.Off) + b.writelnf("},") +} + +func (b *builder) writeCharClassMatcher(ch *ast.CharClassMatcher) { + if ch == nil { + b.writelnf("nil,") + return + } + b.writelnf("&charClassMatcher{") + pos := ch.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writelnf("\tval: %q,", ch.Val) + if len(ch.Chars) > 0 { + b.writef("\tchars: []rune{") + for _, rn := range ch.Chars { + if ch.IgnoreCase { + b.writef("%q,", unicode.ToLower(rn)) + } else { + b.writef("%q,", rn) + } + } + b.writelnf("},") + } + if len(ch.Ranges) > 0 { + b.writef("\tranges: []rune{") + for _, rn := range ch.Ranges { + if ch.IgnoreCase { + b.writef("%q,", unicode.ToLower(rn)) + } else { + b.writef("%q,", rn) + } + } + b.writelnf("},") + } + if len(ch.UnicodeClasses) > 0 { + b.rangeTable = true + b.writef("\tclasses: 
[]*unicode.RangeTable{") + for _, cl := range ch.UnicodeClasses { + b.writef("rangeTable(%q),", cl) + } + b.writelnf("},") + } + if b.basicLatinLookupTable { + b.writelnf("\tbasicLatinChars: %#v,", BasicLatinLookup(ch.Chars, ch.Ranges, ch.UnicodeClasses, ch.IgnoreCase)) + } + b.writelnf("\tignoreCase: %t,", ch.IgnoreCase) + b.writelnf("\tinverted: %t,", ch.Inverted) + b.writelnf("},") +} + +// BasicLatinLookup calculates the decision results for the first 256 characters of the UTF-8 character +// set for a given set of chars, ranges and unicodeClasses to speedup the CharClassMatcher. +func BasicLatinLookup(chars, ranges []rune, unicodeClasses []string, ignoreCase bool) (basicLatinChars [128]bool) { + for _, rn := range chars { + if rn < 128 { + basicLatinChars[rn] = true + if ignoreCase { + if unicode.IsLower(rn) { + basicLatinChars[unicode.ToUpper(rn)] = true + } else { + basicLatinChars[unicode.ToLower(rn)] = true + } + } + } + } + for i := 0; i < len(ranges); i += 2 { + if ranges[i] < 128 { + for j := ranges[i]; j < 128 && j <= ranges[i+1]; j++ { + basicLatinChars[j] = true + if ignoreCase { + if unicode.IsLower(j) { + basicLatinChars[unicode.ToUpper(j)] = true + } else { + basicLatinChars[unicode.ToLower(j)] = true + } + } + } + } + } + for _, cl := range unicodeClasses { + rt := rangeTable(cl) + for r := rune(0); r < 128; r++ { + if unicode.Is(rt, r) { + basicLatinChars[r] = true + } + } + } + return +} + +func (b *builder) writeChoiceExpr(ch *ast.ChoiceExpr) { + if ch == nil { + b.writelnf("nil,") + return + } + b.writelnf("&choiceExpr{") + pos := ch.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + if len(ch.Alternatives) > 0 { + b.writelnf("\talternatives: []interface{}{") + for _, alt := range ch.Alternatives { + b.writeExpr(alt) + } + b.writelnf("\t},") + } + b.writelnf("},") +} + +func (b *builder) writeLabeledExpr(lab *ast.LabeledExpr) { + if lab == nil { + b.writelnf("nil,") + return + } + 
b.writelnf("&labeledExpr{") + pos := lab.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + if lab.Label != nil && lab.Label.Val != "" { + b.writelnf("\tlabel: %q,", lab.Label.Val) + } + b.writef("\texpr: ") + b.writeExpr(lab.Expr) + b.writelnf("},") +} + +func (b *builder) writeLitMatcher(lit *ast.LitMatcher) { + if lit == nil { + b.writelnf("nil,") + return + } + b.writelnf("&litMatcher{") + pos := lit.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + if lit.IgnoreCase { + b.writelnf("\tval: %q,", strings.ToLower(lit.Val)) + } else { + b.writelnf("\tval: %q,", lit.Val) + } + b.writelnf("\tignoreCase: %t,", lit.IgnoreCase) + ignoreCaseFlag := "" + if lit.IgnoreCase { + ignoreCaseFlag = "i" + } + b.writelnf("\twant: %q,", strconv.Quote(lit.Val)+ignoreCaseFlag) + b.writelnf("},") +} + +func (b *builder) writeNotCodeExpr(not *ast.NotCodeExpr) { + if not == nil { + b.writelnf("nil,") + return + } + b.writelnf("¬CodeExpr{") + pos := not.Pos() + if not.FuncIx == 0 { + not.FuncIx = b.exprIndex + } + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writelnf("\trun: (*parser).call%s,", b.funcName(not.FuncIx)) + b.writelnf("},") +} + +func (b *builder) writeNotExpr(not *ast.NotExpr) { + if not == nil { + b.writelnf("nil,") + return + } + b.writelnf("¬Expr{") + pos := not.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writef("\texpr: ") + b.writeExpr(not.Expr) + b.writelnf("},") +} + +func (b *builder) writeOneOrMoreExpr(one *ast.OneOrMoreExpr) { + if one == nil { + b.writelnf("nil,") + return + } + b.writelnf("&oneOrMoreExpr{") + pos := one.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writef("\texpr: ") + b.writeExpr(one.Expr) + b.writelnf("},") +} + +func (b *builder) writeRecoveryExpr(recover *ast.RecoveryExpr) { + if 
recover == nil { + b.writelnf("nil,") + return + } + b.writelnf("&recoveryExpr{") + pos := recover.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + + b.writef("\texpr: ") + b.writeExpr(recover.Expr) + b.writef("\trecoverExpr: ") + b.writeExpr(recover.RecoverExpr) + b.writelnf("\tfailureLabel: []string{") + for _, label := range recover.Labels { + b.writelnf("%q,", label) + } + b.writelnf("\t},") + b.writelnf("},") +} + +func (b *builder) writeRuleRefExpr(ref *ast.RuleRefExpr) { + if ref == nil { + b.writelnf("nil,") + return + } + b.writelnf("&ruleRefExpr{") + pos := ref.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + if ref.Name != nil && ref.Name.Val != "" { + b.writelnf("\tname: %q,", ref.Name.Val) + } + b.writelnf("},") +} + +func (b *builder) writeSeqExpr(seq *ast.SeqExpr) { + if seq == nil { + b.writelnf("nil,") + return + } + b.writelnf("&seqExpr{") + pos := seq.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + if len(seq.Exprs) > 0 { + b.writelnf("\texprs: []interface{}{") + for _, e := range seq.Exprs { + b.writeExpr(e) + } + b.writelnf("\t},") + } + b.writelnf("},") +} + +func (b *builder) writeStateCodeExpr(state *ast.StateCodeExpr) { + if state == nil { + b.writelnf("nil,") + return + } + b.globalState = true + b.writelnf("&stateCodeExpr{") + pos := state.Pos() + if state.FuncIx == 0 { + state.FuncIx = b.exprIndex + } + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writelnf("\trun: (*parser).call%s,", b.funcName(state.FuncIx)) + b.writelnf("},") +} + +func (b *builder) writeThrowExpr(throw *ast.ThrowExpr) { + if throw == nil { + b.writelnf("nil,") + return + } + b.writelnf("&throwExpr{") + pos := throw.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writelnf("\tlabel: %q,", throw.Label) + 
b.writelnf("},") +} + +func (b *builder) writeZeroOrMoreExpr(zero *ast.ZeroOrMoreExpr) { + if zero == nil { + b.writelnf("nil,") + return + } + b.writelnf("&zeroOrMoreExpr{") + pos := zero.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writef("\texpr: ") + b.writeExpr(zero.Expr) + b.writelnf("},") +} + +func (b *builder) writeZeroOrOneExpr(zero *ast.ZeroOrOneExpr) { + if zero == nil { + b.writelnf("nil,") + return + } + b.writelnf("&zeroOrOneExpr{") + pos := zero.Pos() + b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off) + b.writef("\texpr: ") + b.writeExpr(zero.Expr) + b.writelnf("},") +} + +func (b *builder) writeRuleCode(rule *ast.Rule) { + if rule == nil || rule.Name == nil { + return + } + + // keep trace of the current rule, as the code blocks are created + // in functions named "on<#ExprIndex>". + b.ruleName = rule.Name.Val + b.pushArgsSet() + b.writeExprCode(rule.Expr) + b.popArgsSet() +} + +func (b *builder) pushArgsSet() { + b.argsStack = append(b.argsStack, nil) +} + +func (b *builder) popArgsSet() { + b.argsStack = b.argsStack[:len(b.argsStack)-1] +} + +func (b *builder) addArg(arg *ast.Identifier) { + if arg == nil { + return + } + ix := len(b.argsStack) - 1 + b.argsStack[ix] = append(b.argsStack[ix], arg.Val) +} + +func (b *builder) writeExprCode(expr ast.Expression) { + switch expr := expr.(type) { + case *ast.ActionExpr: + b.writeExprCode(expr.Expr) + b.writeActionExprCode(expr) + + case *ast.AndCodeExpr: + b.writeAndCodeExprCode(expr) + + case *ast.LabeledExpr: + b.addArg(expr.Label) + b.pushArgsSet() + b.writeExprCode(expr.Expr) + b.popArgsSet() + + case *ast.NotCodeExpr: + b.writeNotCodeExprCode(expr) + + case *ast.AndExpr: + b.pushArgsSet() + b.writeExprCode(expr.Expr) + b.popArgsSet() + + case *ast.ChoiceExpr: + for _, alt := range expr.Alternatives { + b.pushArgsSet() + b.writeExprCode(alt) + b.popArgsSet() + } + + case *ast.NotExpr: + b.pushArgsSet() 
+ b.writeExprCode(expr.Expr) + b.popArgsSet() + + case *ast.OneOrMoreExpr: + b.pushArgsSet() + b.writeExprCode(expr.Expr) + b.popArgsSet() + + case *ast.RecoveryExpr: + b.pushArgsSet() + b.writeExprCode(expr.Expr) + b.writeExprCode(expr.RecoverExpr) + b.popArgsSet() + + case *ast.SeqExpr: + for _, sub := range expr.Exprs { + b.writeExprCode(sub) + } + + case *ast.StateCodeExpr: + b.writeStateCodeExprCode(expr) + + case *ast.ZeroOrMoreExpr: + b.pushArgsSet() + b.writeExprCode(expr.Expr) + b.popArgsSet() + + case *ast.ZeroOrOneExpr: + b.pushArgsSet() + b.writeExprCode(expr.Expr) + b.popArgsSet() + } +} + +func (b *builder) writeActionExprCode(act *ast.ActionExpr) { + if act == nil { + return + } + if act.FuncIx > 0 { + b.writeFunc(act.FuncIx, act.Code, callFuncTemplate, onFuncTemplate) + act.FuncIx = 0 // already rendered, prevent duplicates + } +} + +func (b *builder) writeAndCodeExprCode(and *ast.AndCodeExpr) { + if and == nil { + return + } + if and.FuncIx > 0 { + b.writeFunc(and.FuncIx, and.Code, callPredFuncTemplate, onPredFuncTemplate) + and.FuncIx = 0 // already rendered, prevent duplicates + } +} + +func (b *builder) writeNotCodeExprCode(not *ast.NotCodeExpr) { + if not == nil { + return + } + if not.FuncIx > 0 { + b.writeFunc(not.FuncIx, not.Code, callPredFuncTemplate, onPredFuncTemplate) + not.FuncIx = 0 // already rendered, prevent duplicates + } +} + +func (b *builder) writeStateCodeExprCode(state *ast.StateCodeExpr) { + if state == nil { + return + } + if state.FuncIx > 0 { + b.writeFunc(state.FuncIx, state.Code, callStateFuncTemplate, onStateFuncTemplate) + state.FuncIx = 0 // already rendered, prevent duplicates + } +} + +func (b *builder) writeFunc(funcIx int, code *ast.CodeBlock, callTpl, funcTpl string) { + if code == nil { + return + } + val := strings.TrimSpace(code.Val)[1 : len(code.Val)-1] + if len(val) > 0 && val[0] == '\n' { + val = val[1:] + } + if len(val) > 0 && val[len(val)-1] == '\n' { + val = val[:len(val)-1] + } + var args bytes.Buffer 
+ ix := len(b.argsStack) - 1 + if ix >= 0 { + for i, arg := range b.argsStack[ix] { + if i > 0 { + args.WriteString(", ") + } + args.WriteString(arg) + } + } + if args.Len() > 0 { + args.WriteString(" interface{}") + } + + fnNm := b.funcName(funcIx) + b.writelnf(funcTpl, b.recvName, fnNm, args.String(), val) + + args.Reset() + if ix >= 0 { + for i, arg := range b.argsStack[ix] { + if i > 0 { + args.WriteString(", ") + } + args.WriteString(fmt.Sprintf(`stack[%q]`, arg)) + } + } + b.writelnf(callTpl, fnNm, args.String()) +} + +func (b *builder) writeStaticCode() { + buffer := bytes.NewBufferString("") + params := struct { + Optimize bool + BasicLatinLookupTable bool + GlobalState bool + Nolint bool + }{ + Optimize: b.optimize, + BasicLatinLookupTable: b.basicLatinLookupTable, + GlobalState: b.globalState, + Nolint: b.nolint, + } + t := template.Must(template.New("static_code").Parse(staticCode)) + + err := t.Execute(buffer, params) + if err != nil { + // This is very unlikely to ever happen + panic("executing template: " + err.Error()) + } + + // Clean the ==template== comments from the generated parser + lines := strings.Split(buffer.String(), "\n") + buffer.Reset() + re := regexp.MustCompile(`^\s*//\s*(==template==\s*)+$`) + reLineEnd := regexp.MustCompile(`//\s*==template==\s*$`) + for _, line := range lines { + if !re.MatchString(line) { + line = reLineEnd.ReplaceAllString(line, "") + _, err := buffer.WriteString(line + "\n") + if err != nil { + // This is very unlikely to ever happen + panic("unable to write to byte buffer: " + err.Error()) + } + } + } + + b.writeln(buffer.String()) + if b.rangeTable { + b.writeln(rangeTable0) + } +} + +func (b *builder) funcName(ix int) string { + return "on" + b.ruleName + strconv.Itoa(ix) +} + +func (b *builder) writef(f string, args ...interface{}) { + if b.err == nil { + _, b.err = fmt.Fprintf(b.w, f, args...) + } +} + +func (b *builder) writelnf(f string, args ...interface{}) { + b.writef(f+"\n", args...) 
+} + +func (b *builder) writeln(f string) { + if b.err == nil { + _, b.err = fmt.Fprint(b.w, f+"\n") + } +} diff --git a/vendor/github.com/mna/pigeon/builder/generated_static_code.go b/vendor/github.com/mna/pigeon/builder/generated_static_code.go new file mode 100644 index 00000000000..3bec59d59f8 --- /dev/null +++ b/vendor/github.com/mna/pigeon/builder/generated_static_code.go @@ -0,0 +1,1450 @@ +// Code generated by static_code_generator with go generate; DO NOT EDIT. + +package builder + +var staticCode = ` +var ( + // errNoRule is returned when the grammar to parse has no rule. + errNoRule = errors.New("grammar has no rule") + + // errInvalidEntrypoint is returned when the specified entrypoint rule + // does not exit. + errInvalidEntrypoint = errors.New("invalid entrypoint") + + // errInvalidEncoding is returned when the source is not properly + // utf8-encoded. + errInvalidEncoding = errors.New("invalid encoding") + + // errMaxExprCnt is used to signal that the maximum number of + // expressions have been parsed. + errMaxExprCnt = errors.New("max number of expresssions parsed") +) + +// Option is a function that can set an option on the parser. It returns +// the previous setting as an Option. +type Option func(*parser) Option + +// MaxExpressions creates an Option to stop parsing after the provided +// number of expressions have been parsed, if the value is 0 then the parser will +// parse for as many steps as needed (possibly an infinite number). +// +// The default for maxExprCnt is 0. +func MaxExpressions(maxExprCnt uint64) Option { + return func(p *parser) Option { + oldMaxExprCnt := p.maxExprCnt + p.maxExprCnt = maxExprCnt + return MaxExpressions(oldMaxExprCnt) + } +} + +// Entrypoint creates an Option to set the rule name to use as entrypoint. +// The rule name must have been specified in the -alternate-entrypoints +// if generating the parser with the -optimize-grammar flag, otherwise +// it may have been optimized out. 
Passing an empty string sets the +// entrypoint to the first rule in the grammar. +// +// The default is to start parsing at the first rule in the grammar. +func Entrypoint(ruleName string) Option { + return func(p *parser) Option { + oldEntrypoint := p.entrypoint + p.entrypoint = ruleName + if ruleName == "" { + p.entrypoint = g.rules[0].name + } + return Entrypoint(oldEntrypoint) + } +} + +// ==template== {{ if not .Optimize }} +// Statistics adds a user provided Stats struct to the parser to allow +// the user to process the results after the parsing has finished. +// Also the key for the "no match" counter is set. +// +// Example usage: +// +// input := "input" +// stats := Stats{} +// _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match")) +// if err != nil { +// log.Panicln(err) +// } +// b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", " ") +// if err != nil { +// log.Panicln(err) +// } +// fmt.Println(string(b)) +// +func Statistics(stats *Stats, choiceNoMatch string) Option { + return func(p *parser) Option { + oldStats := p.Stats + p.Stats = stats + oldChoiceNoMatch := p.choiceNoMatch + p.choiceNoMatch = choiceNoMatch + if p.Stats.ChoiceAltCnt == nil { + p.Stats.ChoiceAltCnt = make(map[string]map[string]int) + } + return Statistics(oldStats, oldChoiceNoMatch) + } +} + +// Debug creates an Option to set the debug flag to b. When set to true, +// debugging information is printed to stdout while parsing. +// +// The default is false. +func Debug(b bool) Option { + return func(p *parser) Option { + old := p.debug + p.debug = b + return Debug(old) + } +} + +// Memoize creates an Option to set the memoize flag to b. When set to true, +// the parser will cache all results so each expression is evaluated only +// once. This guarantees linear parsing time even for pathological cases, +// at the expense of more memory and slower times for typical cases. +// +// The default is false. 
+func Memoize(b bool) Option { + return func(p *parser) Option { + old := p.memoize + p.memoize = b + return Memoize(old) + } +} + +// {{ end }} ==template== + +// AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes. +// Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD) +// by character class matchers and is matched by the any matcher. +// The returned matched value, c.text and c.offset are NOT affected. +// +// The default is false. +func AllowInvalidUTF8(b bool) Option { + return func(p *parser) Option { + old := p.allowInvalidUTF8 + p.allowInvalidUTF8 = b + return AllowInvalidUTF8(old) + } +} + +// Recover creates an Option to set the recover flag to b. When set to +// true, this causes the parser to recover from panics and convert it +// to an error. Setting it to false can be useful while debugging to +// access the full stack trace. +// +// The default is true. +func Recover(b bool) Option { + return func(p *parser) Option { + old := p.recover + p.recover = b + return Recover(old) + } +} + +// GlobalStore creates an Option to set a key to a certain value in +// the globalStore. +func GlobalStore(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.globalStore[key] + p.cur.globalStore[key] = value + return GlobalStore(key, old) + } +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +// InitState creates an Option to set a key to a certain value in +// the global "state" store. +func InitState(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.state[key] + p.cur.state[key] = value + return InitState(key, old) + } +} + +// {{ end }} ==template== + +// ParseFile parses the file identified by filename. 
+func ParseFile(filename string, opts ...Option) (i interface{}, err error) { //{{ if .Nolint }} nolint: deadcode {{else}} ==template== {{ end }} + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if closeErr := f.Close(); closeErr != nil { + err = closeErr + } + }() + return ParseReader(filename, f, opts...) +} + +// ParseReader parses the data from r using filename as information in the +// error messages. +func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) { //{{ if .Nolint }} nolint: deadcode {{else}} ==template== {{ end }} + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return Parse(filename, b, opts...) +} + +// Parse parses the data from b using filename as information in the +// error messages. +func Parse(filename string, b []byte, opts ...Option) (interface{}, error) { + return newParser(filename, b, opts...).parse(g) +} + +// position records a position in the text. +type position struct { + line, col, offset int +} + +func (p position) String() string { + return strconv.Itoa(p.line) + ":" + strconv.Itoa(p.col) + " [" + strconv.Itoa(p.offset) + "]" +} + +// savepoint stores all state required to go back to this point in the +// parser. +type savepoint struct { + position + rn rune + w int +} + +type current struct { + pos position // start position of the match + text []byte // raw text of the match + + // ==template== {{ if or .GlobalState (not .Optimize) }} + + // state is a store for arbitrary key,value pairs that the user wants to be + // tied to the backtracking of the parser. + // This is always rolled back if a parsing rule fails. + state storeDict + + // {{ end }} ==template== + + // globalStore is a general store for the user to store arbitrary key-value + // pairs that they need to manage and that they do not want tied to the + // backtracking of the parser. This is only modified by the user and never + // rolled back by the parser. 
It is always up to the user to keep this in a + // consistent state. + globalStore storeDict +} + +type storeDict map[string]interface{} + +// the AST types... + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type grammar struct { + pos position + rules []*rule +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type rule struct { + pos position + name string + displayName string + expr interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type choiceExpr struct { + pos position + alternatives []interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type actionExpr struct { + pos position + expr interface{} + run func(*parser) (interface{}, error) +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type recoveryExpr struct { + pos position + expr interface{} + recoverExpr interface{} + failureLabel []string +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type seqExpr struct { + pos position + exprs []interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type throwExpr struct { + pos position + label string +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type labeledExpr struct { + pos position + label string + expr interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type expr struct { + pos position + expr interface{} +} + +type andExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type notExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type zeroOrOneExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type zeroOrMoreExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type oneOrMoreExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} + 
+//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type ruleRefExpr struct { + pos position + name string +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type stateCodeExpr struct { + pos position + run func(*parser) error +} + +// {{ end }} ==template== + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type andCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type notCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type litMatcher struct { + pos position + val string + ignoreCase bool + want string +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type charClassMatcher struct { + pos position + val string + basicLatinChars [128]bool + chars []rune + ranges []rune + classes []*unicode.RangeTable + ignoreCase bool + inverted bool +} + +type anyMatcher position //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} + +// errList cumulates the errors found by the parser. 
+type errList []error + +func (e *errList) add(err error) { + *e = append(*e, err) +} + +func (e errList) err() error { + if len(e) == 0 { + return nil + } + e.dedupe() + return e +} + +func (e *errList) dedupe() { + var cleaned []error + set := make(map[string]bool) + for _, err := range *e { + if msg := err.Error(); !set[msg] { + set[msg] = true + cleaned = append(cleaned, err) + } + } + *e = cleaned +} + +func (e errList) Error() string { + switch len(e) { + case 0: + return "" + case 1: + return e[0].Error() + default: + var buf bytes.Buffer + + for i, err := range e { + if i > 0 { + buf.WriteRune('\n') + } + buf.WriteString(err.Error()) + } + return buf.String() + } +} + +// parserError wraps an error with a prefix indicating the rule in which +// the error occurred. The original error is stored in the Inner field. +type parserError struct { + Inner error + pos position + prefix string + expected []string +} + +// Error returns the error message. +func (p *parserError) Error() string { + return p.prefix + ": " + p.Inner.Error() +} + +// newParser creates a parser with the specified input source and options. +func newParser(filename string, b []byte, opts ...Option) *parser { + stats := Stats{ + ChoiceAltCnt: make(map[string]map[string]int), + } + + p := &parser{ + filename: filename, + errs: new(errList), + data: b, + pt: savepoint{position: position{line: 1}}, + recover: true, + cur: current{ + // ==template== {{ if or .GlobalState (not .Optimize) }} + state: make(storeDict), + // {{ end }} ==template== + globalStore: make(storeDict), + }, + maxFailPos: position{col: 1, line: 1}, + maxFailExpected: make([]string, 0, 20), + Stats: &stats, + // start rule is rule [0] unless an alternate entrypoint is specified + entrypoint: g.rules[0].name, + } + p.setOptions(opts) + + if p.maxExprCnt == 0 { + p.maxExprCnt = math.MaxUint64 + } + + return p +} + +// setOptions applies the options to the parser. 
+func (p *parser) setOptions(opts []Option) { + for _, opt := range opts { + opt(p) + } +} + +//{{ if .Nolint }} nolint: structcheck,deadcode {{else}} ==template== {{ end }} +type resultTuple struct { + v interface{} + b bool + end savepoint +} + +//{{ if .Nolint }} nolint: varcheck {{else}} ==template== {{ end }} +const choiceNoMatch = -1 + +// Stats stores some statistics, gathered during parsing +type Stats struct { + // ExprCnt counts the number of expressions processed during parsing + // This value is compared to the maximum number of expressions allowed + // (set by the MaxExpressions option). + ExprCnt uint64 + + // ChoiceAltCnt is used to count for each ordered choice expression, + // which alternative is used how may times. + // These numbers allow to optimize the order of the ordered choice expression + // to increase the performance of the parser + // + // The outer key of ChoiceAltCnt is composed of the name of the rule as well + // as the line and the column of the ordered choice. + // The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative. + // For each alternative the number of matches are counted. If an ordered choice does not + // match, a special counter is incremented. The name of this counter is set with + // the parser option Statistics. + // For an alternative to be included in ChoiceAltCnt, it has to match at least once. 
+ ChoiceAltCnt map[string]map[string]int +} + +//{{ if .Nolint }} nolint: structcheck,maligned {{else}} ==template== {{ end }} +type parser struct { + filename string + pt savepoint + cur current + + data []byte + errs *errList + + depth int + recover bool + // ==template== {{ if not .Optimize }} + debug bool + + memoize bool + // memoization table for the packrat algorithm: + // map[offset in source] map[expression or rule] {value, match} + memo map[int]map[interface{}]resultTuple + // {{ end }} ==template== + + // rules table, maps the rule identifier to the rule node + rules map[string]*rule + // variables stack, map of label to value + vstack []map[string]interface{} + // rule stack, allows identification of the current rule in errors + rstack []*rule + + // parse fail + maxFailPos position + maxFailExpected []string + maxFailInvertExpected bool + + // max number of expressions to be parsed + maxExprCnt uint64 + // entrypoint for the parser + entrypoint string + + allowInvalidUTF8 bool + + *Stats + + choiceNoMatch string + // recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse + recoveryStack []map[string]interface{} +} + +// push a variable set on the vstack. +func (p *parser) pushV() { + if cap(p.vstack) == len(p.vstack) { + // create new empty slot in the stack + p.vstack = append(p.vstack, nil) + } else { + // slice to 1 more + p.vstack = p.vstack[:len(p.vstack)+1] + } + + // get the last args set + m := p.vstack[len(p.vstack)-1] + if m != nil && len(m) == 0 { + // empty map, all good + return + } + + m = make(map[string]interface{}) + p.vstack[len(p.vstack)-1] = m +} + +// pop a variable set from the vstack. 
+func (p *parser) popV() { + // if the map is not empty, clear it + m := p.vstack[len(p.vstack)-1] + if len(m) > 0 { + // GC that map + p.vstack[len(p.vstack)-1] = nil + } + p.vstack = p.vstack[:len(p.vstack)-1] +} + +// push a recovery expression with its labels to the recoveryStack +func (p *parser) pushRecovery(labels []string, expr interface{}) { + if cap(p.recoveryStack) == len(p.recoveryStack) { + // create new empty slot in the stack + p.recoveryStack = append(p.recoveryStack, nil) + } else { + // slice to 1 more + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1] + } + + m := make(map[string]interface{}, len(labels)) + for _, fl := range labels { + m[fl] = expr + } + p.recoveryStack[len(p.recoveryStack)-1] = m +} + +// pop a recovery expression from the recoveryStack +func (p *parser) popRecovery() { + // GC that map + p.recoveryStack[len(p.recoveryStack)-1] = nil + + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1] +} + +// ==template== {{ if not .Optimize }} +func (p *parser) print(prefix, s string) string { + if !p.debug { + return s + } + + fmt.Printf("%s %d:%d:%d: %s [%#U]\n", + prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn) + return s +} + +func (p *parser) in(s string) string { + p.depth++ + return p.print(strings.Repeat(" ", p.depth)+">", s) +} + +func (p *parser) out(s string) string { + p.depth-- + return p.print(strings.Repeat(" ", p.depth)+"<", s) +} + +// {{ end }} ==template== + +func (p *parser) addErr(err error) { + p.addErrAt(err, p.pt.position, []string{}) +} + +func (p *parser) addErrAt(err error, pos position, expected []string) { + var buf bytes.Buffer + if p.filename != "" { + buf.WriteString(p.filename) + } + if buf.Len() > 0 { + buf.WriteString(":") + } + buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset)) + if len(p.rstack) > 0 { + if buf.Len() > 0 { + buf.WriteString(": ") + } + rule := p.rstack[len(p.rstack)-1] + if rule.displayName != "" { + buf.WriteString("rule " + 
rule.displayName) + } else { + buf.WriteString("rule " + rule.name) + } + } + pe := &parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected} + p.errs.add(pe) +} + +func (p *parser) failAt(fail bool, pos position, want string) { + // process fail if parsing fails and not inverted or parsing succeeds and invert is set + if fail == p.maxFailInvertExpected { + if pos.offset < p.maxFailPos.offset { + return + } + + if pos.offset > p.maxFailPos.offset { + p.maxFailPos = pos + p.maxFailExpected = p.maxFailExpected[:0] + } + + if p.maxFailInvertExpected { + want = "!" + want + } + p.maxFailExpected = append(p.maxFailExpected, want) + } +} + +// read advances the parser to the next rune. +func (p *parser) read() { + p.pt.offset += p.pt.w + rn, n := utf8.DecodeRune(p.data[p.pt.offset:]) + p.pt.rn = rn + p.pt.w = n + p.pt.col++ + if rn == '\n' { + p.pt.line++ + p.pt.col = 0 + } + + if rn == utf8.RuneError && n == 1 { // see utf8.DecodeRune + if !p.allowInvalidUTF8 { + p.addErr(errInvalidEncoding) + } + } +} + +// restore parser position to the savepoint pt. +func (p *parser) restore(pt savepoint) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("restore")) + } + // {{ end }} ==template== + if pt.offset == p.pt.offset { + return + } + p.pt = pt +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +// Cloner is implemented by any value that has a Clone method, which returns a +// copy of the value. This is mainly used for types which are not passed by +// value (e.g map, slice, chan) or structs that contain such types. +// +// This is used in conjunction with the global state feature to create proper +// copies of the state to allow the parser to properly restore the state in +// the case of backtracking. 
+type Cloner interface { + Clone() interface{} +} + +var statePool = &sync.Pool{ + New: func() interface{} { return make(storeDict) }, +} + +func (sd storeDict) Discard() { + for k := range sd { + delete(sd, k) + } + statePool.Put(sd) +} + +// clone and return parser current state. +func (p *parser) cloneState() storeDict { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("cloneState")) + } + // {{ end }} ==template== + + state := statePool.Get().(storeDict) + for k, v := range p.cur.state { + if c, ok := v.(Cloner); ok { + state[k] = c.Clone() + } else { + state[k] = v + } + } + return state +} + +// restore parser current state to the state storeDict. +// every restoreState should applied only one time for every cloned state +func (p *parser) restoreState(state storeDict) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("restoreState")) + } + // {{ end }} ==template== + p.cur.state.Discard() + p.cur.state = state +} + +// {{ end }} ==template== + +// get the slice of bytes from the savepoint start to the current position. 
+func (p *parser) sliceFrom(start savepoint) []byte { + return p.data[start.position.offset:p.pt.position.offset] +} + +// ==template== {{ if not .Optimize }} +func (p *parser) getMemoized(node interface{}) (resultTuple, bool) { + if len(p.memo) == 0 { + return resultTuple{}, false + } + m := p.memo[p.pt.offset] + if len(m) == 0 { + return resultTuple{}, false + } + res, ok := m[node] + return res, ok +} + +func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) { + if p.memo == nil { + p.memo = make(map[int]map[interface{}]resultTuple) + } + m := p.memo[pt.offset] + if m == nil { + m = make(map[interface{}]resultTuple) + p.memo[pt.offset] = m + } + m[node] = tuple +} + +// {{ end }} ==template== + +func (p *parser) buildRulesTable(g *grammar) { + p.rules = make(map[string]*rule, len(g.rules)) + for _, r := range g.rules { + p.rules[r.name] = r + } +} + +//{{ if .Nolint }} nolint: gocyclo {{else}} ==template== {{ end }} +func (p *parser) parse(g *grammar) (val interface{}, err error) { + if len(g.rules) == 0 { + p.addErr(errNoRule) + return nil, p.errs.err() + } + + // TODO : not super critical but this could be generated + p.buildRulesTable(g) + + if p.recover { + // panic can be used in action code to stop parsing immediately + // and return the panic as an error. + defer func() { + if e := recover(); e != nil { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("panic handler")) + } + // {{ end }} ==template== + val = nil + switch e := e.(type) { + case error: + p.addErr(e) + default: + p.addErr(fmt.Errorf("%v", e)) + } + err = p.errs.err() + } + }() + } + + startRule, ok := p.rules[p.entrypoint] + if !ok { + p.addErr(errInvalidEntrypoint) + return nil, p.errs.err() + } + + p.read() // advance to first rune + val, ok = p.parseRule(startRule) + if !ok { + if len(*p.errs) == 0 { + // If parsing fails, but no errors have been recorded, the expected values + // for the farthest parser position are returned as error. 
+ maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected)) + for _, v := range p.maxFailExpected { + maxFailExpectedMap[v] = struct{}{} + } + expected := make([]string, 0, len(maxFailExpectedMap)) + eof := false + if _, ok := maxFailExpectedMap["!."]; ok { + delete(maxFailExpectedMap, "!.") + eof = true + } + for k := range maxFailExpectedMap { + expected = append(expected, k) + } + sort.Strings(expected) + if eof { + expected = append(expected, "EOF") + } + p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected) + } + + return nil, p.errs.err() + } + return val, p.errs.err() +} + +func listJoin(list []string, sep string, lastSep string) string { + switch len(list) { + case 0: + return "" + case 1: + return list[0] + default: + return strings.Join(list[:len(list)-1], sep) + " " + lastSep + " " + list[len(list)-1] + } +} + +func (p *parser) parseRule(rule *rule) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseRule " + rule.name)) + } + + if p.memoize { + res, ok := p.getMemoized(rule) + if ok { + p.restore(res.end) + return res.v, res.b + } + } + + start := p.pt + // {{ end }} ==template== + p.rstack = append(p.rstack, rule) + p.pushV() + val, ok := p.parseExpr(rule.expr) + p.popV() + p.rstack = p.rstack[:len(p.rstack)-1] + // ==template== {{ if not .Optimize }} + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) + } + + if p.memoize { + p.setMemoized(start, rule, resultTuple{val, ok, p.pt}) + } + // {{ end }} ==template== + return val, ok +} + +//{{ if .Nolint }} nolint: gocyclo {{else}} ==template== {{ end }} +func (p *parser) parseExpr(expr interface{}) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + var pt savepoint + + if p.memoize { + res, ok := p.getMemoized(expr) + if ok { + p.restore(res.end) + return res.v, res.b + } + pt = p.pt + } + + // {{ end }} ==template== + + 
p.ExprCnt++ + if p.ExprCnt > p.maxExprCnt { + panic(errMaxExprCnt) + } + + var val interface{} + var ok bool + switch expr := expr.(type) { + case *actionExpr: + val, ok = p.parseActionExpr(expr) + case *andCodeExpr: + val, ok = p.parseAndCodeExpr(expr) + case *andExpr: + val, ok = p.parseAndExpr(expr) + case *anyMatcher: + val, ok = p.parseAnyMatcher(expr) + case *charClassMatcher: + val, ok = p.parseCharClassMatcher(expr) + case *choiceExpr: + val, ok = p.parseChoiceExpr(expr) + case *labeledExpr: + val, ok = p.parseLabeledExpr(expr) + case *litMatcher: + val, ok = p.parseLitMatcher(expr) + case *notCodeExpr: + val, ok = p.parseNotCodeExpr(expr) + case *notExpr: + val, ok = p.parseNotExpr(expr) + case *oneOrMoreExpr: + val, ok = p.parseOneOrMoreExpr(expr) + case *recoveryExpr: + val, ok = p.parseRecoveryExpr(expr) + case *ruleRefExpr: + val, ok = p.parseRuleRefExpr(expr) + case *seqExpr: + val, ok = p.parseSeqExpr(expr) + // ==template== {{ if or .GlobalState (not .Optimize) }} + case *stateCodeExpr: + val, ok = p.parseStateCodeExpr(expr) + // {{ end }} ==template== + case *throwExpr: + val, ok = p.parseThrowExpr(expr) + case *zeroOrMoreExpr: + val, ok = p.parseZeroOrMoreExpr(expr) + case *zeroOrOneExpr: + val, ok = p.parseZeroOrOneExpr(expr) + default: + panic(fmt.Sprintf("unknown expression type %T", expr)) + } + // ==template== {{ if not .Optimize }} + if p.memoize { + p.setMemoized(pt, expr, resultTuple{val, ok, p.pt}) + } + // {{ end }} ==template== + return val, ok +} + +func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseActionExpr")) + } + + // {{ end }} ==template== + start := p.pt + val, ok := p.parseExpr(act.expr) + if ok { + p.cur.pos = start.position + p.cur.text = p.sliceFrom(start) + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + actVal, err := act.run(p) + if err != nil { + 
p.addErrAt(err, start.position, []string{}) + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + + val = actVal + } + // ==template== {{ if not .Optimize }} + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) + } + // {{ end }} ==template== + return val, ok +} + +func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseAndCodeExpr")) + } + + // {{ end }} ==template== + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + + ok, err := and.run(p) + if err != nil { + p.addErr(err) + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + + return nil, ok +} + +func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseAndExpr")) + } + + // {{ end }} ==template== + pt := p.pt + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + p.pushV() + _, ok := p.parseExpr(and.expr) + p.popV() + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + p.restore(pt) + + return nil, ok +} + +func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseAnyMatcher")) + } + + // {{ end }} ==template== + if p.pt.rn == utf8.RuneError && p.pt.w == 0 { + // EOF - see utf8.DecodeRune + p.failAt(false, p.pt.position, ".") + return nil, false + } + start := p.pt + p.read() + p.failAt(true, start.position, ".") + return p.sliceFrom(start), true +} + +//{{ if .Nolint }} nolint: gocyclo {{else}} ==template== {{ end }} +func (p *parser) parseCharClassMatcher(chr *charClassMatcher) 
(interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseCharClassMatcher")) + } + + // {{ end }} ==template== + cur := p.pt.rn + start := p.pt + + // ==template== {{ if .BasicLatinLookupTable }} + if cur < 128 { + if chr.basicLatinChars[cur] != chr.inverted { + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + p.failAt(false, start.position, chr.val) + return nil, false + } + // {{ end }} ==template== + + // can't match EOF + if cur == utf8.RuneError && p.pt.w == 0 { // see utf8.DecodeRune + p.failAt(false, start.position, chr.val) + return nil, false + } + + if chr.ignoreCase { + cur = unicode.ToLower(cur) + } + + // try to match in the list of available chars + for _, rn := range chr.chars { + if rn == cur { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of ranges + for i := 0; i < len(chr.ranges); i += 2 { + if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of Unicode classes + for _, cl := range chr.classes { + if unicode.Is(cl, cur) { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + if chr.inverted { + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + p.failAt(false, start.position, chr.val) + return nil, false +} + +// ==template== {{ if not .Optimize }} + +func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) { + choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col) + m := 
p.ChoiceAltCnt[choiceIdent] + if m == nil { + m = make(map[string]int) + p.ChoiceAltCnt[choiceIdent] = m + } + // We increment altI by 1, so the keys do not start at 0 + alt := strconv.Itoa(altI + 1) + if altI == choiceNoMatch { + alt = p.choiceNoMatch + } + m[alt]++ +} + +// {{ end }} ==template== + +func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseChoiceExpr")) + } + + // {{ end }} ==template== + for altI, alt := range ch.alternatives { + // dummy assignment to prevent compile error if optimized + _ = altI + + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + + p.pushV() + val, ok := p.parseExpr(alt) + p.popV() + if ok { + // ==template== {{ if not .Optimize }} + p.incChoiceAltCnt(ch, altI) + // {{ end }} ==template== + return val, ok + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + } + // ==template== {{ if not .Optimize }} + p.incChoiceAltCnt(ch, choiceNoMatch) + // {{ end }} ==template== + return nil, false +} + +func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseLabeledExpr")) + } + + // {{ end }} ==template== + p.pushV() + val, ok := p.parseExpr(lab.expr) + p.popV() + if ok && lab.label != "" { + m := p.vstack[len(p.vstack)-1] + m[lab.label] = val + } + return val, ok +} + +func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseLitMatcher")) + } + + // {{ end }} ==template== + start := p.pt + for _, want := range lit.val { + cur := p.pt.rn + if lit.ignoreCase { + cur = unicode.ToLower(cur) + } + if cur != want { + p.failAt(false, start.position, lit.want) + p.restore(start) + return nil, false + } + p.read() + } + p.failAt(true, 
start.position, lit.want) + return p.sliceFrom(start), true +} + +func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseNotCodeExpr")) + } + + // {{ end }} ==template== + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + + // {{ end }} ==template== + ok, err := not.run(p) + if err != nil { + p.addErr(err) + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + + return nil, !ok +} + +func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseNotExpr")) + } + + // {{ end }} ==template== + pt := p.pt + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + p.pushV() + p.maxFailInvertExpected = !p.maxFailInvertExpected + _, ok := p.parseExpr(not.expr) + p.maxFailInvertExpected = !p.maxFailInvertExpected + p.popV() + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + p.restore(pt) + + return nil, !ok +} + +func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseOneOrMoreExpr")) + } + + // {{ end }} ==template== + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + if len(vals) == 0 { + // did not match once, no match + return nil, false + } + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")")) + } + + // {{ end }} ==template== + + p.pushRecovery(recover.failureLabel, recover.recoverExpr) + 
val, ok := p.parseExpr(recover.expr) + p.popRecovery() + + return val, ok +} + +func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseRuleRefExpr " + ref.name)) + } + + // {{ end }} ==template== + if ref.name == "" { + panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos)) + } + + rule := p.rules[ref.name] + if rule == nil { + p.addErr(fmt.Errorf("undefined rule: %s", ref.name)) + return nil, false + } + return p.parseRule(rule) +} + +func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseSeqExpr")) + } + + // {{ end }} ==template== + vals := make([]interface{}, 0, len(seq.exprs)) + + pt := p.pt + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + for _, expr := range seq.exprs { + val, ok := p.parseExpr(expr) + if !ok { + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + p.restore(pt) + return nil, false + } + vals = append(vals, val) + } + return vals, true +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseStateCodeExpr")) + } + + // {{ end }} ==template== + err := state.run(p) + if err != nil { + p.addErr(err) + } + return nil, true +} + +// {{ end }} ==template== + +func (p *parser) parseThrowExpr(expr *throwExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseThrowExpr")) + } + + // {{ end }} ==template== + + for i := len(p.recoveryStack) - 1; i >= 0; i-- { + if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok { + if val, ok := p.parseExpr(recoverExpr); ok { + return val, ok + } + } + } + + return nil, false 
+} + +func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseZeroOrMoreExpr")) + } + + // {{ end }} ==template== + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseZeroOrOneExpr")) + } + + // {{ end }} ==template== + p.pushV() + val, _ := p.parseExpr(expr.expr) + p.popV() + // whether it matched or not, consider it a match + return val, true +} + +` diff --git a/vendor/github.com/mna/pigeon/builder/generated_static_code_range_table.go b/vendor/github.com/mna/pigeon/builder/generated_static_code_range_table.go new file mode 100644 index 00000000000..a9c0db85612 --- /dev/null +++ b/vendor/github.com/mna/pigeon/builder/generated_static_code_range_table.go @@ -0,0 +1,21 @@ +// Code generated by static_code_generator with go generate; DO NOT EDIT. 
+ +package builder + +var rangeTable0 = ` +func rangeTable(class string) *unicode.RangeTable { + if rt, ok := unicode.Categories[class]; ok { + return rt + } + if rt, ok := unicode.Properties[class]; ok { + return rt + } + if rt, ok := unicode.Scripts[class]; ok { + return rt + } + + // cannot happen + panic(fmt.Sprintf("invalid Unicode class: %s", class)) +} + +` diff --git a/vendor/github.com/mna/pigeon/builder/static_code.go b/vendor/github.com/mna/pigeon/builder/static_code.go new file mode 100644 index 00000000000..9fe46446eac --- /dev/null +++ b/vendor/github.com/mna/pigeon/builder/static_code.go @@ -0,0 +1,1466 @@ +//go:generate go run ../bootstrap/cmd/static_code_generator/main.go -- $GOFILE generated_$GOFILE staticCode + +// +build static_code + +package builder + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// IMPORTANT: All code below this line is added to the parser as static code +var ( + // errNoRule is returned when the grammar to parse has no rule. + errNoRule = errors.New("grammar has no rule") + + // errInvalidEntrypoint is returned when the specified entrypoint rule + // does not exit. + errInvalidEntrypoint = errors.New("invalid entrypoint") + + // errInvalidEncoding is returned when the source is not properly + // utf8-encoded. + errInvalidEncoding = errors.New("invalid encoding") + + // errMaxExprCnt is used to signal that the maximum number of + // expressions have been parsed. + errMaxExprCnt = errors.New("max number of expresssions parsed") +) + +// Option is a function that can set an option on the parser. It returns +// the previous setting as an Option. +type Option func(*parser) Option + +// MaxExpressions creates an Option to stop parsing after the provided +// number of expressions have been parsed, if the value is 0 then the parser will +// parse for as many steps as needed (possibly an infinite number). 
+// +// The default for maxExprCnt is 0. +func MaxExpressions(maxExprCnt uint64) Option { + return func(p *parser) Option { + oldMaxExprCnt := p.maxExprCnt + p.maxExprCnt = maxExprCnt + return MaxExpressions(oldMaxExprCnt) + } +} + +// Entrypoint creates an Option to set the rule name to use as entrypoint. +// The rule name must have been specified in the -alternate-entrypoints +// if generating the parser with the -optimize-grammar flag, otherwise +// it may have been optimized out. Passing an empty string sets the +// entrypoint to the first rule in the grammar. +// +// The default is to start parsing at the first rule in the grammar. +func Entrypoint(ruleName string) Option { + return func(p *parser) Option { + oldEntrypoint := p.entrypoint + p.entrypoint = ruleName + if ruleName == "" { + p.entrypoint = g.rules[0].name + } + return Entrypoint(oldEntrypoint) + } +} + +// ==template== {{ if not .Optimize }} +// Statistics adds a user provided Stats struct to the parser to allow +// the user to process the results after the parsing has finished. +// Also the key for the "no match" counter is set. +// +// Example usage: +// +// input := "input" +// stats := Stats{} +// _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match")) +// if err != nil { +// log.Panicln(err) +// } +// b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", " ") +// if err != nil { +// log.Panicln(err) +// } +// fmt.Println(string(b)) +// +func Statistics(stats *Stats, choiceNoMatch string) Option { + return func(p *parser) Option { + oldStats := p.Stats + p.Stats = stats + oldChoiceNoMatch := p.choiceNoMatch + p.choiceNoMatch = choiceNoMatch + if p.Stats.ChoiceAltCnt == nil { + p.Stats.ChoiceAltCnt = make(map[string]map[string]int) + } + return Statistics(oldStats, oldChoiceNoMatch) + } +} + +// Debug creates an Option to set the debug flag to b. When set to true, +// debugging information is printed to stdout while parsing. +// +// The default is false. 
+func Debug(b bool) Option { + return func(p *parser) Option { + old := p.debug + p.debug = b + return Debug(old) + } +} + +// Memoize creates an Option to set the memoize flag to b. When set to true, +// the parser will cache all results so each expression is evaluated only +// once. This guarantees linear parsing time even for pathological cases, +// at the expense of more memory and slower times for typical cases. +// +// The default is false. +func Memoize(b bool) Option { + return func(p *parser) Option { + old := p.memoize + p.memoize = b + return Memoize(old) + } +} + +// {{ end }} ==template== + +// AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes. +// Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD) +// by character class matchers and is matched by the any matcher. +// The returned matched value, c.text and c.offset are NOT affected. +// +// The default is false. +func AllowInvalidUTF8(b bool) Option { + return func(p *parser) Option { + old := p.allowInvalidUTF8 + p.allowInvalidUTF8 = b + return AllowInvalidUTF8(old) + } +} + +// Recover creates an Option to set the recover flag to b. When set to +// true, this causes the parser to recover from panics and convert it +// to an error. Setting it to false can be useful while debugging to +// access the full stack trace. +// +// The default is true. +func Recover(b bool) Option { + return func(p *parser) Option { + old := p.recover + p.recover = b + return Recover(old) + } +} + +// GlobalStore creates an Option to set a key to a certain value in +// the globalStore. +func GlobalStore(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.globalStore[key] + p.cur.globalStore[key] = value + return GlobalStore(key, old) + } +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +// InitState creates an Option to set a key to a certain value in +// the global "state" store. 
+func InitState(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.state[key] + p.cur.state[key] = value + return InitState(key, old) + } +} + +// {{ end }} ==template== + +// ParseFile parses the file identified by filename. +func ParseFile(filename string, opts ...Option) (i interface{}, err error) { //{{ if .Nolint }} nolint: deadcode {{else}} ==template== {{ end }} + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if closeErr := f.Close(); closeErr != nil { + err = closeErr + } + }() + return ParseReader(filename, f, opts...) +} + +// ParseReader parses the data from r using filename as information in the +// error messages. +func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) { //{{ if .Nolint }} nolint: deadcode {{else}} ==template== {{ end }} + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return Parse(filename, b, opts...) +} + +// Parse parses the data from b using filename as information in the +// error messages. +func Parse(filename string, b []byte, opts ...Option) (interface{}, error) { + return newParser(filename, b, opts...).parse(g) +} + +// position records a position in the text. +type position struct { + line, col, offset int +} + +func (p position) String() string { + return strconv.Itoa(p.line) + ":" + strconv.Itoa(p.col) + " [" + strconv.Itoa(p.offset) + "]" +} + +// savepoint stores all state required to go back to this point in the +// parser. +type savepoint struct { + position + rn rune + w int +} + +type current struct { + pos position // start position of the match + text []byte // raw text of the match + + // ==template== {{ if or .GlobalState (not .Optimize) }} + + // state is a store for arbitrary key,value pairs that the user wants to be + // tied to the backtracking of the parser. + // This is always rolled back if a parsing rule fails. 
+ state storeDict + + // {{ end }} ==template== + + // globalStore is a general store for the user to store arbitrary key-value + // pairs that they need to manage and that they do not want tied to the + // backtracking of the parser. This is only modified by the user and never + // rolled back by the parser. It is always up to the user to keep this in a + // consistent state. + globalStore storeDict +} + +type storeDict map[string]interface{} + +// the AST types... + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type grammar struct { + pos position + rules []*rule +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type rule struct { + pos position + name string + displayName string + expr interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type choiceExpr struct { + pos position + alternatives []interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type actionExpr struct { + pos position + expr interface{} + run func(*parser) (interface{}, error) +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type recoveryExpr struct { + pos position + expr interface{} + recoverExpr interface{} + failureLabel []string +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type seqExpr struct { + pos position + exprs []interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type throwExpr struct { + pos position + label string +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type labeledExpr struct { + pos position + label string + expr interface{} +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type expr struct { + pos position + expr interface{} +} + +type andExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type notExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} 
==template== {{ end }} +type zeroOrOneExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type zeroOrMoreExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type oneOrMoreExpr expr //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type ruleRefExpr struct { + pos position + name string +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type stateCodeExpr struct { + pos position + run func(*parser) error +} + +// {{ end }} ==template== + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type andCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type notCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type litMatcher struct { + pos position + val string + ignoreCase bool + want string +} + +//{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} +type charClassMatcher struct { + pos position + val string + basicLatinChars [128]bool + chars []rune + ranges []rune + classes []*unicode.RangeTable + ignoreCase bool + inverted bool +} + +type anyMatcher position //{{ if .Nolint }} nolint: structcheck {{else}} ==template== {{ end }} + +// errList cumulates the errors found by the parser. 
+type errList []error + +func (e *errList) add(err error) { + *e = append(*e, err) +} + +func (e errList) err() error { + if len(e) == 0 { + return nil + } + e.dedupe() + return e +} + +func (e *errList) dedupe() { + var cleaned []error + set := make(map[string]bool) + for _, err := range *e { + if msg := err.Error(); !set[msg] { + set[msg] = true + cleaned = append(cleaned, err) + } + } + *e = cleaned +} + +func (e errList) Error() string { + switch len(e) { + case 0: + return "" + case 1: + return e[0].Error() + default: + var buf bytes.Buffer + + for i, err := range e { + if i > 0 { + buf.WriteRune('\n') + } + buf.WriteString(err.Error()) + } + return buf.String() + } +} + +// parserError wraps an error with a prefix indicating the rule in which +// the error occurred. The original error is stored in the Inner field. +type parserError struct { + Inner error + pos position + prefix string + expected []string +} + +// Error returns the error message. +func (p *parserError) Error() string { + return p.prefix + ": " + p.Inner.Error() +} + +// newParser creates a parser with the specified input source and options. +func newParser(filename string, b []byte, opts ...Option) *parser { + stats := Stats{ + ChoiceAltCnt: make(map[string]map[string]int), + } + + p := &parser{ + filename: filename, + errs: new(errList), + data: b, + pt: savepoint{position: position{line: 1}}, + recover: true, + cur: current{ + // ==template== {{ if or .GlobalState (not .Optimize) }} + state: make(storeDict), + // {{ end }} ==template== + globalStore: make(storeDict), + }, + maxFailPos: position{col: 1, line: 1}, + maxFailExpected: make([]string, 0, 20), + Stats: &stats, + // start rule is rule [0] unless an alternate entrypoint is specified + entrypoint: g.rules[0].name, + } + p.setOptions(opts) + + if p.maxExprCnt == 0 { + p.maxExprCnt = math.MaxUint64 + } + + return p +} + +// setOptions applies the options to the parser. 
+func (p *parser) setOptions(opts []Option) { + for _, opt := range opts { + opt(p) + } +} + +//{{ if .Nolint }} nolint: structcheck,deadcode {{else}} ==template== {{ end }} +type resultTuple struct { + v interface{} + b bool + end savepoint +} + +//{{ if .Nolint }} nolint: varcheck {{else}} ==template== {{ end }} +const choiceNoMatch = -1 + +// Stats stores some statistics, gathered during parsing +type Stats struct { + // ExprCnt counts the number of expressions processed during parsing + // This value is compared to the maximum number of expressions allowed + // (set by the MaxExpressions option). + ExprCnt uint64 + + // ChoiceAltCnt is used to count for each ordered choice expression, + // which alternative is used how may times. + // These numbers allow to optimize the order of the ordered choice expression + // to increase the performance of the parser + // + // The outer key of ChoiceAltCnt is composed of the name of the rule as well + // as the line and the column of the ordered choice. + // The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative. + // For each alternative the number of matches are counted. If an ordered choice does not + // match, a special counter is incremented. The name of this counter is set with + // the parser option Statistics. + // For an alternative to be included in ChoiceAltCnt, it has to match at least once. 
+ ChoiceAltCnt map[string]map[string]int +} + +//{{ if .Nolint }} nolint: structcheck,maligned {{else}} ==template== {{ end }} +type parser struct { + filename string + pt savepoint + cur current + + data []byte + errs *errList + + depth int + recover bool + // ==template== {{ if not .Optimize }} + debug bool + + memoize bool + // memoization table for the packrat algorithm: + // map[offset in source] map[expression or rule] {value, match} + memo map[int]map[interface{}]resultTuple + // {{ end }} ==template== + + // rules table, maps the rule identifier to the rule node + rules map[string]*rule + // variables stack, map of label to value + vstack []map[string]interface{} + // rule stack, allows identification of the current rule in errors + rstack []*rule + + // parse fail + maxFailPos position + maxFailExpected []string + maxFailInvertExpected bool + + // max number of expressions to be parsed + maxExprCnt uint64 + // entrypoint for the parser + entrypoint string + + allowInvalidUTF8 bool + + *Stats + + choiceNoMatch string + // recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse + recoveryStack []map[string]interface{} +} + +// push a variable set on the vstack. +func (p *parser) pushV() { + if cap(p.vstack) == len(p.vstack) { + // create new empty slot in the stack + p.vstack = append(p.vstack, nil) + } else { + // slice to 1 more + p.vstack = p.vstack[:len(p.vstack)+1] + } + + // get the last args set + m := p.vstack[len(p.vstack)-1] + if m != nil && len(m) == 0 { + // empty map, all good + return + } + + m = make(map[string]interface{}) + p.vstack[len(p.vstack)-1] = m +} + +// pop a variable set from the vstack. 
+func (p *parser) popV() { + // if the map is not empty, clear it + m := p.vstack[len(p.vstack)-1] + if len(m) > 0 { + // GC that map + p.vstack[len(p.vstack)-1] = nil + } + p.vstack = p.vstack[:len(p.vstack)-1] +} + +// push a recovery expression with its labels to the recoveryStack +func (p *parser) pushRecovery(labels []string, expr interface{}) { + if cap(p.recoveryStack) == len(p.recoveryStack) { + // create new empty slot in the stack + p.recoveryStack = append(p.recoveryStack, nil) + } else { + // slice to 1 more + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1] + } + + m := make(map[string]interface{}, len(labels)) + for _, fl := range labels { + m[fl] = expr + } + p.recoveryStack[len(p.recoveryStack)-1] = m +} + +// pop a recovery expression from the recoveryStack +func (p *parser) popRecovery() { + // GC that map + p.recoveryStack[len(p.recoveryStack)-1] = nil + + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1] +} + +// ==template== {{ if not .Optimize }} +func (p *parser) print(prefix, s string) string { + if !p.debug { + return s + } + + fmt.Printf("%s %d:%d:%d: %s [%#U]\n", + prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn) + return s +} + +func (p *parser) in(s string) string { + p.depth++ + return p.print(strings.Repeat(" ", p.depth)+">", s) +} + +func (p *parser) out(s string) string { + p.depth-- + return p.print(strings.Repeat(" ", p.depth)+"<", s) +} + +// {{ end }} ==template== + +func (p *parser) addErr(err error) { + p.addErrAt(err, p.pt.position, []string{}) +} + +func (p *parser) addErrAt(err error, pos position, expected []string) { + var buf bytes.Buffer + if p.filename != "" { + buf.WriteString(p.filename) + } + if buf.Len() > 0 { + buf.WriteString(":") + } + buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset)) + if len(p.rstack) > 0 { + if buf.Len() > 0 { + buf.WriteString(": ") + } + rule := p.rstack[len(p.rstack)-1] + if rule.displayName != "" { + buf.WriteString("rule " + 
rule.displayName) + } else { + buf.WriteString("rule " + rule.name) + } + } + pe := &parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected} + p.errs.add(pe) +} + +func (p *parser) failAt(fail bool, pos position, want string) { + // process fail if parsing fails and not inverted or parsing succeeds and invert is set + if fail == p.maxFailInvertExpected { + if pos.offset < p.maxFailPos.offset { + return + } + + if pos.offset > p.maxFailPos.offset { + p.maxFailPos = pos + p.maxFailExpected = p.maxFailExpected[:0] + } + + if p.maxFailInvertExpected { + want = "!" + want + } + p.maxFailExpected = append(p.maxFailExpected, want) + } +} + +// read advances the parser to the next rune. +func (p *parser) read() { + p.pt.offset += p.pt.w + rn, n := utf8.DecodeRune(p.data[p.pt.offset:]) + p.pt.rn = rn + p.pt.w = n + p.pt.col++ + if rn == '\n' { + p.pt.line++ + p.pt.col = 0 + } + + if rn == utf8.RuneError && n == 1 { // see utf8.DecodeRune + if !p.allowInvalidUTF8 { + p.addErr(errInvalidEncoding) + } + } +} + +// restore parser position to the savepoint pt. +func (p *parser) restore(pt savepoint) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("restore")) + } + // {{ end }} ==template== + if pt.offset == p.pt.offset { + return + } + p.pt = pt +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +// Cloner is implemented by any value that has a Clone method, which returns a +// copy of the value. This is mainly used for types which are not passed by +// value (e.g map, slice, chan) or structs that contain such types. +// +// This is used in conjunction with the global state feature to create proper +// copies of the state to allow the parser to properly restore the state in +// the case of backtracking. 
+type Cloner interface { + Clone() interface{} +} + +var statePool = &sync.Pool{ + New: func() interface{} { return make(storeDict) }, +} + +func (sd storeDict) Discard() { + for k := range sd { + delete(sd, k) + } + statePool.Put(sd) +} + +// clone and return parser current state. +func (p *parser) cloneState() storeDict { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("cloneState")) + } + // {{ end }} ==template== + + state := statePool.Get().(storeDict) + for k, v := range p.cur.state { + if c, ok := v.(Cloner); ok { + state[k] = c.Clone() + } else { + state[k] = v + } + } + return state +} + +// restore parser current state to the state storeDict. +// every restoreState should applied only one time for every cloned state +func (p *parser) restoreState(state storeDict) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("restoreState")) + } + // {{ end }} ==template== + p.cur.state.Discard() + p.cur.state = state +} + +// {{ end }} ==template== + +// get the slice of bytes from the savepoint start to the current position. 
+func (p *parser) sliceFrom(start savepoint) []byte { + return p.data[start.position.offset:p.pt.position.offset] +} + +// ==template== {{ if not .Optimize }} +func (p *parser) getMemoized(node interface{}) (resultTuple, bool) { + if len(p.memo) == 0 { + return resultTuple{}, false + } + m := p.memo[p.pt.offset] + if len(m) == 0 { + return resultTuple{}, false + } + res, ok := m[node] + return res, ok +} + +func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) { + if p.memo == nil { + p.memo = make(map[int]map[interface{}]resultTuple) + } + m := p.memo[pt.offset] + if m == nil { + m = make(map[interface{}]resultTuple) + p.memo[pt.offset] = m + } + m[node] = tuple +} + +// {{ end }} ==template== + +func (p *parser) buildRulesTable(g *grammar) { + p.rules = make(map[string]*rule, len(g.rules)) + for _, r := range g.rules { + p.rules[r.name] = r + } +} + +//{{ if .Nolint }} nolint: gocyclo {{else}} ==template== {{ end }} +func (p *parser) parse(g *grammar) (val interface{}, err error) { + if len(g.rules) == 0 { + p.addErr(errNoRule) + return nil, p.errs.err() + } + + // TODO : not super critical but this could be generated + p.buildRulesTable(g) + + if p.recover { + // panic can be used in action code to stop parsing immediately + // and return the panic as an error. + defer func() { + if e := recover(); e != nil { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("panic handler")) + } + // {{ end }} ==template== + val = nil + switch e := e.(type) { + case error: + p.addErr(e) + default: + p.addErr(fmt.Errorf("%v", e)) + } + err = p.errs.err() + } + }() + } + + startRule, ok := p.rules[p.entrypoint] + if !ok { + p.addErr(errInvalidEntrypoint) + return nil, p.errs.err() + } + + p.read() // advance to first rune + val, ok = p.parseRule(startRule) + if !ok { + if len(*p.errs) == 0 { + // If parsing fails, but no errors have been recorded, the expected values + // for the farthest parser position are returned as error. 
+ maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected)) + for _, v := range p.maxFailExpected { + maxFailExpectedMap[v] = struct{}{} + } + expected := make([]string, 0, len(maxFailExpectedMap)) + eof := false + if _, ok := maxFailExpectedMap["!."]; ok { + delete(maxFailExpectedMap, "!.") + eof = true + } + for k := range maxFailExpectedMap { + expected = append(expected, k) + } + sort.Strings(expected) + if eof { + expected = append(expected, "EOF") + } + p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected) + } + + return nil, p.errs.err() + } + return val, p.errs.err() +} + +func listJoin(list []string, sep string, lastSep string) string { + switch len(list) { + case 0: + return "" + case 1: + return list[0] + default: + return strings.Join(list[:len(list)-1], sep) + " " + lastSep + " " + list[len(list)-1] + } +} + +func (p *parser) parseRule(rule *rule) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseRule " + rule.name)) + } + + if p.memoize { + res, ok := p.getMemoized(rule) + if ok { + p.restore(res.end) + return res.v, res.b + } + } + + start := p.pt + // {{ end }} ==template== + p.rstack = append(p.rstack, rule) + p.pushV() + val, ok := p.parseExpr(rule.expr) + p.popV() + p.rstack = p.rstack[:len(p.rstack)-1] + // ==template== {{ if not .Optimize }} + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) + } + + if p.memoize { + p.setMemoized(start, rule, resultTuple{val, ok, p.pt}) + } + // {{ end }} ==template== + return val, ok +} + +//{{ if .Nolint }} nolint: gocyclo {{else}} ==template== {{ end }} +func (p *parser) parseExpr(expr interface{}) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + var pt savepoint + + if p.memoize { + res, ok := p.getMemoized(expr) + if ok { + p.restore(res.end) + return res.v, res.b + } + pt = p.pt + } + + // {{ end }} ==template== + + 
p.ExprCnt++ + if p.ExprCnt > p.maxExprCnt { + panic(errMaxExprCnt) + } + + var val interface{} + var ok bool + switch expr := expr.(type) { + case *actionExpr: + val, ok = p.parseActionExpr(expr) + case *andCodeExpr: + val, ok = p.parseAndCodeExpr(expr) + case *andExpr: + val, ok = p.parseAndExpr(expr) + case *anyMatcher: + val, ok = p.parseAnyMatcher(expr) + case *charClassMatcher: + val, ok = p.parseCharClassMatcher(expr) + case *choiceExpr: + val, ok = p.parseChoiceExpr(expr) + case *labeledExpr: + val, ok = p.parseLabeledExpr(expr) + case *litMatcher: + val, ok = p.parseLitMatcher(expr) + case *notCodeExpr: + val, ok = p.parseNotCodeExpr(expr) + case *notExpr: + val, ok = p.parseNotExpr(expr) + case *oneOrMoreExpr: + val, ok = p.parseOneOrMoreExpr(expr) + case *recoveryExpr: + val, ok = p.parseRecoveryExpr(expr) + case *ruleRefExpr: + val, ok = p.parseRuleRefExpr(expr) + case *seqExpr: + val, ok = p.parseSeqExpr(expr) + // ==template== {{ if or .GlobalState (not .Optimize) }} + case *stateCodeExpr: + val, ok = p.parseStateCodeExpr(expr) + // {{ end }} ==template== + case *throwExpr: + val, ok = p.parseThrowExpr(expr) + case *zeroOrMoreExpr: + val, ok = p.parseZeroOrMoreExpr(expr) + case *zeroOrOneExpr: + val, ok = p.parseZeroOrOneExpr(expr) + default: + panic(fmt.Sprintf("unknown expression type %T", expr)) + } + // ==template== {{ if not .Optimize }} + if p.memoize { + p.setMemoized(pt, expr, resultTuple{val, ok, p.pt}) + } + // {{ end }} ==template== + return val, ok +} + +func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseActionExpr")) + } + + // {{ end }} ==template== + start := p.pt + val, ok := p.parseExpr(act.expr) + if ok { + p.cur.pos = start.position + p.cur.text = p.sliceFrom(start) + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + actVal, err := act.run(p) + if err != nil { + 
p.addErrAt(err, start.position, []string{}) + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + + val = actVal + } + // ==template== {{ if not .Optimize }} + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) + } + // {{ end }} ==template== + return val, ok +} + +func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseAndCodeExpr")) + } + + // {{ end }} ==template== + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + + ok, err := and.run(p) + if err != nil { + p.addErr(err) + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + + return nil, ok +} + +func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseAndExpr")) + } + + // {{ end }} ==template== + pt := p.pt + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + p.pushV() + _, ok := p.parseExpr(and.expr) + p.popV() + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + p.restore(pt) + + return nil, ok +} + +func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseAnyMatcher")) + } + + // {{ end }} ==template== + if p.pt.rn == utf8.RuneError && p.pt.w == 0 { + // EOF - see utf8.DecodeRune + p.failAt(false, p.pt.position, ".") + return nil, false + } + start := p.pt + p.read() + p.failAt(true, start.position, ".") + return p.sliceFrom(start), true +} + +//{{ if .Nolint }} nolint: gocyclo {{else}} ==template== {{ end }} +func (p *parser) parseCharClassMatcher(chr *charClassMatcher) 
(interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseCharClassMatcher")) + } + + // {{ end }} ==template== + cur := p.pt.rn + start := p.pt + + // ==template== {{ if .BasicLatinLookupTable }} + if cur < 128 { + if chr.basicLatinChars[cur] != chr.inverted { + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + p.failAt(false, start.position, chr.val) + return nil, false + } + // {{ end }} ==template== + + // can't match EOF + if cur == utf8.RuneError && p.pt.w == 0 { // see utf8.DecodeRune + p.failAt(false, start.position, chr.val) + return nil, false + } + + if chr.ignoreCase { + cur = unicode.ToLower(cur) + } + + // try to match in the list of available chars + for _, rn := range chr.chars { + if rn == cur { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of ranges + for i := 0; i < len(chr.ranges); i += 2 { + if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of Unicode classes + for _, cl := range chr.classes { + if unicode.Is(cl, cur) { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + if chr.inverted { + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + p.failAt(false, start.position, chr.val) + return nil, false +} + +// ==template== {{ if not .Optimize }} + +func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) { + choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col) + m := 
p.ChoiceAltCnt[choiceIdent] + if m == nil { + m = make(map[string]int) + p.ChoiceAltCnt[choiceIdent] = m + } + // We increment altI by 1, so the keys do not start at 0 + alt := strconv.Itoa(altI + 1) + if altI == choiceNoMatch { + alt = p.choiceNoMatch + } + m[alt]++ +} + +// {{ end }} ==template== + +func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseChoiceExpr")) + } + + // {{ end }} ==template== + for altI, alt := range ch.alternatives { + // dummy assignment to prevent compile error if optimized + _ = altI + + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + + p.pushV() + val, ok := p.parseExpr(alt) + p.popV() + if ok { + // ==template== {{ if not .Optimize }} + p.incChoiceAltCnt(ch, altI) + // {{ end }} ==template== + return val, ok + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + } + // ==template== {{ if not .Optimize }} + p.incChoiceAltCnt(ch, choiceNoMatch) + // {{ end }} ==template== + return nil, false +} + +func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseLabeledExpr")) + } + + // {{ end }} ==template== + p.pushV() + val, ok := p.parseExpr(lab.expr) + p.popV() + if ok && lab.label != "" { + m := p.vstack[len(p.vstack)-1] + m[lab.label] = val + } + return val, ok +} + +func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseLitMatcher")) + } + + // {{ end }} ==template== + start := p.pt + for _, want := range lit.val { + cur := p.pt.rn + if lit.ignoreCase { + cur = unicode.ToLower(cur) + } + if cur != want { + p.failAt(false, start.position, lit.want) + p.restore(start) + return nil, false + } + p.read() + } + p.failAt(true, 
start.position, lit.want) + return p.sliceFrom(start), true +} + +func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseNotCodeExpr")) + } + + // {{ end }} ==template== + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + + // {{ end }} ==template== + ok, err := not.run(p) + if err != nil { + p.addErr(err) + } + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + + return nil, !ok +} + +func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseNotExpr")) + } + + // {{ end }} ==template== + pt := p.pt + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + p.pushV() + p.maxFailInvertExpected = !p.maxFailInvertExpected + _, ok := p.parseExpr(not.expr) + p.maxFailInvertExpected = !p.maxFailInvertExpected + p.popV() + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + p.restore(pt) + + return nil, !ok +} + +func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseOneOrMoreExpr")) + } + + // {{ end }} ==template== + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + if len(vals) == 0 { + // did not match once, no match + return nil, false + } + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")")) + } + + // {{ end }} ==template== + + p.pushRecovery(recover.failureLabel, recover.recoverExpr) + 
val, ok := p.parseExpr(recover.expr) + p.popRecovery() + + return val, ok +} + +func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseRuleRefExpr " + ref.name)) + } + + // {{ end }} ==template== + if ref.name == "" { + panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos)) + } + + rule := p.rules[ref.name] + if rule == nil { + p.addErr(fmt.Errorf("undefined rule: %s", ref.name)) + return nil, false + } + return p.parseRule(rule) +} + +func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseSeqExpr")) + } + + // {{ end }} ==template== + vals := make([]interface{}, 0, len(seq.exprs)) + + pt := p.pt + // ==template== {{ if or .GlobalState (not .Optimize) }} + state := p.cloneState() + // {{ end }} ==template== + for _, expr := range seq.exprs { + val, ok := p.parseExpr(expr) + if !ok { + // ==template== {{ if or .GlobalState (not .Optimize) }} + p.restoreState(state) + // {{ end }} ==template== + p.restore(pt) + return nil, false + } + vals = append(vals, val) + } + return vals, true +} + +// ==template== {{ if or .GlobalState (not .Optimize) }} + +func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseStateCodeExpr")) + } + + // {{ end }} ==template== + err := state.run(p) + if err != nil { + p.addErr(err) + } + return nil, true +} + +// {{ end }} ==template== + +func (p *parser) parseThrowExpr(expr *throwExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseThrowExpr")) + } + + // {{ end }} ==template== + + for i := len(p.recoveryStack) - 1; i >= 0; i-- { + if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok { + if val, ok := p.parseExpr(recoverExpr); ok { + return val, ok + } + } + } + + return nil, false 
+} + +func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseZeroOrMoreExpr")) + } + + // {{ end }} ==template== + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) { + // ==template== {{ if not .Optimize }} + if p.debug { + defer p.out(p.in("parseZeroOrOneExpr")) + } + + // {{ end }} ==template== + p.pushV() + val, _ := p.parseExpr(expr.expr) + p.popV() + // whether it matched or not, consider it a match + return val, true +} diff --git a/vendor/github.com/mna/pigeon/builder/static_code_range_table.go b/vendor/github.com/mna/pigeon/builder/static_code_range_table.go new file mode 100644 index 00000000000..d410509606e --- /dev/null +++ b/vendor/github.com/mna/pigeon/builder/static_code_range_table.go @@ -0,0 +1,24 @@ +//go:generate go run ../bootstrap/cmd/static_code_generator/main.go -- $GOFILE generated_$GOFILE rangeTable0 + +package builder + +import ( + "fmt" + "unicode" +) + +// IMPORTANT: All code below this line is added to the parser as static code +func rangeTable(class string) *unicode.RangeTable { + if rt, ok := unicode.Categories[class]; ok { + return rt + } + if rt, ok := unicode.Properties[class]; ok { + return rt + } + if rt, ok := unicode.Scripts[class]; ok { + return rt + } + + // cannot happen + panic(fmt.Sprintf("invalid Unicode class: %s", class)) +} diff --git a/vendor/github.com/mna/pigeon/doc.go b/vendor/github.com/mna/pigeon/doc.go new file mode 100644 index 00000000000..0b94889fb47 --- /dev/null +++ b/vendor/github.com/mna/pigeon/doc.go @@ -0,0 +1,594 @@ +/* +Command pigeon generates parsers in Go from a PEG grammar. 
+ +From Wikipedia [0]: + + A parsing expression grammar is a type of analytic formal grammar, i.e. + it describes a formal language in terms of a set of rules for recognizing + strings in the language. + +Its features and syntax are inspired by the PEG.js project [1], while +the implementation is loosely based on [2]. Formal presentation of the +PEG theory by Bryan Ford is also an important reference [3]. An introductory +blog post can be found at [4]. + + [0]: http://en.wikipedia.org/wiki/Parsing_expression_grammar + [1]: http://pegjs.org/ + [2]: http://www.codeproject.com/Articles/29713/Parsing-Expression-Grammar-Support-for-C-Part + [3]: http://pdos.csail.mit.edu/~baford/packrat/popl04/peg-popl04.pdf + [4]: http://0value.com/A-PEG-parser-generator-for-Go + +Command-line usage + +The pigeon tool must be called with PEG input as defined +by the accepted PEG syntax below. The grammar may be provided by a +file or read from stdin. The generated parser is written to stdout +by default. + + pigeon [options] [GRAMMAR_FILE] + +The following options can be specified: + + -cache : cache parser results to avoid exponential parsing time in + pathological cases. Can make the parsing slower for typical + cases and uses more memory (default: false). + + -debug : boolean, print debugging info to stdout (default: false). + + -nolint: add '// nolint: ...' comments for generated parser to suppress + warnings by gometalinter (https://github.com/alecthomas/gometalinter). + + -no-recover : boolean, if set, do not recover from a panic. Useful + to access the panic stack when debugging, otherwise the panic + is converted to an error (default: false). + + -o=FILE : string, output file where the generated parser will be + written (default: stdout). + + -optimize-basic-latin : boolean, if set, a lookup table for the first 128 + characters of the Unicode table (Basic Latin) is generated for each character + class matcher. 
This speeds up the parsing, if parsed data mainly consists + of characters from this range (default: false). + + -optimize-grammar : boolean, (EXPERIMENTAL FEATURE) if set, several performance + optimizations on the grammar are performed, with focus to the reduction of the + grammar depth. + Optimization: + * removal of unreferenced rules + * replace rule references with a copy of the referenced Rule, if the + referenced rule it self has no references. + * resolve nested choice expressions + * resolve choice expressions with only one alternative + * resolve nested sequences expression + * resolve sequence expressions with only one element + * combine character class matcher and literal matcher, where possible + The resulting grammar is usually more memory consuming, but faster for parsing. + The optimization of the grammar is done in multiple rounds (optimize until no + more optimizations have applied). This process takes some time, depending on the + optimization potential of the grammar. + + -optimize-parser : boolean, if set, the options Debug, Memoize and Statistics are + removed from the resulting parser. The global "state" is optimized as well by + either removing all related code if no state change expression is present in the + grammar or by removing the restoration of the global "state" store after action + and predicate code blocks. This saves a few cpu cycles, when using the generated + parser (default: false). + + -x : boolean, if set, do not build the parser, just parse the input grammar + (default: false). + + -receiver-name=NAME : string, name of the receiver variable for the generated + code blocks. Non-initializer code blocks in the grammar end up as methods on the + *current type, and this option sets the name of the receiver (default: c). + + -alternate-entrypoints=RULE[,RULE...] 
: string, comma-separated list of rule names + that may be used as alternate entrypoints for the parser, in addition to the + default entrypoint (the first rule in the grammar) (default: none). + Such entrypoints can be specified in the call to Parse by passing an + Entrypoint option that specifies the alternate rule name to use. This is only + necessary if the -optimize-parser flag is set, as some rules may be optimized + out of the resulting parser. + +If the code blocks in the grammar (see below, section "Code block") are golint- +and go vet-compliant, then the resulting generated code will also be golint- +and go vet-compliant. + +The generated code doesn't use any third-party dependency unless code blocks +in the grammar require such a dependency. + +PEG syntax + +The accepted syntax for the grammar is formally defined in the +grammar/pigeon.peg file, using the PEG syntax. What follows is an informal +description of this syntax. + +Identifiers, whitespace, comments and literals follow the same +notation as the Go language, as defined in the language specification +(http://golang.org/ref/spec#Source_code_representation): + + // single line comment*/ +// /* multi-line comment */ +/* 'x' (single quotes for single char literal) + "double quotes for string literal" + `backtick quotes for raw string literal` + RuleName (a valid identifier) + +The grammar must be Unicode text encoded in UTF-8. New lines are identified +by the \n character (U+000A). Space (U+0020), horizontal tabs (U+0009) and +carriage returns (U+000D) are considered whitespace and are ignored except +to separate tokens. + +Rules + +A PEG grammar consists of a set of rules. A rule is an identifier followed +by a rule definition operator and an expression. An optional display name - +a string literal used in error messages instead of the rule identifier - can +be specified after the rule identifier. 
E.g.: + RuleA "friendly name" = 'a'+ // RuleA is one or more lowercase 'a's + +The rule definition operator can be any one of those: + =, <-, ← (U+2190), ⟵ (U+27F5) + +Expressions + +A rule is defined by an expression. The following sections describe the +various expression types. Expressions can be grouped by using parentheses, +and a rule can be referenced by its identifier in place of an expression. + +Choice expression + +The choice expression is a list of expressions that will be tested in the +order they are defined. The first one that matches will be used. Expressions +are separated by the forward slash character "/". E.g.: + ChoiceExpr = A / B / C // A, B and C should be rules declared in the grammar + +Because the first match is used, it is important to think about the order +of expressions. For example, in this rule, "<=" would never be used because +the "<" expression comes first: + BadChoiceExpr = "<" / "<=" + +Sequence expression + +The sequence expression is a list of expressions that must all match in +that same order for the sequence expression to be considered a match. +Expressions are separated by whitespace. E.g.: + SeqExpr = "A" "b" "c" // matches "Abc", but not "Acb" + +Labeled expression + +A labeled expression consists of an identifier followed by a colon ":" +and an expression. A labeled expression introduces a variable named with +the label that can be referenced in the code blocks in the same scope. +The variable will have the value of the expression that follows the colon. +E.g.: + LabeledExpr = value:[a-z]+ { + fmt.Println(value) + return value, nil + } + +The variable is typed as an empty interface, and the underlying type depends +on the following: + +For terminals (character and string literals, character classes and +the any matcher), the value is []byte. E.g.: + Rule = label:'a' { // label is []byte } + +For predicates (& and !), the value is always nil. 
E.g.: + Rule = label:&'a' { // label is nil } + +For a sequence, the value is a slice of empty interfaces, one for each +expression value in the sequence. The underlying types of each value +in the slice follow the same rules described here, recursively. E.g.: + Rule = label:('a' 'b') { // label is []interface{} } + +For a repetition (+ and *), the value is a slice of empty interfaces, one for +each repetition. The underlying types of each value in the slice follow +the same rules described here, recursively. E.g.: + Rule = label:[a-z]+ { // label is []interface{} } + +For a choice expression, the value is that of the matching choice. E.g.: + Rule = label:('a' / 'b') { // label is []byte } + +For the optional expression (?), the value is nil or the value of the +expression. E.g.: + Rule = label:'a'? { // label is nil or []byte } + +Of course, the type of the value can be anything once an action code block +is used. E.g.: + RuleA = label:'3' { + return 3, nil + } + RuleB = label:RuleA { // label is int } + +And and not expressions + +An expression prefixed with the ampersand "&" is the "and" predicate +expression: it is considered a match if the following expression is a match, +but it does not consume any input. + +An expression prefixed with the exclamation point "!" is the "not" predicate +expression: it is considered a match if the following expression is not +a match, but it does not consume any input. E.g.: + AndExpr = "A" &"B" // matches "A" if followed by a "B" (does not consume "B") + NotExpr = "A" !"B" // matches "A" if not followed by a "B" (does not consume "B") + +The expression following the & and ! operators can be a code block. In that +case, the code block must return a bool and an error. The operator's semantic +is the same, & is a match if the code block returns true, ! is a match if the +code block returns false. The code block has access to any labeled value +defined in its scope. 
E.g.: + CodeAndExpr = value:[a-z] &{ + // can access the value local variable... + return true, nil + } + +Repeating expressions + +An expression followed by "*", "?" or "+" is a match if the expression +occurs zero or more times ("*"), zero or one time "?" or one or more times +("+") respectively. The match is greedy, it will match as many times as +possible. E.g. + ZeroOrMoreAs = "A"* + +Literal matcher + +A literal matcher tries to match the input against a single character or a +string literal. The literal may be a single-quoted single character, a +double-quoted string or a backtick-quoted raw string. The same rules as in Go +apply regarding the allowed characters and escapes. + +The literal may be followed by a lowercase "i" (outside the ending quote) +to indicate that the match is case-insensitive. E.g.: + LiteralMatch = "Awesome\n"i // matches "awesome" followed by a newline + +Character class matcher + +A character class matcher tries to match the input against a class of characters +inside square brackets "[...]". Inside the brackets, characters represent +themselves and the same escapes as in string literals are available, except +that the single- and double-quote escape is not valid, instead the closing +square bracket "]" must be escaped to be used. + +Character ranges can be specified using the "[a-z]" notation. Unicode +classes can be specified using the "[\pL]" notation, where L is a +single-letter Unicode class of characters, or using the "[\p{Class}]" +notation where Class is a valid Unicode class (e.g. "Latin"). + +As for string literals, a lowercase "i" may follow the matcher (outside +the ending square bracket) to indicate that the match is case-insensitive. +A "^" as first character inside the square brackets indicates that the match +is inverted (it is a match if the input does not match the character class +matcher). E.g.: + NotAZ = [^a-z]i + +Any matcher + +The any matcher is represented by the dot ".". 
It matches any character +except the end of file, thus the "!." expression is used to indicate "match +the end of file". E.g.: + AnyChar = . // match a single character + EOF = !. + +Code block + +Code blocks can be added to generate custom Go code. There are three kinds +of code blocks: the initializer, the action and the predicate. All code blocks +appear inside curly braces "{...}". + +The initializer must appear first in the grammar, before any rule. It is +copied as-is (minus the wrapping curly braces) at the top of the generated +parser. It may contain function declarations, types, variables, etc. just +like any Go file. Every symbol declared here will be available to all other +code blocks. Although the initializer is optional in a valid grammar, it is +usually required to generate a valid Go source code file (for the package +clause). E.g.: + { + package main + + func someHelper() { + // ... + } + } + +Action code blocks are code blocks declared after an expression in a rule. +Those code blocks are turned into a method on the "*current" type in the +generated source code. The method receives any labeled expression's value +as argument (as interface{}) and must return two values, the first being +the value of the expression (an interface{}), and the second an error. +If a non-nil error is returned, it is added to the list of errors that the +parser will return. E.g.: + RuleA = "A"+ { + // return the matched string, "c" is the default name for + // the *current receiver variable. + return string(c.text), nil + } + +Predicate code blocks are code blocks declared immediately after the and "&" +or the not "!" operators. Like action code blocks, predicate code blocks +are turned into a method on the "*current" type in the generated source code. +The method receives any labeled expression's value as argument (as interface{}) +and must return two opt, the first being a bool and the second an error. 
+If a non-nil error is returned, it is added to the list of errors that the +parser will return. E.g.: + RuleAB = [ab]i+ &{ + return true, nil + } + +State change code blocks are code blocks starting with "#". In contrast to +action and predicate code blocks, state change code blocks are allowed to +modify values in the global "state" store (see below). +State change code blocks are turned into a method on the "*current" type +in the generated source code. +The method is passed any labeled expression's value as an argument (of type +interface{}) and must return a value of type error. +If a non-nil error is returned, it is added to the list of errors that the +parser will return, note that the parser does NOT backtrack if a non-nil +error is returned. +E.g: + Rule = [a] #{ + c.state["a"]++ + if c.state["a"] > 5 { + return fmt.Errorf("we have seen more than 5 a's") // parser will not backtrack + } + return nil + } +The "*current" type is a struct that provides four useful fields that can be +accessed in action, state change, and predicate code blocks: "pos", "text", +"state" and "globalStore". + +The "pos" field indicates the current position of the parser in the source +input. It is itself a struct with three fields: "line", "col" and "offset". +Line is a 1-based line number, col is a 1-based column number that counts +runes from the start of the line, and offset is a 0-based byte offset. + +The "text" field is the slice of bytes of the current match. It is empty +in a predicate code block. + +The "state" field is a global store, with backtrack support, of type +"map[string]interface{}". The values in the store are tied to the parser's +backtracking, in particular if a rule fails to match then all updates to the +state that occurred in the process of matching the rule are rolled back. For a +key-value store that is not tied to the parser's backtracking, see the +"globalStore". 
+The values in the "state" store are available for read access in action and +predicate code blocks, any changes made to the "state" store will be reverted +once the action or predicate code block is finished running. To update values +in the "state" use state change code blocks ("#{}"). + +IMPORTANT: + - In order to properly roll back the state if a rule fails to match the + parser must clone the state before trying to match a rule. + - The default clone mechanism makes a "shallow" copy of each value in the + "state", this implies that pointers, maps, slices, channels, and structs + containing any of the previous types are not properly copied. + - To support theses cases pigeon offers the "Cloner" interface which + consists of a single method "Clone". If a value stored in the "state" + store implements this interface, the "Clone" method is used to obtain a + proper copy. + - If a general solution is needed, external libraries which provide deep + copy functionality may be used in the "Clone" method + (e.g. https://github.com/mitchellh/copystructure). + +The "globalStore" field is a global store of type "map[string]interface{}", +which allows to store arbitrary values, which are available in action and +predicate code blocks for read as well as write access. +It is important to notice, that the global store is completely independent from +the backtrack mechanism of PEG and is therefore not set back to its old state +during backtrack. +The initialization of the global store may be achieved by using the GlobalStore +function (http://godoc.org/github.com/mna/pigeon/test/predicates#GlobalStore). +Be aware, that all keys starting with "_pigeon" are reserved for internal use +of pigeon and should not be used nor modified. Those keys are treated as +internal implementation details and therefore there are no guarantees given in +regards of API stability. 
+ +Failure labels, throw and recover + +pigeon supports an extension of the classical PEG syntax called failure labels, +proposed by Maidl et al. in their paper "Error Reporting in Parsing Expression Grammars" [7]. +The used syntax for the introduced expressions is borrowed from their lpeglabel [8] +implementation. + +This extension allows to signal different kinds of errors and to specify, which +recovery pattern should handle a given label. + +With labeled failures it is possible to distinguish between an ordinary failure +and an error. Usually, an ordinary failure is produced when the matching of a +character fails, and this failure is caught by ordered choice. An error +(a non-ordinary failure), by its turn, is produced by the throw operator and +may be caught by the recovery operator. + +In pigeon, the recovery expression consists of the regular expression, the recovery +expression and a set of labels to be matched. First, the regular expression is tried. +If this fails with one of the provided labels, the recovery expression is tried. If +this fails as well, the error is propagated. E.g.: + FailureRecoveryExpr = RegularExpr //{FailureLabel1, FailureLabel2} RecoveryExpr + +To signal a failure condition, the throw expression is used. E.g.: + ThrowExpr = %{FailureLabel1} + +For concrete examples, how to use throw and recover, have a look at the examples +"labeled_failures" and "thrownrecover" in the "test" folder. + +The implementation of the throw and recover operators work as follows: +The failure recover expression adds the recover expression for every failure label +to the recovery stack and runs the regular expression. +The throw expression checks the recovery stack in reversed order for the provided +failure label. If the label is found, the respective recovery expression is run. If +this expression is successful, the parser continues the processing of the input. 
If +the recovery expression is not successful, the parsing fails and the parser starts +to backtrack. + +If throw and recover expressions are used together with global state, it is the +responsibility of the author of the grammar to reset the global state to a valid +state during the recovery operation. + + [7]: https://arxiv.org/pdf/1405.6646v3.pdf + [8]: https://github.com/sqmedeiros/lpeglabel + +Using the generated parser + +The parser generated by pigeon exports a few symbols so that it can be used +as a package with public functions to parse input text. The exported API is: + - Parse(string, []byte, ...Option) (interface{}, error) + - ParseFile(string, ...Option) (interface{}, error) + - ParseReader(string, io.Reader, ...Option) (interface{}, error) + - AllowInvalidUTF8(bool) Option + - Debug(bool) Option + - Entrypoint(string) Option + - GlobalStore(string, interface{}) Option + - MaxExpressions(uint64) Option + - Memoize(bool) Option + - Recover(bool) Option + - Statistics(*Stats) Option + +See the godoc page of the generated parser for the test/predicates grammar +for an example documentation page of the exported API: +http://godoc.org/github.com/mna/pigeon/test/predicates. + +Like the grammar used to generate the parser, the input text must be +UTF-8-encoded Unicode. + +The start rule of the parser is the first rule in the PEG grammar used +to generate the parser. A call to any of the Parse* functions returns +the value generated by executing the grammar on the provided input text, +and an optional error. + +Typically, the grammar should generate some kind of abstract syntax tree (AST), +but for simple grammars it may evaluate the result immediately, such as in +the examples/calculator example. There are no constraints imposed on the +author of the grammar, it can return whatever is needed. + +Error reporting + +When the parser returns a non-nil error, the error is always of type errList, +which is defined as a slice of errors ([]error). 
Each error in the list is +of type *parserError. This is a struct that has an "Inner" field that can be +used to access the original error. + +So if a code block returns some well-known error like: + { + return nil, io.EOF + } + +The original error can be accessed this way: + _, err := ParseFile("some_file") + if err != nil { + list := err.(errList) + for _, err := range list { + pe := err.(*parserError) + if pe.Inner == io.EOF { + // ... + } + } + } + +By defaut the parser will continue after an error is returned and will +cumulate all errors found during parsing. If the grammar reaches a point +where it shouldn't continue, a panic statement can be used to terminate +parsing. The panic will be caught at the top-level of the Parse* call +and will be converted into a *parserError like any error, and an errList +will still be returned to the caller. + +The divide by zero error in the examples/calculator grammar leverages this +feature (no special code is needed to handle division by zero, if it +happens, the runtime panics and it is recovered and returned as a parsing +error). + +Providing good error reporting in a parser is not a trivial task. Part +of it is provided by the pigeon tool, by offering features such as +filename, position, expected literals and rule name in the error message, +but an important part of good error reporting needs to be done by the grammar +author. + +For example, many programming languages use double-quotes for string literals. +Usually, if the opening quote is found, the closing quote is expected, and if +none is found, there won't be any other rule that will match, there's no need +to backtrack and try other choices, an error should be added to the list +and the match should be consumed. + +In order to do this, the grammar can look something like this: + + StringLiteral = '"' ValidStringChar* '"' { + // this is the valid case, build string literal node + // node = ... 
+        return node, nil
+    } / '"' ValidStringChar* !'"' {
+        // invalid case, build a replacement string literal node or build a BadNode
+        // node = ...
+        return node, errors.New("string literal not terminated")
+    }
+
+This is just one example, but it illustrates the idea that error reporting
+needs to be thought out when designing the grammar.
+
+Because the above mentioned error types (errList and parserError) are not
+exported, additional steps have to be taken, if the generated parser is used as
+library package in other packages (e.g. if the same parser is used in multiple
+command line tools).
+One possible implementation for exported errors (based on interfaces) and
+customized error reporting (caret style formatting of the position, where
+the parsing failed) is available in the json example and its command line tool:
+http://godoc.org/github.com/mna/pigeon/examples/json
+
+API stability
+
+Generated parsers have user-provided code mixed with pigeon code
+in the same package, so there is no package
+boundary in the resulting code to prevent access to unexported symbols.
+What is meant to be implementation
+details in pigeon is also available to user code - which doesn't mean
+it should be used.
+
+For this reason, it is important to precisely define what is intended to be
+the supported API of pigeon, the parts that will be stable
+in future versions.
+
+The "stability" of the version 1.0 API attempts to make a similar guarantee
+as the Go 1 compatibility [5]. The following lists what part of the
+current pigeon code falls under that guarantee (features may be added in
+the future):
+
+ - The pigeon command-line flags and arguments: those will not be removed
+   and will maintain the same semantics.
+
+ - The explicitly exported API generated by pigeon. See [6] for the
+   documentation of this API on a generated parser.
+
+ - The PEG syntax, as documented above.
+ + - The code blocks (except the initializer) will always be generated as + methods on the *current type, and this type is guaranteed to have + the fields pos (type position) and text (type []byte). There are no + guarantees on other fields and methods of this type. + + - The position type will always have the fields line, col and offset, + all defined as int. There are no guarantees on other fields and methods + of this type. + + - The type of the error value returned by the Parse* functions, when + not nil, will always be errList defined as a []error. There are no + guarantees on methods of this type, other than the fact it implements the + error interface. + + - Individual errors in the errList will always be of type *parserError, + and this type is guaranteed to have an Inner field that contains the + original error value. There are no guarantees on other fields and methods + of this type. + +The above guarantee is given to the version 1.0 (https://github.com/mna/pigeon/releases/tag/v1.0.0) +of pigeon, which has entered maintenance mode (bug fixes only). The current +master branch includes the development toward a future version 2.0, which +intends to further improve pigeon. +While the given API stability should be maintained as far as it makes sense, +breaking changes may be necessary to be able to improve pigeon. +The new version 2.0 API has not yet stabilized and therefore changes to the API +may occur at any time. 
+ +References: + + [5]: https://golang.org/doc/go1compat + [6]: http://godoc.org/github.com/mna/pigeon/test/predicates + +*/ +package main diff --git a/vendor/github.com/mna/pigeon/main.go b/vendor/github.com/mna/pigeon/main.go new file mode 100644 index 00000000000..a00be22e003 --- /dev/null +++ b/vendor/github.com/mna/pigeon/main.go @@ -0,0 +1,294 @@ +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + + "golang.org/x/tools/imports" + + "github.com/mna/pigeon/ast" + "github.com/mna/pigeon/builder" +) + +// exit function mockable for tests +var exit = os.Exit + +// ruleNamesFlag is a custom flag that parses a comma-separated +// list of rule names. It implements flag.Value. +type ruleNamesFlag []string + +func (r *ruleNamesFlag) String() string { + return fmt.Sprint(*r) +} + +func (r *ruleNamesFlag) Set(value string) error { + names := strings.Split(value, ",") + *r = append(*r, names...) + return nil +} + +func main() { + fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + + // define command-line flags + var ( + cacheFlag = fs.Bool("cache", false, "cache parsing results") + dbgFlag = fs.Bool("debug", false, "set debug mode") + shortHelpFlag = fs.Bool("h", false, "show help page") + longHelpFlag = fs.Bool("help", false, "show help page") + nolint = fs.Bool("nolint", false, "add '// nolint: ...' 
comments to suppress warnings by gometalinter") + noRecoverFlag = fs.Bool("no-recover", false, "do not recover from panic") + outputFlag = fs.String("o", "", "output file, defaults to stdout") + optimizeBasicLatinFlag = fs.Bool("optimize-basic-latin", false, "generate optimized parser for Unicode Basic Latin character sets") + optimizeGrammar = fs.Bool("optimize-grammar", false, "optimize the given grammar (EXPERIMENTAL FEATURE)") + optimizeParserFlag = fs.Bool("optimize-parser", false, "generate optimized parser without Debug and Memoize options") + recvrNmFlag = fs.String("receiver-name", "c", "receiver name for the generated methods") + noBuildFlag = fs.Bool("x", false, "do not build, only parse") + + altEntrypointsFlag ruleNamesFlag + ) + fs.Var(&altEntrypointsFlag, "alternate-entrypoints", "comma-separated list of rule names that may be used as entrypoints") + + fs.Usage = usage + err := fs.Parse(os.Args[1:]) + if err != nil { + fmt.Fprintln(os.Stderr, "args parse error:\n", err) + exit(6) + } + + if *shortHelpFlag || *longHelpFlag { + fs.Usage() + exit(0) + } + + if fs.NArg() > 1 { + argError(1, "expected one argument, got %q", strings.Join(fs.Args(), " ")) + } + + // get input source + infile := "" + if fs.NArg() == 1 { + infile = fs.Arg(0) + } + nm, rc := input(infile) + defer func() { + err = rc.Close() + if err != nil { + fmt.Fprintln(os.Stderr, "close file error:\n", err) + } + if r := recover(); r != nil { + panic(r) + } + if err != nil { + exit(7) + } + }() + + // parse input + g, err := ParseReader(nm, rc, Debug(*dbgFlag), Memoize(*cacheFlag), Recover(!*noRecoverFlag)) + if err != nil { + fmt.Fprintln(os.Stderr, "parse error(s):\n", err) + exit(3) + } + + // validate alternate entrypoints + grammar := g.(*ast.Grammar) + rules := make(map[string]struct{}, len(grammar.Rules)) + for _, rule := range grammar.Rules { + rules[rule.Name.Val] = struct{}{} + } + for _, entrypoint := range altEntrypointsFlag { + if entrypoint == "" { + continue + } + + if _, ok 
:= rules[entrypoint]; !ok { + fmt.Fprintf(os.Stderr, "argument error:\nunknown rule name %s used as alternate entrypoint\n", entrypoint) + exit(9) + } + } + + if !*noBuildFlag { + if *optimizeGrammar { + ast.Optimize(grammar, altEntrypointsFlag...) + } + + // generate parser + out := output(*outputFlag) + defer func() { + err := out.Close() + if err != nil { + fmt.Fprintln(os.Stderr, "close file error:\n", err) + exit(8) + } + }() + + outBuf := bytes.NewBuffer([]byte{}) + + curNmOpt := builder.ReceiverName(*recvrNmFlag) + optimizeParser := builder.Optimize(*optimizeParserFlag) + basicLatinOptimize := builder.BasicLatinLookupTable(*optimizeBasicLatinFlag) + nolintOpt := builder.Nolint(*nolint) + if err := builder.BuildParser(outBuf, grammar, curNmOpt, optimizeParser, basicLatinOptimize, nolintOpt); err != nil { + fmt.Fprintln(os.Stderr, "build error: ", err) + exit(5) + } + + // Defaults from golang.org/x/tools/cmd/goimports + options := &imports.Options{ + TabWidth: 8, + TabIndent: true, + Comments: true, + Fragment: true, + } + + formattedBuf, err := imports.Process("filename", outBuf.Bytes(), options) + if err != nil { + if _, err := out.Write(outBuf.Bytes()); err != nil { + fmt.Fprintln(os.Stderr, "write error: ", err) + exit(7) + } + fmt.Fprintln(os.Stderr, "format error: ", err) + exit(6) + } + + if _, err := out.Write(formattedBuf); err != nil { + fmt.Fprintln(os.Stderr, "write error: ", err) + exit(7) + } + } +} + +var usagePage = `usage: %s [options] [GRAMMAR_FILE] + +Pigeon generates a parser based on a PEG grammar. + +By default, pigeon reads the grammar from stdin and writes the +generated parser to stdout. If GRAMMAR_FILE is specified, the +grammar is read from this file instead. If the -o flag is set, +the generated code is written to this file instead. + + -cache + cache parser results to avoid exponential parsing time in + pathological cases. Can make the parsing slower for typical + cases and uses more memory. 
+	-debug
+		output debugging information while parsing the grammar.
+	-h -help
+		display this help message.
+	-nolint
+		add '// nolint: ...' comments for generated parser to suppress
+		warnings by gometalinter (https://github.com/alecthomas/gometalinter).
+	-no-recover
+		do not recover from a panic. Useful to access the panic stack
+		when debugging, otherwise the panic is converted to an error.
+	-o OUTPUT_FILE
+		write the generated parser to OUTPUT_FILE. Defaults to stdout.
+	-optimize-basic-latin
+		generate optimized parser for Unicode Basic Latin character set
+	-optimize-grammar
+		performs several performance optimizations on the grammar (EXPERIMENTAL FEATURE)
+	-optimize-parser
+		generate optimized parser without Debug and Memoize options and
+		with some other optimizations applied.
+	-receiver-name NAME
+		use NAME as the receiver name of the generated methods
+		for the grammar's code blocks. Defaults to "c".
+	-x
+		do not generate the parser, only parse the grammar.
+	-alternate-entrypoints RULE[,RULE...]
+		comma-separated list of rule names that may be used as alternate
+		entrypoints for the parser, in addition to the first rule in the
+		grammar.
+
+See https://godoc.org/github.com/mna/pigeon for more information.
+`
+
+// usage prints the help page of the command-line tool.
+func usage() {
+	fmt.Printf(usagePage, os.Args[0])
+}
+
+// argError prints an error message to stderr, prints the command usage
+// and exits with the specified exit code.
+func argError(exitCode int, msg string, args ...interface{}) {
+	fmt.Fprintf(os.Stderr, msg, args...)
+	fmt.Fprintln(os.Stderr)
+	usage()
+	exit(exitCode)
+}
+
+// input gets the name and reader to get input text from.
+func input(filename string) (nm string, rc io.ReadCloser) { + nm = "stdin" + inf := os.Stdin + if filename != "" { + f, err := os.Open(filename) + if err != nil { + fmt.Fprintln(os.Stderr, err) + exit(2) + } + inf = f + nm = filename + } + r := bufio.NewReader(inf) + return nm, makeReadCloser(r, inf) +} + +// output gets the writer to write the generated parser to. +func output(filename string) io.WriteCloser { + out := os.Stdout + if filename != "" { + f, err := os.Create(filename) + if err != nil { + fmt.Fprintln(os.Stderr, err) + exit(4) + } + out = f + } + return out +} + +// create a ReadCloser that reads from r and closes c. +func makeReadCloser(r io.Reader, c io.Closer) io.ReadCloser { + rc := struct { + io.Reader + io.Closer + }{r, c} + return io.ReadCloser(rc) +} + +// astPos is a helper method for the PEG grammar parser. It returns the +// position of the current match as an ast.Pos. +func (c *current) astPos() ast.Pos { + return ast.Pos{Line: c.pos.line, Col: c.pos.col, Off: c.pos.offset} +} + +// toIfaceSlice is a helper function for the PEG grammar parser. It converts +// v to a slice of empty interfaces. +func toIfaceSlice(v interface{}) []interface{} { + if v == nil { + return nil + } + return v.([]interface{}) +} + +// validateUnicodeEscape checks that the provided escape sequence is a +// valid Unicode escape sequence. +func validateUnicodeEscape(escape, errMsg string) (interface{}, error) { + r, _, _, err := strconv.UnquoteChar("\\"+escape, '"') + if err != nil { + return nil, errors.New(errMsg) + } + if 0xD800 <= r && r <= 0xDFFF { + return nil, errors.New(errMsg) + } + return nil, nil +} diff --git a/vendor/github.com/mna/pigeon/pigeon.go b/vendor/github.com/mna/pigeon/pigeon.go new file mode 100644 index 00000000000..6a536f6b466 --- /dev/null +++ b/vendor/github.com/mna/pigeon/pigeon.go @@ -0,0 +1,4526 @@ +// Code generated by pigeon; DO NOT EDIT. 
+ +package main + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "github.com/mna/pigeon/ast" +) + +var g = &grammar{ + rules: []*rule{ + { + name: "Grammar", + pos: position{line: 5, col: 1, offset: 18}, + expr: &actionExpr{ + pos: position{line: 5, col: 11, offset: 30}, + run: (*parser).callonGrammar1, + expr: &seqExpr{ + pos: position{line: 5, col: 11, offset: 30}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 5, col: 11, offset: 30}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 5, col: 14, offset: 33}, + label: "initializer", + expr: &zeroOrOneExpr{ + pos: position{line: 5, col: 26, offset: 45}, + expr: &seqExpr{ + pos: position{line: 5, col: 28, offset: 47}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 5, col: 28, offset: 47}, + name: "Initializer", + }, + &ruleRefExpr{ + pos: position{line: 5, col: 40, offset: 59}, + name: "__", + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 5, col: 46, offset: 65}, + label: "rules", + expr: &oneOrMoreExpr{ + pos: position{line: 5, col: 52, offset: 71}, + expr: &seqExpr{ + pos: position{line: 5, col: 54, offset: 73}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 5, col: 54, offset: 73}, + name: "Rule", + }, + &ruleRefExpr{ + pos: position{line: 5, col: 59, offset: 78}, + name: "__", + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 5, col: 65, offset: 84}, + name: "EOF", + }, + }, + }, + }, + }, + { + name: "Initializer", + pos: position{line: 24, col: 1, offset: 525}, + expr: &actionExpr{ + pos: position{line: 24, col: 15, offset: 541}, + run: (*parser).callonInitializer1, + expr: &seqExpr{ + pos: position{line: 24, col: 15, offset: 541}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 24, col: 15, offset: 541}, + label: "code", + expr: &ruleRefExpr{ + pos: position{line: 24, col: 20, offset: 546}, + name: 
"CodeBlock", + }, + }, + &ruleRefExpr{ + pos: position{line: 24, col: 30, offset: 556}, + name: "EOS", + }, + }, + }, + }, + }, + { + name: "Rule", + pos: position{line: 28, col: 1, offset: 586}, + expr: &actionExpr{ + pos: position{line: 28, col: 8, offset: 595}, + run: (*parser).callonRule1, + expr: &seqExpr{ + pos: position{line: 28, col: 8, offset: 595}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 28, col: 8, offset: 595}, + label: "name", + expr: &ruleRefExpr{ + pos: position{line: 28, col: 13, offset: 600}, + name: "IdentifierName", + }, + }, + &ruleRefExpr{ + pos: position{line: 28, col: 28, offset: 615}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 28, col: 31, offset: 618}, + label: "display", + expr: &zeroOrOneExpr{ + pos: position{line: 28, col: 39, offset: 626}, + expr: &seqExpr{ + pos: position{line: 28, col: 41, offset: 628}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 28, col: 41, offset: 628}, + name: "StringLiteral", + }, + &ruleRefExpr{ + pos: position{line: 28, col: 55, offset: 642}, + name: "__", + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 28, col: 61, offset: 648}, + name: "RuleDefOp", + }, + &ruleRefExpr{ + pos: position{line: 28, col: 71, offset: 658}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 28, col: 74, offset: 661}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 28, col: 79, offset: 666}, + name: "Expression", + }, + }, + &ruleRefExpr{ + pos: position{line: 28, col: 90, offset: 677}, + name: "EOS", + }, + }, + }, + }, + }, + { + name: "Expression", + pos: position{line: 41, col: 1, offset: 961}, + expr: &ruleRefExpr{ + pos: position{line: 41, col: 14, offset: 976}, + name: "RecoveryExpr", + }, + }, + { + name: "RecoveryExpr", + pos: position{line: 43, col: 1, offset: 990}, + expr: &actionExpr{ + pos: position{line: 43, col: 16, offset: 1007}, + run: (*parser).callonRecoveryExpr1, + expr: &seqExpr{ + pos: position{line: 43, col: 16, offset: 
1007}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 43, col: 16, offset: 1007}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 43, col: 21, offset: 1012}, + name: "ChoiceExpr", + }, + }, + &labeledExpr{ + pos: position{line: 43, col: 32, offset: 1023}, + label: "recoverExprs", + expr: &zeroOrMoreExpr{ + pos: position{line: 43, col: 45, offset: 1036}, + expr: &seqExpr{ + pos: position{line: 43, col: 47, offset: 1038}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 43, col: 47, offset: 1038}, + name: "__", + }, + &litMatcher{ + pos: position{line: 43, col: 50, offset: 1041}, + val: "//{", + ignoreCase: false, + want: "\"//{\"", + }, + &ruleRefExpr{ + pos: position{line: 43, col: 56, offset: 1047}, + name: "__", + }, + &ruleRefExpr{ + pos: position{line: 43, col: 59, offset: 1050}, + name: "Labels", + }, + &ruleRefExpr{ + pos: position{line: 43, col: 66, offset: 1057}, + name: "__", + }, + &litMatcher{ + pos: position{line: 43, col: 69, offset: 1060}, + val: "}", + ignoreCase: false, + want: "\"}\"", + }, + &ruleRefExpr{ + pos: position{line: 43, col: 73, offset: 1064}, + name: "__", + }, + &ruleRefExpr{ + pos: position{line: 43, col: 76, offset: 1067}, + name: "ChoiceExpr", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "Labels", + pos: position{line: 58, col: 1, offset: 1481}, + expr: &actionExpr{ + pos: position{line: 58, col: 10, offset: 1492}, + run: (*parser).callonLabels1, + expr: &seqExpr{ + pos: position{line: 58, col: 10, offset: 1492}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 58, col: 10, offset: 1492}, + label: "label", + expr: &ruleRefExpr{ + pos: position{line: 58, col: 16, offset: 1498}, + name: "IdentifierName", + }, + }, + &labeledExpr{ + pos: position{line: 58, col: 31, offset: 1513}, + label: "labels", + expr: &zeroOrMoreExpr{ + pos: position{line: 58, col: 38, offset: 1520}, + expr: &seqExpr{ + pos: position{line: 58, col: 40, offset: 1522}, + exprs: []interface{}{ + 
&ruleRefExpr{ + pos: position{line: 58, col: 40, offset: 1522}, + name: "__", + }, + &litMatcher{ + pos: position{line: 58, col: 43, offset: 1525}, + val: ",", + ignoreCase: false, + want: "\",\"", + }, + &ruleRefExpr{ + pos: position{line: 58, col: 47, offset: 1529}, + name: "__", + }, + &ruleRefExpr{ + pos: position{line: 58, col: 50, offset: 1532}, + name: "IdentifierName", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ChoiceExpr", + pos: position{line: 67, col: 1, offset: 1861}, + expr: &actionExpr{ + pos: position{line: 67, col: 14, offset: 1876}, + run: (*parser).callonChoiceExpr1, + expr: &seqExpr{ + pos: position{line: 67, col: 14, offset: 1876}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 67, col: 14, offset: 1876}, + label: "first", + expr: &ruleRefExpr{ + pos: position{line: 67, col: 20, offset: 1882}, + name: "ActionExpr", + }, + }, + &labeledExpr{ + pos: position{line: 67, col: 31, offset: 1893}, + label: "rest", + expr: &zeroOrMoreExpr{ + pos: position{line: 67, col: 36, offset: 1898}, + expr: &seqExpr{ + pos: position{line: 67, col: 38, offset: 1900}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 67, col: 38, offset: 1900}, + name: "__", + }, + &litMatcher{ + pos: position{line: 67, col: 41, offset: 1903}, + val: "/", + ignoreCase: false, + want: "\"/\"", + }, + &ruleRefExpr{ + pos: position{line: 67, col: 45, offset: 1907}, + name: "__", + }, + &ruleRefExpr{ + pos: position{line: 67, col: 48, offset: 1910}, + name: "ActionExpr", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ActionExpr", + pos: position{line: 82, col: 1, offset: 2315}, + expr: &actionExpr{ + pos: position{line: 82, col: 14, offset: 2330}, + run: (*parser).callonActionExpr1, + expr: &seqExpr{ + pos: position{line: 82, col: 14, offset: 2330}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 82, col: 14, offset: 2330}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 82, col: 19, offset: 2335}, + name: 
"SeqExpr", + }, + }, + &labeledExpr{ + pos: position{line: 82, col: 27, offset: 2343}, + label: "code", + expr: &zeroOrOneExpr{ + pos: position{line: 82, col: 32, offset: 2348}, + expr: &seqExpr{ + pos: position{line: 82, col: 34, offset: 2350}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 82, col: 34, offset: 2350}, + name: "__", + }, + &ruleRefExpr{ + pos: position{line: 82, col: 37, offset: 2353}, + name: "CodeBlock", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "SeqExpr", + pos: position{line: 96, col: 1, offset: 2619}, + expr: &actionExpr{ + pos: position{line: 96, col: 11, offset: 2631}, + run: (*parser).callonSeqExpr1, + expr: &seqExpr{ + pos: position{line: 96, col: 11, offset: 2631}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 96, col: 11, offset: 2631}, + label: "first", + expr: &ruleRefExpr{ + pos: position{line: 96, col: 17, offset: 2637}, + name: "LabeledExpr", + }, + }, + &labeledExpr{ + pos: position{line: 96, col: 29, offset: 2649}, + label: "rest", + expr: &zeroOrMoreExpr{ + pos: position{line: 96, col: 34, offset: 2654}, + expr: &seqExpr{ + pos: position{line: 96, col: 36, offset: 2656}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 96, col: 36, offset: 2656}, + name: "__", + }, + &ruleRefExpr{ + pos: position{line: 96, col: 39, offset: 2659}, + name: "LabeledExpr", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "LabeledExpr", + pos: position{line: 109, col: 1, offset: 3010}, + expr: &choiceExpr{ + pos: position{line: 109, col: 15, offset: 3026}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 109, col: 15, offset: 3026}, + run: (*parser).callonLabeledExpr2, + expr: &seqExpr{ + pos: position{line: 109, col: 15, offset: 3026}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 109, col: 15, offset: 3026}, + label: "label", + expr: &ruleRefExpr{ + pos: position{line: 109, col: 21, offset: 3032}, + name: "Identifier", + }, + }, + &ruleRefExpr{ + 
pos: position{line: 109, col: 32, offset: 3043}, + name: "__", + }, + &litMatcher{ + pos: position{line: 109, col: 35, offset: 3046}, + val: ":", + ignoreCase: false, + want: "\":\"", + }, + &ruleRefExpr{ + pos: position{line: 109, col: 39, offset: 3050}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 109, col: 42, offset: 3053}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 109, col: 47, offset: 3058}, + name: "PrefixedExpr", + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 115, col: 5, offset: 3231}, + name: "PrefixedExpr", + }, + &ruleRefExpr{ + pos: position{line: 115, col: 20, offset: 3246}, + name: "ThrowExpr", + }, + }, + }, + }, + { + name: "PrefixedExpr", + pos: position{line: 117, col: 1, offset: 3257}, + expr: &choiceExpr{ + pos: position{line: 117, col: 16, offset: 3274}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 117, col: 16, offset: 3274}, + run: (*parser).callonPrefixedExpr2, + expr: &seqExpr{ + pos: position{line: 117, col: 16, offset: 3274}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 117, col: 16, offset: 3274}, + label: "op", + expr: &ruleRefExpr{ + pos: position{line: 117, col: 19, offset: 3277}, + name: "PrefixedOp", + }, + }, + &ruleRefExpr{ + pos: position{line: 117, col: 30, offset: 3288}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 117, col: 33, offset: 3291}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 117, col: 38, offset: 3296}, + name: "SuffixedExpr", + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 128, col: 5, offset: 3578}, + name: "SuffixedExpr", + }, + }, + }, + }, + { + name: "PrefixedOp", + pos: position{line: 130, col: 1, offset: 3592}, + expr: &actionExpr{ + pos: position{line: 130, col: 14, offset: 3607}, + run: (*parser).callonPrefixedOp1, + expr: &choiceExpr{ + pos: position{line: 130, col: 16, offset: 3609}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 130, col: 16, offset: 
3609}, + val: "&", + ignoreCase: false, + want: "\"&\"", + }, + &litMatcher{ + pos: position{line: 130, col: 22, offset: 3615}, + val: "!", + ignoreCase: false, + want: "\"!\"", + }, + }, + }, + }, + }, + { + name: "SuffixedExpr", + pos: position{line: 134, col: 1, offset: 3657}, + expr: &choiceExpr{ + pos: position{line: 134, col: 16, offset: 3674}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 134, col: 16, offset: 3674}, + run: (*parser).callonSuffixedExpr2, + expr: &seqExpr{ + pos: position{line: 134, col: 16, offset: 3674}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 134, col: 16, offset: 3674}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 134, col: 21, offset: 3679}, + name: "PrimaryExpr", + }, + }, + &ruleRefExpr{ + pos: position{line: 134, col: 33, offset: 3691}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 134, col: 36, offset: 3694}, + label: "op", + expr: &ruleRefExpr{ + pos: position{line: 134, col: 39, offset: 3697}, + name: "SuffixedOp", + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 153, col: 5, offset: 4227}, + name: "PrimaryExpr", + }, + }, + }, + }, + { + name: "SuffixedOp", + pos: position{line: 155, col: 1, offset: 4240}, + expr: &actionExpr{ + pos: position{line: 155, col: 14, offset: 4255}, + run: (*parser).callonSuffixedOp1, + expr: &choiceExpr{ + pos: position{line: 155, col: 16, offset: 4257}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 155, col: 16, offset: 4257}, + val: "?", + ignoreCase: false, + want: "\"?\"", + }, + &litMatcher{ + pos: position{line: 155, col: 22, offset: 4263}, + val: "*", + ignoreCase: false, + want: "\"*\"", + }, + &litMatcher{ + pos: position{line: 155, col: 28, offset: 4269}, + val: "+", + ignoreCase: false, + want: "\"+\"", + }, + }, + }, + }, + }, + { + name: "PrimaryExpr", + pos: position{line: 159, col: 1, offset: 4311}, + expr: &choiceExpr{ + pos: position{line: 159, col: 15, offset: 4327}, + 
alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 159, col: 15, offset: 4327}, + name: "LitMatcher", + }, + &ruleRefExpr{ + pos: position{line: 159, col: 28, offset: 4340}, + name: "CharClassMatcher", + }, + &ruleRefExpr{ + pos: position{line: 159, col: 47, offset: 4359}, + name: "AnyMatcher", + }, + &ruleRefExpr{ + pos: position{line: 159, col: 60, offset: 4372}, + name: "RuleRefExpr", + }, + &ruleRefExpr{ + pos: position{line: 159, col: 74, offset: 4386}, + name: "SemanticPredExpr", + }, + &actionExpr{ + pos: position{line: 159, col: 93, offset: 4405}, + run: (*parser).callonPrimaryExpr7, + expr: &seqExpr{ + pos: position{line: 159, col: 93, offset: 4405}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 159, col: 93, offset: 4405}, + val: "(", + ignoreCase: false, + want: "\"(\"", + }, + &ruleRefExpr{ + pos: position{line: 159, col: 97, offset: 4409}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 159, col: 100, offset: 4412}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 159, col: 105, offset: 4417}, + name: "Expression", + }, + }, + &ruleRefExpr{ + pos: position{line: 159, col: 116, offset: 4428}, + name: "__", + }, + &litMatcher{ + pos: position{line: 159, col: 119, offset: 4431}, + val: ")", + ignoreCase: false, + want: "\")\"", + }, + }, + }, + }, + }, + }, + }, + { + name: "RuleRefExpr", + pos: position{line: 162, col: 1, offset: 4460}, + expr: &actionExpr{ + pos: position{line: 162, col: 15, offset: 4476}, + run: (*parser).callonRuleRefExpr1, + expr: &seqExpr{ + pos: position{line: 162, col: 15, offset: 4476}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 162, col: 15, offset: 4476}, + label: "name", + expr: &ruleRefExpr{ + pos: position{line: 162, col: 20, offset: 4481}, + name: "IdentifierName", + }, + }, + ¬Expr{ + pos: position{line: 162, col: 35, offset: 4496}, + expr: &seqExpr{ + pos: position{line: 162, col: 38, offset: 4499}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: 
position{line: 162, col: 38, offset: 4499}, + name: "__", + }, + &zeroOrOneExpr{ + pos: position{line: 162, col: 41, offset: 4502}, + expr: &seqExpr{ + pos: position{line: 162, col: 43, offset: 4504}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 162, col: 43, offset: 4504}, + name: "StringLiteral", + }, + &ruleRefExpr{ + pos: position{line: 162, col: 57, offset: 4518}, + name: "__", + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 162, col: 63, offset: 4524}, + name: "RuleDefOp", + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "SemanticPredExpr", + pos: position{line: 167, col: 1, offset: 4640}, + expr: &actionExpr{ + pos: position{line: 167, col: 20, offset: 4661}, + run: (*parser).callonSemanticPredExpr1, + expr: &seqExpr{ + pos: position{line: 167, col: 20, offset: 4661}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 167, col: 20, offset: 4661}, + label: "op", + expr: &ruleRefExpr{ + pos: position{line: 167, col: 23, offset: 4664}, + name: "SemanticPredOp", + }, + }, + &ruleRefExpr{ + pos: position{line: 167, col: 38, offset: 4679}, + name: "__", + }, + &labeledExpr{ + pos: position{line: 167, col: 41, offset: 4682}, + label: "code", + expr: &ruleRefExpr{ + pos: position{line: 167, col: 46, offset: 4687}, + name: "CodeBlock", + }, + }, + }, + }, + }, + }, + { + name: "SemanticPredOp", + pos: position{line: 187, col: 1, offset: 5134}, + expr: &actionExpr{ + pos: position{line: 187, col: 18, offset: 5153}, + run: (*parser).callonSemanticPredOp1, + expr: &choiceExpr{ + pos: position{line: 187, col: 20, offset: 5155}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 187, col: 20, offset: 5155}, + val: "#", + ignoreCase: false, + want: "\"#\"", + }, + &litMatcher{ + pos: position{line: 187, col: 26, offset: 5161}, + val: "&", + ignoreCase: false, + want: "\"&\"", + }, + &litMatcher{ + pos: position{line: 187, col: 32, offset: 5167}, + val: "!", + ignoreCase: false, + want: "\"!\"", + }, + }, + }, + }, + 
}, + { + name: "RuleDefOp", + pos: position{line: 191, col: 1, offset: 5209}, + expr: &choiceExpr{ + pos: position{line: 191, col: 13, offset: 5223}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 191, col: 13, offset: 5223}, + val: "=", + ignoreCase: false, + want: "\"=\"", + }, + &litMatcher{ + pos: position{line: 191, col: 19, offset: 5229}, + val: "<-", + ignoreCase: false, + want: "\"<-\"", + }, + &litMatcher{ + pos: position{line: 191, col: 26, offset: 5236}, + val: "←", + ignoreCase: false, + want: "\"←\"", + }, + &litMatcher{ + pos: position{line: 191, col: 37, offset: 5247}, + val: "⟵", + ignoreCase: false, + want: "\"⟵\"", + }, + }, + }, + }, + { + name: "SourceChar", + pos: position{line: 193, col: 1, offset: 5257}, + expr: &anyMatcher{ + line: 193, col: 14, offset: 5272, + }, + }, + { + name: "Comment", + pos: position{line: 194, col: 1, offset: 5274}, + expr: &choiceExpr{ + pos: position{line: 194, col: 11, offset: 5286}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 194, col: 11, offset: 5286}, + name: "MultiLineComment", + }, + &ruleRefExpr{ + pos: position{line: 194, col: 30, offset: 5305}, + name: "SingleLineComment", + }, + }, + }, + }, + { + name: "MultiLineComment", + pos: position{line: 195, col: 1, offset: 5323}, + expr: &seqExpr{ + pos: position{line: 195, col: 20, offset: 5344}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 195, col: 20, offset: 5344}, + val: "/*", + ignoreCase: false, + want: "\"/*\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 195, col: 25, offset: 5349}, + expr: &seqExpr{ + pos: position{line: 195, col: 27, offset: 5351}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 195, col: 27, offset: 5351}, + expr: &litMatcher{ + pos: position{line: 195, col: 28, offset: 5352}, + val: "*/", + ignoreCase: false, + want: "\"*/\"", + }, + }, + &ruleRefExpr{ + pos: position{line: 195, col: 33, offset: 5357}, + name: "SourceChar", + }, + }, + }, + }, + &litMatcher{ + 
pos: position{line: 195, col: 47, offset: 5371}, + val: "*/", + ignoreCase: false, + want: "\"*/\"", + }, + }, + }, + }, + { + name: "MultiLineCommentNoLineTerminator", + pos: position{line: 196, col: 1, offset: 5376}, + expr: &seqExpr{ + pos: position{line: 196, col: 36, offset: 5413}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 196, col: 36, offset: 5413}, + val: "/*", + ignoreCase: false, + want: "\"/*\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 196, col: 41, offset: 5418}, + expr: &seqExpr{ + pos: position{line: 196, col: 43, offset: 5420}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 196, col: 43, offset: 5420}, + expr: &choiceExpr{ + pos: position{line: 196, col: 46, offset: 5423}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 196, col: 46, offset: 5423}, + val: "*/", + ignoreCase: false, + want: "\"*/\"", + }, + &ruleRefExpr{ + pos: position{line: 196, col: 53, offset: 5430}, + name: "EOL", + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 196, col: 59, offset: 5436}, + name: "SourceChar", + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 196, col: 73, offset: 5450}, + val: "*/", + ignoreCase: false, + want: "\"*/\"", + }, + }, + }, + }, + { + name: "SingleLineComment", + pos: position{line: 197, col: 1, offset: 5455}, + expr: &seqExpr{ + pos: position{line: 197, col: 21, offset: 5477}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 197, col: 21, offset: 5477}, + expr: &litMatcher{ + pos: position{line: 197, col: 23, offset: 5479}, + val: "//{", + ignoreCase: false, + want: "\"//{\"", + }, + }, + &litMatcher{ + pos: position{line: 197, col: 30, offset: 5486}, + val: "//", + ignoreCase: false, + want: "\"//\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 197, col: 35, offset: 5491}, + expr: &seqExpr{ + pos: position{line: 197, col: 37, offset: 5493}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 197, col: 37, offset: 5493}, + expr: &ruleRefExpr{ + pos: position{line: 197, 
col: 38, offset: 5494}, + name: "EOL", + }, + }, + &ruleRefExpr{ + pos: position{line: 197, col: 42, offset: 5498}, + name: "SourceChar", + }, + }, + }, + }, + }, + }, + }, + { + name: "Identifier", + pos: position{line: 199, col: 1, offset: 5513}, + expr: &actionExpr{ + pos: position{line: 199, col: 14, offset: 5528}, + run: (*parser).callonIdentifier1, + expr: &labeledExpr{ + pos: position{line: 199, col: 14, offset: 5528}, + label: "ident", + expr: &ruleRefExpr{ + pos: position{line: 199, col: 20, offset: 5534}, + name: "IdentifierName", + }, + }, + }, + }, + { + name: "IdentifierName", + pos: position{line: 207, col: 1, offset: 5753}, + expr: &actionExpr{ + pos: position{line: 207, col: 18, offset: 5772}, + run: (*parser).callonIdentifierName1, + expr: &seqExpr{ + pos: position{line: 207, col: 18, offset: 5772}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 207, col: 18, offset: 5772}, + name: "IdentifierStart", + }, + &zeroOrMoreExpr{ + pos: position{line: 207, col: 34, offset: 5788}, + expr: &ruleRefExpr{ + pos: position{line: 207, col: 34, offset: 5788}, + name: "IdentifierPart", + }, + }, + }, + }, + }, + }, + { + name: "IdentifierStart", + pos: position{line: 210, col: 1, offset: 5870}, + expr: &charClassMatcher{ + pos: position{line: 210, col: 19, offset: 5890}, + val: "[\\pL_]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + { + name: "IdentifierPart", + pos: position{line: 211, col: 1, offset: 5897}, + expr: &choiceExpr{ + pos: position{line: 211, col: 18, offset: 5916}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 211, col: 18, offset: 5916}, + name: "IdentifierStart", + }, + &charClassMatcher{ + pos: position{line: 211, col: 36, offset: 5934}, + val: "[\\p{Nd}]", + classes: []*unicode.RangeTable{rangeTable("Nd")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + { + name: "LitMatcher", + pos: position{line: 213, col: 1, 
offset: 5944}, + expr: &actionExpr{ + pos: position{line: 213, col: 14, offset: 5959}, + run: (*parser).callonLitMatcher1, + expr: &seqExpr{ + pos: position{line: 213, col: 14, offset: 5959}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 213, col: 14, offset: 5959}, + label: "lit", + expr: &ruleRefExpr{ + pos: position{line: 213, col: 18, offset: 5963}, + name: "StringLiteral", + }, + }, + &labeledExpr{ + pos: position{line: 213, col: 32, offset: 5977}, + label: "ignore", + expr: &zeroOrOneExpr{ + pos: position{line: 213, col: 39, offset: 5984}, + expr: &litMatcher{ + pos: position{line: 213, col: 39, offset: 5984}, + val: "i", + ignoreCase: false, + want: "\"i\"", + }, + }, + }, + }, + }, + }, + }, + { + name: "StringLiteral", + pos: position{line: 226, col: 1, offset: 6383}, + expr: &choiceExpr{ + pos: position{line: 226, col: 17, offset: 6401}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 226, col: 17, offset: 6401}, + run: (*parser).callonStringLiteral2, + expr: &choiceExpr{ + pos: position{line: 226, col: 19, offset: 6403}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 226, col: 19, offset: 6403}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 226, col: 19, offset: 6403}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 226, col: 23, offset: 6407}, + expr: &ruleRefExpr{ + pos: position{line: 226, col: 23, offset: 6407}, + name: "DoubleStringChar", + }, + }, + &litMatcher{ + pos: position{line: 226, col: 41, offset: 6425}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + }, + }, + &seqExpr{ + pos: position{line: 226, col: 47, offset: 6431}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 226, col: 47, offset: 6431}, + val: "'", + ignoreCase: false, + want: "\"'\"", + }, + &ruleRefExpr{ + pos: position{line: 226, col: 51, offset: 6435}, + name: "SingleStringChar", + }, + &litMatcher{ + pos: position{line: 226, col: 
68, offset: 6452}, + val: "'", + ignoreCase: false, + want: "\"'\"", + }, + }, + }, + &seqExpr{ + pos: position{line: 226, col: 74, offset: 6458}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 226, col: 74, offset: 6458}, + val: "`", + ignoreCase: false, + want: "\"`\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 226, col: 78, offset: 6462}, + expr: &ruleRefExpr{ + pos: position{line: 226, col: 78, offset: 6462}, + name: "RawStringChar", + }, + }, + &litMatcher{ + pos: position{line: 226, col: 93, offset: 6477}, + val: "`", + ignoreCase: false, + want: "\"`\"", + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 228, col: 5, offset: 6550}, + run: (*parser).callonStringLiteral18, + expr: &choiceExpr{ + pos: position{line: 228, col: 7, offset: 6552}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 228, col: 9, offset: 6554}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 228, col: 9, offset: 6554}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 228, col: 13, offset: 6558}, + expr: &ruleRefExpr{ + pos: position{line: 228, col: 13, offset: 6558}, + name: "DoubleStringChar", + }, + }, + &choiceExpr{ + pos: position{line: 228, col: 33, offset: 6578}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 228, col: 33, offset: 6578}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 228, col: 39, offset: 6584}, + name: "EOF", + }, + }, + }, + }, + }, + &seqExpr{ + pos: position{line: 228, col: 51, offset: 6596}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 228, col: 51, offset: 6596}, + val: "'", + ignoreCase: false, + want: "\"'\"", + }, + &zeroOrOneExpr{ + pos: position{line: 228, col: 55, offset: 6600}, + expr: &ruleRefExpr{ + pos: position{line: 228, col: 55, offset: 6600}, + name: "SingleStringChar", + }, + }, + &choiceExpr{ + pos: position{line: 228, col: 75, offset: 6620}, + alternatives: []interface{}{ + 
&ruleRefExpr{ + pos: position{line: 228, col: 75, offset: 6620}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 228, col: 81, offset: 6626}, + name: "EOF", + }, + }, + }, + }, + }, + &seqExpr{ + pos: position{line: 228, col: 91, offset: 6636}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 228, col: 91, offset: 6636}, + val: "`", + ignoreCase: false, + want: "\"`\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 228, col: 95, offset: 6640}, + expr: &ruleRefExpr{ + pos: position{line: 228, col: 95, offset: 6640}, + name: "RawStringChar", + }, + }, + &ruleRefExpr{ + pos: position{line: 228, col: 110, offset: 6655}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "DoubleStringChar", + pos: position{line: 232, col: 1, offset: 6757}, + expr: &choiceExpr{ + pos: position{line: 232, col: 20, offset: 6778}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 232, col: 20, offset: 6778}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 232, col: 20, offset: 6778}, + expr: &choiceExpr{ + pos: position{line: 232, col: 23, offset: 6781}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 232, col: 23, offset: 6781}, + val: "\"", + ignoreCase: false, + want: "\"\\\"\"", + }, + &litMatcher{ + pos: position{line: 232, col: 29, offset: 6787}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + &ruleRefExpr{ + pos: position{line: 232, col: 36, offset: 6794}, + name: "EOL", + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 232, col: 42, offset: 6800}, + name: "SourceChar", + }, + }, + }, + &seqExpr{ + pos: position{line: 232, col: 55, offset: 6813}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 232, col: 55, offset: 6813}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + &ruleRefExpr{ + pos: position{line: 232, col: 60, offset: 6818}, + name: "DoubleStringEscape", + }, + }, + }, + }, + }, + }, + { + name: "SingleStringChar", + pos: position{line: 233, col: 
1, offset: 6837}, + expr: &choiceExpr{ + pos: position{line: 233, col: 20, offset: 6858}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 233, col: 20, offset: 6858}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 233, col: 20, offset: 6858}, + expr: &choiceExpr{ + pos: position{line: 233, col: 23, offset: 6861}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 233, col: 23, offset: 6861}, + val: "'", + ignoreCase: false, + want: "\"'\"", + }, + &litMatcher{ + pos: position{line: 233, col: 29, offset: 6867}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + &ruleRefExpr{ + pos: position{line: 233, col: 36, offset: 6874}, + name: "EOL", + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 233, col: 42, offset: 6880}, + name: "SourceChar", + }, + }, + }, + &seqExpr{ + pos: position{line: 233, col: 55, offset: 6893}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 233, col: 55, offset: 6893}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + &ruleRefExpr{ + pos: position{line: 233, col: 60, offset: 6898}, + name: "SingleStringEscape", + }, + }, + }, + }, + }, + }, + { + name: "RawStringChar", + pos: position{line: 234, col: 1, offset: 6917}, + expr: &seqExpr{ + pos: position{line: 234, col: 17, offset: 6935}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 234, col: 17, offset: 6935}, + expr: &litMatcher{ + pos: position{line: 234, col: 18, offset: 6936}, + val: "`", + ignoreCase: false, + want: "\"`\"", + }, + }, + &ruleRefExpr{ + pos: position{line: 234, col: 22, offset: 6940}, + name: "SourceChar", + }, + }, + }, + }, + { + name: "DoubleStringEscape", + pos: position{line: 236, col: 1, offset: 6952}, + expr: &choiceExpr{ + pos: position{line: 236, col: 22, offset: 6975}, + alternatives: []interface{}{ + &choiceExpr{ + pos: position{line: 236, col: 24, offset: 6977}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 236, col: 24, offset: 6977}, + val: "\"", + 
ignoreCase: false, + want: "\"\\\"\"", + }, + &ruleRefExpr{ + pos: position{line: 236, col: 30, offset: 6983}, + name: "CommonEscapeSequence", + }, + }, + }, + &actionExpr{ + pos: position{line: 237, col: 7, offset: 7012}, + run: (*parser).callonDoubleStringEscape5, + expr: &choiceExpr{ + pos: position{line: 237, col: 9, offset: 7014}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 237, col: 9, offset: 7014}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 237, col: 22, offset: 7027}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 237, col: 28, offset: 7033}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + { + name: "SingleStringEscape", + pos: position{line: 240, col: 1, offset: 7098}, + expr: &choiceExpr{ + pos: position{line: 240, col: 22, offset: 7121}, + alternatives: []interface{}{ + &choiceExpr{ + pos: position{line: 240, col: 24, offset: 7123}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 240, col: 24, offset: 7123}, + val: "'", + ignoreCase: false, + want: "\"'\"", + }, + &ruleRefExpr{ + pos: position{line: 240, col: 30, offset: 7129}, + name: "CommonEscapeSequence", + }, + }, + }, + &actionExpr{ + pos: position{line: 241, col: 7, offset: 7158}, + run: (*parser).callonSingleStringEscape5, + expr: &choiceExpr{ + pos: position{line: 241, col: 9, offset: 7160}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 241, col: 9, offset: 7160}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 241, col: 22, offset: 7173}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 241, col: 28, offset: 7179}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + { + name: "CommonEscapeSequence", + pos: position{line: 245, col: 1, offset: 7245}, + expr: &choiceExpr{ + pos: position{line: 245, col: 24, offset: 7270}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 245, col: 24, offset: 7270}, + name: "SingleCharEscape", + }, + &ruleRefExpr{ + 
pos: position{line: 245, col: 43, offset: 7289}, + name: "OctalEscape", + }, + &ruleRefExpr{ + pos: position{line: 245, col: 57, offset: 7303}, + name: "HexEscape", + }, + &ruleRefExpr{ + pos: position{line: 245, col: 69, offset: 7315}, + name: "LongUnicodeEscape", + }, + &ruleRefExpr{ + pos: position{line: 245, col: 89, offset: 7335}, + name: "ShortUnicodeEscape", + }, + }, + }, + }, + { + name: "SingleCharEscape", + pos: position{line: 246, col: 1, offset: 7354}, + expr: &choiceExpr{ + pos: position{line: 246, col: 20, offset: 7375}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 246, col: 20, offset: 7375}, + val: "a", + ignoreCase: false, + want: "\"a\"", + }, + &litMatcher{ + pos: position{line: 246, col: 26, offset: 7381}, + val: "b", + ignoreCase: false, + want: "\"b\"", + }, + &litMatcher{ + pos: position{line: 246, col: 32, offset: 7387}, + val: "n", + ignoreCase: false, + want: "\"n\"", + }, + &litMatcher{ + pos: position{line: 246, col: 38, offset: 7393}, + val: "f", + ignoreCase: false, + want: "\"f\"", + }, + &litMatcher{ + pos: position{line: 246, col: 44, offset: 7399}, + val: "r", + ignoreCase: false, + want: "\"r\"", + }, + &litMatcher{ + pos: position{line: 246, col: 50, offset: 7405}, + val: "t", + ignoreCase: false, + want: "\"t\"", + }, + &litMatcher{ + pos: position{line: 246, col: 56, offset: 7411}, + val: "v", + ignoreCase: false, + want: "\"v\"", + }, + &litMatcher{ + pos: position{line: 246, col: 62, offset: 7417}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + }, + }, + }, + { + name: "OctalEscape", + pos: position{line: 247, col: 1, offset: 7422}, + expr: &choiceExpr{ + pos: position{line: 247, col: 15, offset: 7438}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 247, col: 15, offset: 7438}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 247, col: 15, offset: 7438}, + name: "OctalDigit", + }, + &ruleRefExpr{ + pos: position{line: 247, col: 26, offset: 7449}, + name: 
"OctalDigit", + }, + &ruleRefExpr{ + pos: position{line: 247, col: 37, offset: 7460}, + name: "OctalDigit", + }, + }, + }, + &actionExpr{ + pos: position{line: 248, col: 7, offset: 7477}, + run: (*parser).callonOctalEscape6, + expr: &seqExpr{ + pos: position{line: 248, col: 7, offset: 7477}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 248, col: 7, offset: 7477}, + name: "OctalDigit", + }, + &choiceExpr{ + pos: position{line: 248, col: 20, offset: 7490}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 248, col: 20, offset: 7490}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 248, col: 33, offset: 7503}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 248, col: 39, offset: 7509}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "HexEscape", + pos: position{line: 251, col: 1, offset: 7570}, + expr: &choiceExpr{ + pos: position{line: 251, col: 13, offset: 7584}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 251, col: 13, offset: 7584}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 251, col: 13, offset: 7584}, + val: "x", + ignoreCase: false, + want: "\"x\"", + }, + &ruleRefExpr{ + pos: position{line: 251, col: 17, offset: 7588}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 251, col: 26, offset: 7597}, + name: "HexDigit", + }, + }, + }, + &actionExpr{ + pos: position{line: 252, col: 7, offset: 7612}, + run: (*parser).callonHexEscape6, + expr: &seqExpr{ + pos: position{line: 252, col: 7, offset: 7612}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 252, col: 7, offset: 7612}, + val: "x", + ignoreCase: false, + want: "\"x\"", + }, + &choiceExpr{ + pos: position{line: 252, col: 13, offset: 7618}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 252, col: 13, offset: 7618}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 252, col: 26, offset: 7631}, + name: "EOL", + }, + 
&ruleRefExpr{ + pos: position{line: 252, col: 32, offset: 7637}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "LongUnicodeEscape", + pos: position{line: 255, col: 1, offset: 7704}, + expr: &choiceExpr{ + pos: position{line: 256, col: 5, offset: 7730}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 256, col: 5, offset: 7730}, + run: (*parser).callonLongUnicodeEscape2, + expr: &seqExpr{ + pos: position{line: 256, col: 5, offset: 7730}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 256, col: 5, offset: 7730}, + val: "U", + ignoreCase: false, + want: "\"U\"", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 9, offset: 7734}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 18, offset: 7743}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 27, offset: 7752}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 36, offset: 7761}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 45, offset: 7770}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 54, offset: 7779}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 63, offset: 7788}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 256, col: 72, offset: 7797}, + name: "HexDigit", + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 259, col: 7, offset: 7899}, + run: (*parser).callonLongUnicodeEscape13, + expr: &seqExpr{ + pos: position{line: 259, col: 7, offset: 7899}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 259, col: 7, offset: 7899}, + val: "U", + ignoreCase: false, + want: "\"U\"", + }, + &choiceExpr{ + pos: position{line: 259, col: 13, offset: 7905}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 259, col: 13, offset: 7905}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 259, col: 26, offset: 7918}, + name: "EOL", + }, + &ruleRefExpr{ + 
pos: position{line: 259, col: 32, offset: 7924}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ShortUnicodeEscape", + pos: position{line: 262, col: 1, offset: 7987}, + expr: &choiceExpr{ + pos: position{line: 263, col: 5, offset: 8014}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 263, col: 5, offset: 8014}, + run: (*parser).callonShortUnicodeEscape2, + expr: &seqExpr{ + pos: position{line: 263, col: 5, offset: 8014}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 263, col: 5, offset: 8014}, + val: "u", + ignoreCase: false, + want: "\"u\"", + }, + &ruleRefExpr{ + pos: position{line: 263, col: 9, offset: 8018}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 263, col: 18, offset: 8027}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 263, col: 27, offset: 8036}, + name: "HexDigit", + }, + &ruleRefExpr{ + pos: position{line: 263, col: 36, offset: 8045}, + name: "HexDigit", + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 266, col: 7, offset: 8147}, + run: (*parser).callonShortUnicodeEscape9, + expr: &seqExpr{ + pos: position{line: 266, col: 7, offset: 8147}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 266, col: 7, offset: 8147}, + val: "u", + ignoreCase: false, + want: "\"u\"", + }, + &choiceExpr{ + pos: position{line: 266, col: 13, offset: 8153}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 266, col: 13, offset: 8153}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 266, col: 26, offset: 8166}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 266, col: 32, offset: 8172}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "OctalDigit", + pos: position{line: 270, col: 1, offset: 8236}, + expr: &charClassMatcher{ + pos: position{line: 270, col: 14, offset: 8251}, + val: "[0-7]", + ranges: []rune{'0', '7'}, + ignoreCase: false, + inverted: false, + }, + }, + { + name: "DecimalDigit", 
+ pos: position{line: 271, col: 1, offset: 8257}, + expr: &charClassMatcher{ + pos: position{line: 271, col: 16, offset: 8274}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + { + name: "HexDigit", + pos: position{line: 272, col: 1, offset: 8280}, + expr: &charClassMatcher{ + pos: position{line: 272, col: 12, offset: 8293}, + val: "[0-9a-f]i", + ranges: []rune{'0', '9', 'a', 'f'}, + ignoreCase: true, + inverted: false, + }, + }, + { + name: "CharClassMatcher", + pos: position{line: 274, col: 1, offset: 8304}, + expr: &choiceExpr{ + pos: position{line: 274, col: 20, offset: 8325}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 274, col: 20, offset: 8325}, + run: (*parser).callonCharClassMatcher2, + expr: &seqExpr{ + pos: position{line: 274, col: 20, offset: 8325}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 274, col: 20, offset: 8325}, + val: "[", + ignoreCase: false, + want: "\"[\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 274, col: 24, offset: 8329}, + expr: &choiceExpr{ + pos: position{line: 274, col: 26, offset: 8331}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 274, col: 26, offset: 8331}, + name: "ClassCharRange", + }, + &ruleRefExpr{ + pos: position{line: 274, col: 43, offset: 8348}, + name: "ClassChar", + }, + &seqExpr{ + pos: position{line: 274, col: 55, offset: 8360}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 274, col: 55, offset: 8360}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + &ruleRefExpr{ + pos: position{line: 274, col: 60, offset: 8365}, + name: "UnicodeClassEscape", + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 274, col: 82, offset: 8387}, + val: "]", + ignoreCase: false, + want: "\"]\"", + }, + &zeroOrOneExpr{ + pos: position{line: 274, col: 86, offset: 8391}, + expr: &litMatcher{ + pos: position{line: 274, col: 86, offset: 8391}, + val: "i", + ignoreCase: false, + want: 
"\"i\"", + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 278, col: 5, offset: 8498}, + run: (*parser).callonCharClassMatcher15, + expr: &seqExpr{ + pos: position{line: 278, col: 5, offset: 8498}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 278, col: 5, offset: 8498}, + val: "[", + ignoreCase: false, + want: "\"[\"", + }, + &zeroOrMoreExpr{ + pos: position{line: 278, col: 9, offset: 8502}, + expr: &seqExpr{ + pos: position{line: 278, col: 11, offset: 8504}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 278, col: 11, offset: 8504}, + expr: &ruleRefExpr{ + pos: position{line: 278, col: 14, offset: 8507}, + name: "EOL", + }, + }, + &ruleRefExpr{ + pos: position{line: 278, col: 20, offset: 8513}, + name: "SourceChar", + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 278, col: 36, offset: 8529}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 278, col: 36, offset: 8529}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 278, col: 42, offset: 8535}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ClassCharRange", + pos: position{line: 282, col: 1, offset: 8645}, + expr: &seqExpr{ + pos: position{line: 282, col: 18, offset: 8664}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 282, col: 18, offset: 8664}, + name: "ClassChar", + }, + &litMatcher{ + pos: position{line: 282, col: 28, offset: 8674}, + val: "-", + ignoreCase: false, + want: "\"-\"", + }, + &ruleRefExpr{ + pos: position{line: 282, col: 32, offset: 8678}, + name: "ClassChar", + }, + }, + }, + }, + { + name: "ClassChar", + pos: position{line: 283, col: 1, offset: 8688}, + expr: &choiceExpr{ + pos: position{line: 283, col: 13, offset: 8702}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 283, col: 13, offset: 8702}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 283, col: 13, offset: 8702}, + expr: &choiceExpr{ + pos: position{line: 283, col: 16, offset: 8705}, + 
alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 283, col: 16, offset: 8705}, + val: "]", + ignoreCase: false, + want: "\"]\"", + }, + &litMatcher{ + pos: position{line: 283, col: 22, offset: 8711}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + &ruleRefExpr{ + pos: position{line: 283, col: 29, offset: 8718}, + name: "EOL", + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 283, col: 35, offset: 8724}, + name: "SourceChar", + }, + }, + }, + &seqExpr{ + pos: position{line: 283, col: 48, offset: 8737}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 283, col: 48, offset: 8737}, + val: "\\", + ignoreCase: false, + want: "\"\\\\\"", + }, + &ruleRefExpr{ + pos: position{line: 283, col: 53, offset: 8742}, + name: "CharClassEscape", + }, + }, + }, + }, + }, + }, + { + name: "CharClassEscape", + pos: position{line: 284, col: 1, offset: 8758}, + expr: &choiceExpr{ + pos: position{line: 284, col: 19, offset: 8778}, + alternatives: []interface{}{ + &choiceExpr{ + pos: position{line: 284, col: 21, offset: 8780}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 284, col: 21, offset: 8780}, + val: "]", + ignoreCase: false, + want: "\"]\"", + }, + &ruleRefExpr{ + pos: position{line: 284, col: 27, offset: 8786}, + name: "CommonEscapeSequence", + }, + }, + }, + &actionExpr{ + pos: position{line: 285, col: 7, offset: 8815}, + run: (*parser).callonCharClassEscape5, + expr: &seqExpr{ + pos: position{line: 285, col: 7, offset: 8815}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 285, col: 7, offset: 8815}, + expr: &litMatcher{ + pos: position{line: 285, col: 8, offset: 8816}, + val: "p", + ignoreCase: false, + want: "\"p\"", + }, + }, + &choiceExpr{ + pos: position{line: 285, col: 14, offset: 8822}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 285, col: 14, offset: 8822}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 285, col: 27, offset: 8835}, + name: "EOL", + }, 
+ &ruleRefExpr{ + pos: position{line: 285, col: 33, offset: 8841}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "UnicodeClassEscape", + pos: position{line: 289, col: 1, offset: 8907}, + expr: &seqExpr{ + pos: position{line: 289, col: 22, offset: 8930}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 289, col: 22, offset: 8930}, + val: "p", + ignoreCase: false, + want: "\"p\"", + }, + &choiceExpr{ + pos: position{line: 290, col: 7, offset: 8942}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 290, col: 7, offset: 8942}, + name: "SingleCharUnicodeClass", + }, + &actionExpr{ + pos: position{line: 291, col: 7, offset: 8971}, + run: (*parser).callonUnicodeClassEscape5, + expr: &seqExpr{ + pos: position{line: 291, col: 7, offset: 8971}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 291, col: 7, offset: 8971}, + expr: &litMatcher{ + pos: position{line: 291, col: 8, offset: 8972}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + }, + &choiceExpr{ + pos: position{line: 291, col: 14, offset: 8978}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 291, col: 14, offset: 8978}, + name: "SourceChar", + }, + &ruleRefExpr{ + pos: position{line: 291, col: 27, offset: 8991}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 291, col: 33, offset: 8997}, + name: "EOF", + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 292, col: 7, offset: 9068}, + run: (*parser).callonUnicodeClassEscape13, + expr: &seqExpr{ + pos: position{line: 292, col: 7, offset: 9068}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 292, col: 7, offset: 9068}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + &labeledExpr{ + pos: position{line: 292, col: 11, offset: 9072}, + label: "ident", + expr: &ruleRefExpr{ + pos: position{line: 292, col: 17, offset: 9078}, + name: "IdentifierName", + }, + }, + &litMatcher{ + pos: position{line: 292, col: 32, offset: 9093}, + val: 
"}", + ignoreCase: false, + want: "\"}\"", + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 298, col: 7, offset: 9270}, + run: (*parser).callonUnicodeClassEscape19, + expr: &seqExpr{ + pos: position{line: 298, col: 7, offset: 9270}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 298, col: 7, offset: 9270}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + &ruleRefExpr{ + pos: position{line: 298, col: 11, offset: 9274}, + name: "IdentifierName", + }, + &choiceExpr{ + pos: position{line: 298, col: 28, offset: 9291}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 298, col: 28, offset: 9291}, + val: "]", + ignoreCase: false, + want: "\"]\"", + }, + &ruleRefExpr{ + pos: position{line: 298, col: 34, offset: 9297}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 298, col: 40, offset: 9303}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "SingleCharUnicodeClass", + pos: position{line: 302, col: 1, offset: 9386}, + expr: &charClassMatcher{ + pos: position{line: 302, col: 26, offset: 9413}, + val: "[LMNCPZS]", + chars: []rune{'L', 'M', 'N', 'C', 'P', 'Z', 'S'}, + ignoreCase: false, + inverted: false, + }, + }, + { + name: "AnyMatcher", + pos: position{line: 304, col: 1, offset: 9424}, + expr: &actionExpr{ + pos: position{line: 304, col: 14, offset: 9439}, + run: (*parser).callonAnyMatcher1, + expr: &litMatcher{ + pos: position{line: 304, col: 14, offset: 9439}, + val: ".", + ignoreCase: false, + want: "\".\"", + }, + }, + }, + { + name: "ThrowExpr", + pos: position{line: 309, col: 1, offset: 9514}, + expr: &choiceExpr{ + pos: position{line: 309, col: 13, offset: 9528}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 309, col: 13, offset: 9528}, + run: (*parser).callonThrowExpr2, + expr: &seqExpr{ + pos: position{line: 309, col: 13, offset: 9528}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 309, col: 13, offset: 9528}, + val: "%", + ignoreCase: 
false, + want: "\"%\"", + }, + &litMatcher{ + pos: position{line: 309, col: 17, offset: 9532}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + &labeledExpr{ + pos: position{line: 309, col: 21, offset: 9536}, + label: "label", + expr: &ruleRefExpr{ + pos: position{line: 309, col: 27, offset: 9542}, + name: "IdentifierName", + }, + }, + &litMatcher{ + pos: position{line: 309, col: 42, offset: 9557}, + val: "}", + ignoreCase: false, + want: "\"}\"", + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 313, col: 5, offset: 9665}, + run: (*parser).callonThrowExpr9, + expr: &seqExpr{ + pos: position{line: 313, col: 5, offset: 9665}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 313, col: 5, offset: 9665}, + val: "%", + ignoreCase: false, + want: "\"%\"", + }, + &litMatcher{ + pos: position{line: 313, col: 9, offset: 9669}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + &ruleRefExpr{ + pos: position{line: 313, col: 13, offset: 9673}, + name: "IdentifierName", + }, + &ruleRefExpr{ + pos: position{line: 313, col: 28, offset: 9688}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + { + name: "CodeBlock", + pos: position{line: 317, col: 1, offset: 9759}, + expr: &choiceExpr{ + pos: position{line: 317, col: 13, offset: 9773}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 317, col: 13, offset: 9773}, + run: (*parser).callonCodeBlock2, + expr: &seqExpr{ + pos: position{line: 317, col: 13, offset: 9773}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 317, col: 13, offset: 9773}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + &ruleRefExpr{ + pos: position{line: 317, col: 17, offset: 9777}, + name: "Code", + }, + &litMatcher{ + pos: position{line: 317, col: 22, offset: 9782}, + val: "}", + ignoreCase: false, + want: "\"}\"", + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 321, col: 5, offset: 9881}, + run: (*parser).callonCodeBlock7, + expr: &seqExpr{ + pos: position{line: 321, col: 5, 
offset: 9881}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 321, col: 5, offset: 9881}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + &ruleRefExpr{ + pos: position{line: 321, col: 9, offset: 9885}, + name: "Code", + }, + &ruleRefExpr{ + pos: position{line: 321, col: 14, offset: 9890}, + name: "EOF", + }, + }, + }, + }, + }, + }, + }, + { + name: "Code", + pos: position{line: 325, col: 1, offset: 9955}, + expr: &zeroOrMoreExpr{ + pos: position{line: 325, col: 8, offset: 9964}, + expr: &choiceExpr{ + pos: position{line: 325, col: 10, offset: 9966}, + alternatives: []interface{}{ + &oneOrMoreExpr{ + pos: position{line: 325, col: 10, offset: 9966}, + expr: &choiceExpr{ + pos: position{line: 325, col: 12, offset: 9968}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 325, col: 12, offset: 9968}, + name: "Comment", + }, + &seqExpr{ + pos: position{line: 325, col: 22, offset: 9978}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 325, col: 22, offset: 9978}, + expr: &charClassMatcher{ + pos: position{line: 325, col: 23, offset: 9979}, + val: "[{}]", + chars: []rune{'{', '}'}, + ignoreCase: false, + inverted: false, + }, + }, + &ruleRefExpr{ + pos: position{line: 325, col: 28, offset: 9984}, + name: "SourceChar", + }, + }, + }, + }, + }, + }, + &seqExpr{ + pos: position{line: 325, col: 44, offset: 10000}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 325, col: 44, offset: 10000}, + val: "{", + ignoreCase: false, + want: "\"{\"", + }, + &ruleRefExpr{ + pos: position{line: 325, col: 48, offset: 10004}, + name: "Code", + }, + &litMatcher{ + pos: position{line: 325, col: 53, offset: 10009}, + val: "}", + ignoreCase: false, + want: "\"}\"", + }, + }, + }, + }, + }, + }, + }, + { + name: "__", + pos: position{line: 327, col: 1, offset: 10017}, + expr: &zeroOrMoreExpr{ + pos: position{line: 327, col: 6, offset: 10024}, + expr: &choiceExpr{ + pos: position{line: 327, col: 8, offset: 10026}, + alternatives: 
[]interface{}{ + &ruleRefExpr{ + pos: position{line: 327, col: 8, offset: 10026}, + name: "Whitespace", + }, + &ruleRefExpr{ + pos: position{line: 327, col: 21, offset: 10039}, + name: "EOL", + }, + &ruleRefExpr{ + pos: position{line: 327, col: 27, offset: 10045}, + name: "Comment", + }, + }, + }, + }, + }, + { + name: "_", + pos: position{line: 328, col: 1, offset: 10056}, + expr: &zeroOrMoreExpr{ + pos: position{line: 328, col: 5, offset: 10062}, + expr: &choiceExpr{ + pos: position{line: 328, col: 7, offset: 10064}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 328, col: 7, offset: 10064}, + name: "Whitespace", + }, + &ruleRefExpr{ + pos: position{line: 328, col: 20, offset: 10077}, + name: "MultiLineCommentNoLineTerminator", + }, + }, + }, + }, + }, + { + name: "Whitespace", + pos: position{line: 330, col: 1, offset: 10114}, + expr: &charClassMatcher{ + pos: position{line: 330, col: 14, offset: 10129}, + val: "[ \\t\\r]", + chars: []rune{' ', '\t', '\r'}, + ignoreCase: false, + inverted: false, + }, + }, + { + name: "EOL", + pos: position{line: 331, col: 1, offset: 10137}, + expr: &litMatcher{ + pos: position{line: 331, col: 7, offset: 10145}, + val: "\n", + ignoreCase: false, + want: "\"\\n\"", + }, + }, + { + name: "EOS", + pos: position{line: 332, col: 1, offset: 10150}, + expr: &choiceExpr{ + pos: position{line: 332, col: 7, offset: 10158}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 332, col: 7, offset: 10158}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 332, col: 7, offset: 10158}, + name: "__", + }, + &litMatcher{ + pos: position{line: 332, col: 10, offset: 10161}, + val: ";", + ignoreCase: false, + want: "\";\"", + }, + }, + }, + &seqExpr{ + pos: position{line: 332, col: 16, offset: 10167}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 332, col: 16, offset: 10167}, + name: "_", + }, + &zeroOrOneExpr{ + pos: position{line: 332, col: 18, offset: 10169}, + expr: &ruleRefExpr{ + 
pos: position{line: 332, col: 18, offset: 10169}, + name: "SingleLineComment", + }, + }, + &ruleRefExpr{ + pos: position{line: 332, col: 37, offset: 10188}, + name: "EOL", + }, + }, + }, + &seqExpr{ + pos: position{line: 332, col: 43, offset: 10194}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 332, col: 43, offset: 10194}, + name: "__", + }, + &ruleRefExpr{ + pos: position{line: 332, col: 46, offset: 10197}, + name: "EOF", + }, + }, + }, + }, + }, + }, + { + name: "EOF", + pos: position{line: 334, col: 1, offset: 10202}, + expr: ¬Expr{ + pos: position{line: 334, col: 7, offset: 10210}, + expr: &anyMatcher{ + line: 334, col: 8, offset: 10211, + }, + }, + }, + }, +} + +func (c *current) onGrammar1(initializer, rules interface{}) (interface{}, error) { + pos := c.astPos() + + // create the grammar, assign its initializer + g := ast.NewGrammar(pos) + initSlice := toIfaceSlice(initializer) + if len(initSlice) > 0 { + g.Init = initSlice[0].(*ast.CodeBlock) + } + + rulesSlice := toIfaceSlice(rules) + g.Rules = make([]*ast.Rule, len(rulesSlice)) + for i, duo := range rulesSlice { + g.Rules[i] = duo.([]interface{})[0].(*ast.Rule) + } + + return g, nil +} + +func (p *parser) callonGrammar1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onGrammar1(stack["initializer"], stack["rules"]) +} + +func (c *current) onInitializer1(code interface{}) (interface{}, error) { + return code, nil +} + +func (p *parser) callonInitializer1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onInitializer1(stack["code"]) +} + +func (c *current) onRule1(name, display, expr interface{}) (interface{}, error) { + pos := c.astPos() + + rule := ast.NewRule(pos, name.(*ast.Identifier)) + displaySlice := toIfaceSlice(display) + if len(displaySlice) > 0 { + rule.DisplayName = displaySlice[0].(*ast.StringLit) + } + rule.Expr = expr.(ast.Expression) + + return rule, nil +} + +func (p *parser) callonRule1() 
(interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onRule1(stack["name"], stack["display"], stack["expr"]) +} + +func (c *current) onRecoveryExpr1(expr, recoverExprs interface{}) (interface{}, error) { + recoverExprSlice := toIfaceSlice(recoverExprs) + recover := expr.(ast.Expression) + for _, sl := range recoverExprSlice { + pos := c.astPos() + r := ast.NewRecoveryExpr(pos) + r.Expr = recover + r.RecoverExpr = sl.([]interface{})[7].(ast.Expression) + r.Labels = sl.([]interface{})[3].([]ast.FailureLabel) + + recover = r + } + return recover, nil +} + +func (p *parser) callonRecoveryExpr1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onRecoveryExpr1(stack["expr"], stack["recoverExprs"]) +} + +func (c *current) onLabels1(label, labels interface{}) (interface{}, error) { + failureLabels := []ast.FailureLabel{ast.FailureLabel(label.(*ast.Identifier).Val)} + labelSlice := toIfaceSlice(labels) + for _, fl := range labelSlice { + failureLabels = append(failureLabels, ast.FailureLabel(fl.([]interface{})[3].(*ast.Identifier).Val)) + } + return failureLabels, nil +} + +func (p *parser) callonLabels1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLabels1(stack["label"], stack["labels"]) +} + +func (c *current) onChoiceExpr1(first, rest interface{}) (interface{}, error) { + restSlice := toIfaceSlice(rest) + if len(restSlice) == 0 { + return first, nil + } + + pos := c.astPos() + choice := ast.NewChoiceExpr(pos) + choice.Alternatives = []ast.Expression{first.(ast.Expression)} + for _, sl := range restSlice { + choice.Alternatives = append(choice.Alternatives, sl.([]interface{})[3].(ast.Expression)) + } + return choice, nil +} + +func (p *parser) callonChoiceExpr1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onChoiceExpr1(stack["first"], stack["rest"]) +} + +func (c *current) onActionExpr1(expr, code interface{}) 
(interface{}, error) { + if code == nil { + return expr, nil + } + + pos := c.astPos() + act := ast.NewActionExpr(pos) + act.Expr = expr.(ast.Expression) + codeSlice := toIfaceSlice(code) + act.Code = codeSlice[1].(*ast.CodeBlock) + + return act, nil +} + +func (p *parser) callonActionExpr1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onActionExpr1(stack["expr"], stack["code"]) +} + +func (c *current) onSeqExpr1(first, rest interface{}) (interface{}, error) { + restSlice := toIfaceSlice(rest) + if len(restSlice) == 0 { + return first, nil + } + seq := ast.NewSeqExpr(c.astPos()) + seq.Exprs = []ast.Expression{first.(ast.Expression)} + for _, sl := range restSlice { + seq.Exprs = append(seq.Exprs, sl.([]interface{})[1].(ast.Expression)) + } + return seq, nil +} + +func (p *parser) callonSeqExpr1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSeqExpr1(stack["first"], stack["rest"]) +} + +func (c *current) onLabeledExpr2(label, expr interface{}) (interface{}, error) { + pos := c.astPos() + lab := ast.NewLabeledExpr(pos) + lab.Label = label.(*ast.Identifier) + lab.Expr = expr.(ast.Expression) + return lab, nil +} + +func (p *parser) callonLabeledExpr2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLabeledExpr2(stack["label"], stack["expr"]) +} + +func (c *current) onPrefixedExpr2(op, expr interface{}) (interface{}, error) { + pos := c.astPos() + opStr := op.(string) + if opStr == "&" { + and := ast.NewAndExpr(pos) + and.Expr = expr.(ast.Expression) + return and, nil + } + not := ast.NewNotExpr(pos) + not.Expr = expr.(ast.Expression) + return not, nil +} + +func (p *parser) callonPrefixedExpr2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrefixedExpr2(stack["op"], stack["expr"]) +} + +func (c *current) onPrefixedOp1() (interface{}, error) { + return string(c.text), nil +} + +func (p *parser) 
callonPrefixedOp1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrefixedOp1() +} + +func (c *current) onSuffixedExpr2(expr, op interface{}) (interface{}, error) { + pos := c.astPos() + opStr := op.(string) + switch opStr { + case "?": + zero := ast.NewZeroOrOneExpr(pos) + zero.Expr = expr.(ast.Expression) + return zero, nil + case "*": + zero := ast.NewZeroOrMoreExpr(pos) + zero.Expr = expr.(ast.Expression) + return zero, nil + case "+": + one := ast.NewOneOrMoreExpr(pos) + one.Expr = expr.(ast.Expression) + return one, nil + default: + return nil, errors.New("unknown operator: " + opStr) + } +} + +func (p *parser) callonSuffixedExpr2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSuffixedExpr2(stack["expr"], stack["op"]) +} + +func (c *current) onSuffixedOp1() (interface{}, error) { + return string(c.text), nil +} + +func (p *parser) callonSuffixedOp1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSuffixedOp1() +} + +func (c *current) onPrimaryExpr7(expr interface{}) (interface{}, error) { + return expr, nil +} + +func (p *parser) callonPrimaryExpr7() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimaryExpr7(stack["expr"]) +} + +func (c *current) onRuleRefExpr1(name interface{}) (interface{}, error) { + ref := ast.NewRuleRefExpr(c.astPos()) + ref.Name = name.(*ast.Identifier) + return ref, nil +} + +func (p *parser) callonRuleRefExpr1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onRuleRefExpr1(stack["name"]) +} + +func (c *current) onSemanticPredExpr1(op, code interface{}) (interface{}, error) { + switch op.(string) { + case "#": + state := ast.NewStateCodeExpr(c.astPos()) + state.Code = code.(*ast.CodeBlock) + return state, nil + + case "&": + and := ast.NewAndCodeExpr(c.astPos()) + and.Code = code.(*ast.CodeBlock) + return and, nil + + // case 
"!": + default: + not := ast.NewNotCodeExpr(c.astPos()) + not.Code = code.(*ast.CodeBlock) + return not, nil + + } +} + +func (p *parser) callonSemanticPredExpr1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSemanticPredExpr1(stack["op"], stack["code"]) +} + +func (c *current) onSemanticPredOp1() (interface{}, error) { + return string(c.text), nil +} + +func (p *parser) callonSemanticPredOp1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSemanticPredOp1() +} + +func (c *current) onIdentifier1(ident interface{}) (interface{}, error) { + astIdent := ast.NewIdentifier(c.astPos(), string(c.text)) + if reservedWords[astIdent.Val] { + return astIdent, errors.New("identifier is a reserved word") + } + return astIdent, nil +} + +func (p *parser) callonIdentifier1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onIdentifier1(stack["ident"]) +} + +func (c *current) onIdentifierName1() (interface{}, error) { + return ast.NewIdentifier(c.astPos(), string(c.text)), nil +} + +func (p *parser) callonIdentifierName1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onIdentifierName1() +} + +func (c *current) onLitMatcher1(lit, ignore interface{}) (interface{}, error) { + rawStr := lit.(*ast.StringLit).Val + s, err := strconv.Unquote(rawStr) + if err != nil { + // an invalid string literal raises an error in the escape rules, + // so simply replace the literal with an empty string here to + // avoid a cascade of errors. 
+ s = "" + } + m := ast.NewLitMatcher(c.astPos(), s) + m.IgnoreCase = ignore != nil + return m, nil +} + +func (p *parser) callonLitMatcher1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLitMatcher1(stack["lit"], stack["ignore"]) +} + +func (c *current) onStringLiteral2() (interface{}, error) { + return ast.NewStringLit(c.astPos(), string(c.text)), nil +} + +func (p *parser) callonStringLiteral2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onStringLiteral2() +} + +func (c *current) onStringLiteral18() (interface{}, error) { + return ast.NewStringLit(c.astPos(), "``"), errors.New("string literal not terminated") +} + +func (p *parser) callonStringLiteral18() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onStringLiteral18() +} + +func (c *current) onDoubleStringEscape5() (interface{}, error) { + return nil, errors.New("invalid escape character") +} + +func (p *parser) callonDoubleStringEscape5() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onDoubleStringEscape5() +} + +func (c *current) onSingleStringEscape5() (interface{}, error) { + return nil, errors.New("invalid escape character") +} + +func (p *parser) callonSingleStringEscape5() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSingleStringEscape5() +} + +func (c *current) onOctalEscape6() (interface{}, error) { + return nil, errors.New("invalid octal escape") +} + +func (p *parser) callonOctalEscape6() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onOctalEscape6() +} + +func (c *current) onHexEscape6() (interface{}, error) { + return nil, errors.New("invalid hexadecimal escape") +} + +func (p *parser) callonHexEscape6() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onHexEscape6() +} + +func (c *current) 
onLongUnicodeEscape2() (interface{}, error) { + return validateUnicodeEscape(string(c.text), "invalid Unicode escape") + +} + +func (p *parser) callonLongUnicodeEscape2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLongUnicodeEscape2() +} + +func (c *current) onLongUnicodeEscape13() (interface{}, error) { + return nil, errors.New("invalid Unicode escape") +} + +func (p *parser) callonLongUnicodeEscape13() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLongUnicodeEscape13() +} + +func (c *current) onShortUnicodeEscape2() (interface{}, error) { + return validateUnicodeEscape(string(c.text), "invalid Unicode escape") + +} + +func (p *parser) callonShortUnicodeEscape2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onShortUnicodeEscape2() +} + +func (c *current) onShortUnicodeEscape9() (interface{}, error) { + return nil, errors.New("invalid Unicode escape") +} + +func (p *parser) callonShortUnicodeEscape9() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onShortUnicodeEscape9() +} + +func (c *current) onCharClassMatcher2() (interface{}, error) { + pos := c.astPos() + cc := ast.NewCharClassMatcher(pos, string(c.text)) + return cc, nil +} + +func (p *parser) callonCharClassMatcher2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCharClassMatcher2() +} + +func (c *current) onCharClassMatcher15() (interface{}, error) { + return ast.NewCharClassMatcher(c.astPos(), "[]"), errors.New("character class not terminated") +} + +func (p *parser) callonCharClassMatcher15() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCharClassMatcher15() +} + +func (c *current) onCharClassEscape5() (interface{}, error) { + return nil, errors.New("invalid escape character") +} + +func (p *parser) callonCharClassEscape5() (interface{}, error) { + 
stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCharClassEscape5() +} + +func (c *current) onUnicodeClassEscape5() (interface{}, error) { + return nil, errors.New("invalid Unicode class escape") +} + +func (p *parser) callonUnicodeClassEscape5() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onUnicodeClassEscape5() +} + +func (c *current) onUnicodeClassEscape13(ident interface{}) (interface{}, error) { + if !unicodeClasses[ident.(*ast.Identifier).Val] { + return nil, errors.New("invalid Unicode class escape") + } + return nil, nil + +} + +func (p *parser) callonUnicodeClassEscape13() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onUnicodeClassEscape13(stack["ident"]) +} + +func (c *current) onUnicodeClassEscape19() (interface{}, error) { + return nil, errors.New("Unicode class not terminated") + +} + +func (p *parser) callonUnicodeClassEscape19() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onUnicodeClassEscape19() +} + +func (c *current) onAnyMatcher1() (interface{}, error) { + any := ast.NewAnyMatcher(c.astPos(), ".") + return any, nil +} + +func (p *parser) callonAnyMatcher1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onAnyMatcher1() +} + +func (c *current) onThrowExpr2(label interface{}) (interface{}, error) { + t := ast.NewThrowExpr(c.astPos()) + t.Label = label.(*ast.Identifier).Val + return t, nil +} + +func (p *parser) callonThrowExpr2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onThrowExpr2(stack["label"]) +} + +func (c *current) onThrowExpr9() (interface{}, error) { + return nil, errors.New("throw expression not terminated") +} + +func (p *parser) callonThrowExpr9() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onThrowExpr9() +} + +func (c *current) onCodeBlock2() (interface{}, error) { + 
pos := c.astPos() + cb := ast.NewCodeBlock(pos, string(c.text)) + return cb, nil +} + +func (p *parser) callonCodeBlock2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCodeBlock2() +} + +func (c *current) onCodeBlock7() (interface{}, error) { + return nil, errors.New("code block not terminated") +} + +func (p *parser) callonCodeBlock7() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCodeBlock7() +} + +var ( + // errNoRule is returned when the grammar to parse has no rule. + errNoRule = errors.New("grammar has no rule") + + // errInvalidEntrypoint is returned when the specified entrypoint rule + // does not exit. + errInvalidEntrypoint = errors.New("invalid entrypoint") + + // errInvalidEncoding is returned when the source is not properly + // utf8-encoded. + errInvalidEncoding = errors.New("invalid encoding") + + // errMaxExprCnt is used to signal that the maximum number of + // expressions have been parsed. + errMaxExprCnt = errors.New("max number of expresssions parsed") +) + +// Option is a function that can set an option on the parser. It returns +// the previous setting as an Option. +type Option func(*parser) Option + +// MaxExpressions creates an Option to stop parsing after the provided +// number of expressions have been parsed, if the value is 0 then the parser will +// parse for as many steps as needed (possibly an infinite number). +// +// The default for maxExprCnt is 0. +func MaxExpressions(maxExprCnt uint64) Option { + return func(p *parser) Option { + oldMaxExprCnt := p.maxExprCnt + p.maxExprCnt = maxExprCnt + return MaxExpressions(oldMaxExprCnt) + } +} + +// Entrypoint creates an Option to set the rule name to use as entrypoint. +// The rule name must have been specified in the -alternate-entrypoints +// if generating the parser with the -optimize-grammar flag, otherwise +// it may have been optimized out. 
Passing an empty string sets the +// entrypoint to the first rule in the grammar. +// +// The default is to start parsing at the first rule in the grammar. +func Entrypoint(ruleName string) Option { + return func(p *parser) Option { + oldEntrypoint := p.entrypoint + p.entrypoint = ruleName + if ruleName == "" { + p.entrypoint = g.rules[0].name + } + return Entrypoint(oldEntrypoint) + } +} + +// Statistics adds a user provided Stats struct to the parser to allow +// the user to process the results after the parsing has finished. +// Also the key for the "no match" counter is set. +// +// Example usage: +// +// input := "input" +// stats := Stats{} +// _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match")) +// if err != nil { +// log.Panicln(err) +// } +// b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", " ") +// if err != nil { +// log.Panicln(err) +// } +// fmt.Println(string(b)) +// +func Statistics(stats *Stats, choiceNoMatch string) Option { + return func(p *parser) Option { + oldStats := p.Stats + p.Stats = stats + oldChoiceNoMatch := p.choiceNoMatch + p.choiceNoMatch = choiceNoMatch + if p.Stats.ChoiceAltCnt == nil { + p.Stats.ChoiceAltCnt = make(map[string]map[string]int) + } + return Statistics(oldStats, oldChoiceNoMatch) + } +} + +// Debug creates an Option to set the debug flag to b. When set to true, +// debugging information is printed to stdout while parsing. +// +// The default is false. +func Debug(b bool) Option { + return func(p *parser) Option { + old := p.debug + p.debug = b + return Debug(old) + } +} + +// Memoize creates an Option to set the memoize flag to b. When set to true, +// the parser will cache all results so each expression is evaluated only +// once. This guarantees linear parsing time even for pathological cases, +// at the expense of more memory and slower times for typical cases. +// +// The default is false. 
+func Memoize(b bool) Option { + return func(p *parser) Option { + old := p.memoize + p.memoize = b + return Memoize(old) + } +} + +// AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes. +// Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD) +// by character class matchers and is matched by the any matcher. +// The returned matched value, c.text and c.offset are NOT affected. +// +// The default is false. +func AllowInvalidUTF8(b bool) Option { + return func(p *parser) Option { + old := p.allowInvalidUTF8 + p.allowInvalidUTF8 = b + return AllowInvalidUTF8(old) + } +} + +// Recover creates an Option to set the recover flag to b. When set to +// true, this causes the parser to recover from panics and convert it +// to an error. Setting it to false can be useful while debugging to +// access the full stack trace. +// +// The default is true. +func Recover(b bool) Option { + return func(p *parser) Option { + old := p.recover + p.recover = b + return Recover(old) + } +} + +// GlobalStore creates an Option to set a key to a certain value in +// the globalStore. +func GlobalStore(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.globalStore[key] + p.cur.globalStore[key] = value + return GlobalStore(key, old) + } +} + +// InitState creates an Option to set a key to a certain value in +// the global "state" store. +func InitState(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.state[key] + p.cur.state[key] = value + return InitState(key, old) + } +} + +// ParseFile parses the file identified by filename. +func ParseFile(filename string, opts ...Option) (i interface{}, err error) { // nolint: deadcode + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if closeErr := f.Close(); closeErr != nil { + err = closeErr + } + }() + return ParseReader(filename, f, opts...) 
+} + +// ParseReader parses the data from r using filename as information in the +// error messages. +func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) { // nolint: deadcode + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return Parse(filename, b, opts...) +} + +// Parse parses the data from b using filename as information in the +// error messages. +func Parse(filename string, b []byte, opts ...Option) (interface{}, error) { + return newParser(filename, b, opts...).parse(g) +} + +// position records a position in the text. +type position struct { + line, col, offset int +} + +func (p position) String() string { + return strconv.Itoa(p.line) + ":" + strconv.Itoa(p.col) + " [" + strconv.Itoa(p.offset) + "]" +} + +// savepoint stores all state required to go back to this point in the +// parser. +type savepoint struct { + position + rn rune + w int +} + +type current struct { + pos position // start position of the match + text []byte // raw text of the match + + // state is a store for arbitrary key,value pairs that the user wants to be + // tied to the backtracking of the parser. + // This is always rolled back if a parsing rule fails. + state storeDict + + // globalStore is a general store for the user to store arbitrary key-value + // pairs that they need to manage and that they do not want tied to the + // backtracking of the parser. This is only modified by the user and never + // rolled back by the parser. It is always up to the user to keep this in a + // consistent state. + globalStore storeDict +} + +type storeDict map[string]interface{} + +// the AST types... 
+ +// nolint: structcheck +type grammar struct { + pos position + rules []*rule +} + +// nolint: structcheck +type rule struct { + pos position + name string + displayName string + expr interface{} +} + +// nolint: structcheck +type choiceExpr struct { + pos position + alternatives []interface{} +} + +// nolint: structcheck +type actionExpr struct { + pos position + expr interface{} + run func(*parser) (interface{}, error) +} + +// nolint: structcheck +type recoveryExpr struct { + pos position + expr interface{} + recoverExpr interface{} + failureLabel []string +} + +// nolint: structcheck +type seqExpr struct { + pos position + exprs []interface{} +} + +// nolint: structcheck +type throwExpr struct { + pos position + label string +} + +// nolint: structcheck +type labeledExpr struct { + pos position + label string + expr interface{} +} + +// nolint: structcheck +type expr struct { + pos position + expr interface{} +} + +type andExpr expr // nolint: structcheck +type notExpr expr // nolint: structcheck +type zeroOrOneExpr expr // nolint: structcheck +type zeroOrMoreExpr expr // nolint: structcheck +type oneOrMoreExpr expr // nolint: structcheck + +// nolint: structcheck +type ruleRefExpr struct { + pos position + name string +} + +// nolint: structcheck +type stateCodeExpr struct { + pos position + run func(*parser) error +} + +// nolint: structcheck +type andCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +// nolint: structcheck +type notCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +// nolint: structcheck +type litMatcher struct { + pos position + val string + ignoreCase bool + want string +} + +// nolint: structcheck +type charClassMatcher struct { + pos position + val string + basicLatinChars [128]bool + chars []rune + ranges []rune + classes []*unicode.RangeTable + ignoreCase bool + inverted bool +} + +type anyMatcher position // nolint: structcheck + +// errList cumulates the errors found by the parser. 
+type errList []error + +func (e *errList) add(err error) { + *e = append(*e, err) +} + +func (e errList) err() error { + if len(e) == 0 { + return nil + } + e.dedupe() + return e +} + +func (e *errList) dedupe() { + var cleaned []error + set := make(map[string]bool) + for _, err := range *e { + if msg := err.Error(); !set[msg] { + set[msg] = true + cleaned = append(cleaned, err) + } + } + *e = cleaned +} + +func (e errList) Error() string { + switch len(e) { + case 0: + return "" + case 1: + return e[0].Error() + default: + var buf bytes.Buffer + + for i, err := range e { + if i > 0 { + buf.WriteRune('\n') + } + buf.WriteString(err.Error()) + } + return buf.String() + } +} + +// parserError wraps an error with a prefix indicating the rule in which +// the error occurred. The original error is stored in the Inner field. +type parserError struct { + Inner error + pos position + prefix string + expected []string +} + +// Error returns the error message. +func (p *parserError) Error() string { + return p.prefix + ": " + p.Inner.Error() +} + +// newParser creates a parser with the specified input source and options. +func newParser(filename string, b []byte, opts ...Option) *parser { + stats := Stats{ + ChoiceAltCnt: make(map[string]map[string]int), + } + + p := &parser{ + filename: filename, + errs: new(errList), + data: b, + pt: savepoint{position: position{line: 1}}, + recover: true, + cur: current{ + state: make(storeDict), + globalStore: make(storeDict), + }, + maxFailPos: position{col: 1, line: 1}, + maxFailExpected: make([]string, 0, 20), + Stats: &stats, + // start rule is rule [0] unless an alternate entrypoint is specified + entrypoint: g.rules[0].name, + } + p.setOptions(opts) + + if p.maxExprCnt == 0 { + p.maxExprCnt = math.MaxUint64 + } + + return p +} + +// setOptions applies the options to the parser. 
+func (p *parser) setOptions(opts []Option) { + for _, opt := range opts { + opt(p) + } +} + +// nolint: structcheck,deadcode +type resultTuple struct { + v interface{} + b bool + end savepoint +} + +// nolint: varcheck +const choiceNoMatch = -1 + +// Stats stores some statistics, gathered during parsing +type Stats struct { + // ExprCnt counts the number of expressions processed during parsing + // This value is compared to the maximum number of expressions allowed + // (set by the MaxExpressions option). + ExprCnt uint64 + + // ChoiceAltCnt is used to count for each ordered choice expression, + // which alternative is used how may times. + // These numbers allow to optimize the order of the ordered choice expression + // to increase the performance of the parser + // + // The outer key of ChoiceAltCnt is composed of the name of the rule as well + // as the line and the column of the ordered choice. + // The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative. + // For each alternative the number of matches are counted. If an ordered choice does not + // match, a special counter is incremented. The name of this counter is set with + // the parser option Statistics. + // For an alternative to be included in ChoiceAltCnt, it has to match at least once. 
+ ChoiceAltCnt map[string]map[string]int +} + +// nolint: structcheck,maligned +type parser struct { + filename string + pt savepoint + cur current + + data []byte + errs *errList + + depth int + recover bool + debug bool + + memoize bool + // memoization table for the packrat algorithm: + // map[offset in source] map[expression or rule] {value, match} + memo map[int]map[interface{}]resultTuple + + // rules table, maps the rule identifier to the rule node + rules map[string]*rule + // variables stack, map of label to value + vstack []map[string]interface{} + // rule stack, allows identification of the current rule in errors + rstack []*rule + + // parse fail + maxFailPos position + maxFailExpected []string + maxFailInvertExpected bool + + // max number of expressions to be parsed + maxExprCnt uint64 + // entrypoint for the parser + entrypoint string + + allowInvalidUTF8 bool + + *Stats + + choiceNoMatch string + // recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse + recoveryStack []map[string]interface{} +} + +// push a variable set on the vstack. +func (p *parser) pushV() { + if cap(p.vstack) == len(p.vstack) { + // create new empty slot in the stack + p.vstack = append(p.vstack, nil) + } else { + // slice to 1 more + p.vstack = p.vstack[:len(p.vstack)+1] + } + + // get the last args set + m := p.vstack[len(p.vstack)-1] + if m != nil && len(m) == 0 { + // empty map, all good + return + } + + m = make(map[string]interface{}) + p.vstack[len(p.vstack)-1] = m +} + +// pop a variable set from the vstack. 
+func (p *parser) popV() { + // if the map is not empty, clear it + m := p.vstack[len(p.vstack)-1] + if len(m) > 0 { + // GC that map + p.vstack[len(p.vstack)-1] = nil + } + p.vstack = p.vstack[:len(p.vstack)-1] +} + +// push a recovery expression with its labels to the recoveryStack +func (p *parser) pushRecovery(labels []string, expr interface{}) { + if cap(p.recoveryStack) == len(p.recoveryStack) { + // create new empty slot in the stack + p.recoveryStack = append(p.recoveryStack, nil) + } else { + // slice to 1 more + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1] + } + + m := make(map[string]interface{}, len(labels)) + for _, fl := range labels { + m[fl] = expr + } + p.recoveryStack[len(p.recoveryStack)-1] = m +} + +// pop a recovery expression from the recoveryStack +func (p *parser) popRecovery() { + // GC that map + p.recoveryStack[len(p.recoveryStack)-1] = nil + + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1] +} + +func (p *parser) print(prefix, s string) string { + if !p.debug { + return s + } + + fmt.Printf("%s %d:%d:%d: %s [%#U]\n", + prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn) + return s +} + +func (p *parser) in(s string) string { + p.depth++ + return p.print(strings.Repeat(" ", p.depth)+">", s) +} + +func (p *parser) out(s string) string { + p.depth-- + return p.print(strings.Repeat(" ", p.depth)+"<", s) +} + +func (p *parser) addErr(err error) { + p.addErrAt(err, p.pt.position, []string{}) +} + +func (p *parser) addErrAt(err error, pos position, expected []string) { + var buf bytes.Buffer + if p.filename != "" { + buf.WriteString(p.filename) + } + if buf.Len() > 0 { + buf.WriteString(":") + } + buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset)) + if len(p.rstack) > 0 { + if buf.Len() > 0 { + buf.WriteString(": ") + } + rule := p.rstack[len(p.rstack)-1] + if rule.displayName != "" { + buf.WriteString("rule " + rule.displayName) + } else { + buf.WriteString("rule " + rule.name) + } + } + pe := 
&parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected} + p.errs.add(pe) +} + +func (p *parser) failAt(fail bool, pos position, want string) { + // process fail if parsing fails and not inverted or parsing succeeds and invert is set + if fail == p.maxFailInvertExpected { + if pos.offset < p.maxFailPos.offset { + return + } + + if pos.offset > p.maxFailPos.offset { + p.maxFailPos = pos + p.maxFailExpected = p.maxFailExpected[:0] + } + + if p.maxFailInvertExpected { + want = "!" + want + } + p.maxFailExpected = append(p.maxFailExpected, want) + } +} + +// read advances the parser to the next rune. +func (p *parser) read() { + p.pt.offset += p.pt.w + rn, n := utf8.DecodeRune(p.data[p.pt.offset:]) + p.pt.rn = rn + p.pt.w = n + p.pt.col++ + if rn == '\n' { + p.pt.line++ + p.pt.col = 0 + } + + if rn == utf8.RuneError && n == 1 { // see utf8.DecodeRune + if !p.allowInvalidUTF8 { + p.addErr(errInvalidEncoding) + } + } +} + +// restore parser position to the savepoint pt. +func (p *parser) restore(pt savepoint) { + if p.debug { + defer p.out(p.in("restore")) + } + if pt.offset == p.pt.offset { + return + } + p.pt = pt +} + +// Cloner is implemented by any value that has a Clone method, which returns a +// copy of the value. This is mainly used for types which are not passed by +// value (e.g map, slice, chan) or structs that contain such types. +// +// This is used in conjunction with the global state feature to create proper +// copies of the state to allow the parser to properly restore the state in +// the case of backtracking. +type Cloner interface { + Clone() interface{} +} + +var statePool = &sync.Pool{ + New: func() interface{} { return make(storeDict) }, +} + +func (sd storeDict) Discard() { + for k := range sd { + delete(sd, k) + } + statePool.Put(sd) +} + +// clone and return parser current state. 
+func (p *parser) cloneState() storeDict { + if p.debug { + defer p.out(p.in("cloneState")) + } + + state := statePool.Get().(storeDict) + for k, v := range p.cur.state { + if c, ok := v.(Cloner); ok { + state[k] = c.Clone() + } else { + state[k] = v + } + } + return state +} + +// restore parser current state to the state storeDict. +// every restoreState should applied only one time for every cloned state +func (p *parser) restoreState(state storeDict) { + if p.debug { + defer p.out(p.in("restoreState")) + } + p.cur.state.Discard() + p.cur.state = state +} + +// get the slice of bytes from the savepoint start to the current position. +func (p *parser) sliceFrom(start savepoint) []byte { + return p.data[start.position.offset:p.pt.position.offset] +} + +func (p *parser) getMemoized(node interface{}) (resultTuple, bool) { + if len(p.memo) == 0 { + return resultTuple{}, false + } + m := p.memo[p.pt.offset] + if len(m) == 0 { + return resultTuple{}, false + } + res, ok := m[node] + return res, ok +} + +func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) { + if p.memo == nil { + p.memo = make(map[int]map[interface{}]resultTuple) + } + m := p.memo[pt.offset] + if m == nil { + m = make(map[interface{}]resultTuple) + p.memo[pt.offset] = m + } + m[node] = tuple +} + +func (p *parser) buildRulesTable(g *grammar) { + p.rules = make(map[string]*rule, len(g.rules)) + for _, r := range g.rules { + p.rules[r.name] = r + } +} + +// nolint: gocyclo +func (p *parser) parse(g *grammar) (val interface{}, err error) { + if len(g.rules) == 0 { + p.addErr(errNoRule) + return nil, p.errs.err() + } + + // TODO : not super critical but this could be generated + p.buildRulesTable(g) + + if p.recover { + // panic can be used in action code to stop parsing immediately + // and return the panic as an error. 
+ defer func() { + if e := recover(); e != nil { + if p.debug { + defer p.out(p.in("panic handler")) + } + val = nil + switch e := e.(type) { + case error: + p.addErr(e) + default: + p.addErr(fmt.Errorf("%v", e)) + } + err = p.errs.err() + } + }() + } + + startRule, ok := p.rules[p.entrypoint] + if !ok { + p.addErr(errInvalidEntrypoint) + return nil, p.errs.err() + } + + p.read() // advance to first rune + val, ok = p.parseRule(startRule) + if !ok { + if len(*p.errs) == 0 { + // If parsing fails, but no errors have been recorded, the expected values + // for the farthest parser position are returned as error. + maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected)) + for _, v := range p.maxFailExpected { + maxFailExpectedMap[v] = struct{}{} + } + expected := make([]string, 0, len(maxFailExpectedMap)) + eof := false + if _, ok := maxFailExpectedMap["!."]; ok { + delete(maxFailExpectedMap, "!.") + eof = true + } + for k := range maxFailExpectedMap { + expected = append(expected, k) + } + sort.Strings(expected) + if eof { + expected = append(expected, "EOF") + } + p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected) + } + + return nil, p.errs.err() + } + return val, p.errs.err() +} + +func listJoin(list []string, sep string, lastSep string) string { + switch len(list) { + case 0: + return "" + case 1: + return list[0] + default: + return strings.Join(list[:len(list)-1], sep) + " " + lastSep + " " + list[len(list)-1] + } +} + +func (p *parser) parseRule(rule *rule) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseRule " + rule.name)) + } + + if p.memoize { + res, ok := p.getMemoized(rule) + if ok { + p.restore(res.end) + return res.v, res.b + } + } + + start := p.pt + p.rstack = append(p.rstack, rule) + p.pushV() + val, ok := p.parseExpr(rule.expr) + p.popV() + p.rstack = p.rstack[:len(p.rstack)-1] + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", 
string(p.sliceFrom(start))) + } + + if p.memoize { + p.setMemoized(start, rule, resultTuple{val, ok, p.pt}) + } + return val, ok +} + +// nolint: gocyclo +func (p *parser) parseExpr(expr interface{}) (interface{}, bool) { + var pt savepoint + + if p.memoize { + res, ok := p.getMemoized(expr) + if ok { + p.restore(res.end) + return res.v, res.b + } + pt = p.pt + } + + p.ExprCnt++ + if p.ExprCnt > p.maxExprCnt { + panic(errMaxExprCnt) + } + + var val interface{} + var ok bool + switch expr := expr.(type) { + case *actionExpr: + val, ok = p.parseActionExpr(expr) + case *andCodeExpr: + val, ok = p.parseAndCodeExpr(expr) + case *andExpr: + val, ok = p.parseAndExpr(expr) + case *anyMatcher: + val, ok = p.parseAnyMatcher(expr) + case *charClassMatcher: + val, ok = p.parseCharClassMatcher(expr) + case *choiceExpr: + val, ok = p.parseChoiceExpr(expr) + case *labeledExpr: + val, ok = p.parseLabeledExpr(expr) + case *litMatcher: + val, ok = p.parseLitMatcher(expr) + case *notCodeExpr: + val, ok = p.parseNotCodeExpr(expr) + case *notExpr: + val, ok = p.parseNotExpr(expr) + case *oneOrMoreExpr: + val, ok = p.parseOneOrMoreExpr(expr) + case *recoveryExpr: + val, ok = p.parseRecoveryExpr(expr) + case *ruleRefExpr: + val, ok = p.parseRuleRefExpr(expr) + case *seqExpr: + val, ok = p.parseSeqExpr(expr) + case *stateCodeExpr: + val, ok = p.parseStateCodeExpr(expr) + case *throwExpr: + val, ok = p.parseThrowExpr(expr) + case *zeroOrMoreExpr: + val, ok = p.parseZeroOrMoreExpr(expr) + case *zeroOrOneExpr: + val, ok = p.parseZeroOrOneExpr(expr) + default: + panic(fmt.Sprintf("unknown expression type %T", expr)) + } + if p.memoize { + p.setMemoized(pt, expr, resultTuple{val, ok, p.pt}) + } + return val, ok +} + +func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseActionExpr")) + } + + start := p.pt + val, ok := p.parseExpr(act.expr) + if ok { + p.cur.pos = start.position + p.cur.text = p.sliceFrom(start) + state := p.cloneState() 
+ actVal, err := act.run(p) + if err != nil { + p.addErrAt(err, start.position, []string{}) + } + p.restoreState(state) + + val = actVal + } + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) + } + return val, ok +} + +func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseAndCodeExpr")) + } + + state := p.cloneState() + + ok, err := and.run(p) + if err != nil { + p.addErr(err) + } + p.restoreState(state) + + return nil, ok +} + +func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseAndExpr")) + } + + pt := p.pt + state := p.cloneState() + p.pushV() + _, ok := p.parseExpr(and.expr) + p.popV() + p.restoreState(state) + p.restore(pt) + + return nil, ok +} + +func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseAnyMatcher")) + } + + if p.pt.rn == utf8.RuneError && p.pt.w == 0 { + // EOF - see utf8.DecodeRune + p.failAt(false, p.pt.position, ".") + return nil, false + } + start := p.pt + p.read() + p.failAt(true, start.position, ".") + return p.sliceFrom(start), true +} + +// nolint: gocyclo +func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseCharClassMatcher")) + } + + cur := p.pt.rn + start := p.pt + + // can't match EOF + if cur == utf8.RuneError && p.pt.w == 0 { // see utf8.DecodeRune + p.failAt(false, start.position, chr.val) + return nil, false + } + + if chr.ignoreCase { + cur = unicode.ToLower(cur) + } + + // try to match in the list of available chars + for _, rn := range chr.chars { + if rn == cur { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of ranges + for i := 0; i < len(chr.ranges); i += 2 { + if cur >= 
chr.ranges[i] && cur <= chr.ranges[i+1] { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of Unicode classes + for _, cl := range chr.classes { + if unicode.Is(cl, cur) { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + if chr.inverted { + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + p.failAt(false, start.position, chr.val) + return nil, false +} + +func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) { + choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col) + m := p.ChoiceAltCnt[choiceIdent] + if m == nil { + m = make(map[string]int) + p.ChoiceAltCnt[choiceIdent] = m + } + // We increment altI by 1, so the keys do not start at 0 + alt := strconv.Itoa(altI + 1) + if altI == choiceNoMatch { + alt = p.choiceNoMatch + } + m[alt]++ +} + +func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseChoiceExpr")) + } + + for altI, alt := range ch.alternatives { + // dummy assignment to prevent compile error if optimized + _ = altI + + state := p.cloneState() + + p.pushV() + val, ok := p.parseExpr(alt) + p.popV() + if ok { + p.incChoiceAltCnt(ch, altI) + return val, ok + } + p.restoreState(state) + } + p.incChoiceAltCnt(ch, choiceNoMatch) + return nil, false +} + +func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseLabeledExpr")) + } + + p.pushV() + val, ok := p.parseExpr(lab.expr) + p.popV() + if ok && lab.label != "" { + m := p.vstack[len(p.vstack)-1] + m[lab.label] = val + } + return val, ok +} + +func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) { + 
if p.debug { + defer p.out(p.in("parseLitMatcher")) + } + + start := p.pt + for _, want := range lit.val { + cur := p.pt.rn + if lit.ignoreCase { + cur = unicode.ToLower(cur) + } + if cur != want { + p.failAt(false, start.position, lit.want) + p.restore(start) + return nil, false + } + p.read() + } + p.failAt(true, start.position, lit.want) + return p.sliceFrom(start), true +} + +func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseNotCodeExpr")) + } + + state := p.cloneState() + + ok, err := not.run(p) + if err != nil { + p.addErr(err) + } + p.restoreState(state) + + return nil, !ok +} + +func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseNotExpr")) + } + + pt := p.pt + state := p.cloneState() + p.pushV() + p.maxFailInvertExpected = !p.maxFailInvertExpected + _, ok := p.parseExpr(not.expr) + p.maxFailInvertExpected = !p.maxFailInvertExpected + p.popV() + p.restoreState(state) + p.restore(pt) + + return nil, !ok +} + +func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseOneOrMoreExpr")) + } + + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + if len(vals) == 0 { + // did not match once, no match + return nil, false + } + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")")) + } + + p.pushRecovery(recover.failureLabel, recover.recoverExpr) + val, ok := p.parseExpr(recover.expr) + p.popRecovery() + + return val, ok +} + +func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseRuleRefExpr " + ref.name)) + } + + if ref.name == "" { + panic(fmt.Sprintf("%s: invalid rule: missing name", 
ref.pos)) + } + + rule := p.rules[ref.name] + if rule == nil { + p.addErr(fmt.Errorf("undefined rule: %s", ref.name)) + return nil, false + } + return p.parseRule(rule) +} + +func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseSeqExpr")) + } + + vals := make([]interface{}, 0, len(seq.exprs)) + + pt := p.pt + state := p.cloneState() + for _, expr := range seq.exprs { + val, ok := p.parseExpr(expr) + if !ok { + p.restoreState(state) + p.restore(pt) + return nil, false + } + vals = append(vals, val) + } + return vals, true +} + +func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseStateCodeExpr")) + } + + err := state.run(p) + if err != nil { + p.addErr(err) + } + return nil, true +} + +func (p *parser) parseThrowExpr(expr *throwExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseThrowExpr")) + } + + for i := len(p.recoveryStack) - 1; i >= 0; i-- { + if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok { + if val, ok := p.parseExpr(recoverExpr); ok { + return val, ok + } + } + } + + return nil, false +} + +func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseZeroOrMoreExpr")) + } + + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseZeroOrOneExpr")) + } + + p.pushV() + val, _ := p.parseExpr(expr.expr) + p.popV() + // whether it matched or not, consider it a match + return val, true +} + +func rangeTable(class string) *unicode.RangeTable { + if rt, ok := unicode.Categories[class]; ok { + return rt + } + if rt, ok := unicode.Properties[class]; ok { + return rt + } + if rt, ok := unicode.Scripts[class]; ok { + return rt + } + + // cannot 
happen + panic(fmt.Sprintf("invalid Unicode class: %s", class)) +} diff --git a/vendor/github.com/mna/pigeon/reserved_words.go b/vendor/github.com/mna/pigeon/reserved_words.go new file mode 100644 index 00000000000..127d27387d4 --- /dev/null +++ b/vendor/github.com/mna/pigeon/reserved_words.go @@ -0,0 +1,71 @@ +package main + +var reservedWords = map[string]bool{ + // Go keywords http://golang.org/ref/spec#Keywords + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "defer": true, + "else": true, + "fallthrough": true, + "for": true, + "func": true, + "goto": true, + "go": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, + + // predeclared identifiers http://golang.org/ref/spec#Predeclared_identifiers + "bool": true, + "byte": true, + "complex64": true, + "complex128": true, + "error": true, + "float32": true, + "float64": true, + "int8": true, + "int16": true, + "int32": true, + "int64": true, + "int": true, + "rune": true, + "string": true, + "uint8": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uintptr": true, + "uint": true, + "true": true, + "false": true, + "iota": true, + "nil": true, + "append": true, + "cap": true, + "close": true, + "complex": true, + "copy": true, + "delete": true, + "imag": true, + "len": true, + "make": true, + "new": true, + "panic": true, + "println": true, + "print": true, + "real": true, + "recover": true, +} diff --git a/vendor/github.com/mna/pigeon/unicode_classes.go b/vendor/github.com/mna/pigeon/unicode_classes.go new file mode 100644 index 00000000000..36001562967 --- /dev/null +++ b/vendor/github.com/mna/pigeon/unicode_classes.go @@ -0,0 +1,200 @@ +// This file is generated by the misc/cmd/unicode-classes tool. +// Do not edit. 
+ +package main + +var unicodeClasses = map[string]bool{ + "ASCII_Hex_Digit": true, + "Arabic": true, + "Armenian": true, + "Avestan": true, + "Balinese": true, + "Bamum": true, + "Bassa_Vah": true, + "Batak": true, + "Bengali": true, + "Bidi_Control": true, + "Bopomofo": true, + "Brahmi": true, + "Braille": true, + "Buginese": true, + "Buhid": true, + "C": true, + "Canadian_Aboriginal": true, + "Carian": true, + "Caucasian_Albanian": true, + "Cc": true, + "Cf": true, + "Chakma": true, + "Cham": true, + "Cherokee": true, + "Co": true, + "Common": true, + "Coptic": true, + "Cs": true, + "Cuneiform": true, + "Cypriot": true, + "Cyrillic": true, + "Dash": true, + "Deprecated": true, + "Deseret": true, + "Devanagari": true, + "Diacritic": true, + "Duployan": true, + "Egyptian_Hieroglyphs": true, + "Elbasan": true, + "Ethiopic": true, + "Extender": true, + "Georgian": true, + "Glagolitic": true, + "Gothic": true, + "Grantha": true, + "Greek": true, + "Gujarati": true, + "Gurmukhi": true, + "Han": true, + "Hangul": true, + "Hanunoo": true, + "Hebrew": true, + "Hex_Digit": true, + "Hiragana": true, + "Hyphen": true, + "IDS_Binary_Operator": true, + "IDS_Trinary_Operator": true, + "Ideographic": true, + "Imperial_Aramaic": true, + "Inherited": true, + "Inscriptional_Pahlavi": true, + "Inscriptional_Parthian": true, + "Javanese": true, + "Join_Control": true, + "Kaithi": true, + "Kannada": true, + "Katakana": true, + "Kayah_Li": true, + "Kharoshthi": true, + "Khmer": true, + "Khojki": true, + "Khudawadi": true, + "L": true, + "Lao": true, + "Latin": true, + "Lepcha": true, + "Limbu": true, + "Linear_A": true, + "Linear_B": true, + "Lisu": true, + "Ll": true, + "Lm": true, + "Lo": true, + "Logical_Order_Exception": true, + "Lt": true, + "Lu": true, + "Lycian": true, + "Lydian": true, + "M": true, + "Mahajani": true, + "Malayalam": true, + "Mandaic": true, + "Manichaean": true, + "Mc": true, + "Me": true, + "Meetei_Mayek": true, + "Mende_Kikakui": true, + "Meroitic_Cursive": 
true, + "Meroitic_Hieroglyphs": true, + "Miao": true, + "Mn": true, + "Modi": true, + "Mongolian": true, + "Mro": true, + "Myanmar": true, + "N": true, + "Nabataean": true, + "Nd": true, + "New_Tai_Lue": true, + "Nko": true, + "Nl": true, + "No": true, + "Noncharacter_Code_Point": true, + "Ogham": true, + "Ol_Chiki": true, + "Old_Italic": true, + "Old_North_Arabian": true, + "Old_Permic": true, + "Old_Persian": true, + "Old_South_Arabian": true, + "Old_Turkic": true, + "Oriya": true, + "Osmanya": true, + "Other_Alphabetic": true, + "Other_Default_Ignorable_Code_Point": true, + "Other_Grapheme_Extend": true, + "Other_ID_Continue": true, + "Other_ID_Start": true, + "Other_Lowercase": true, + "Other_Math": true, + "Other_Uppercase": true, + "P": true, + "Pahawh_Hmong": true, + "Palmyrene": true, + "Pattern_Syntax": true, + "Pattern_White_Space": true, + "Pau_Cin_Hau": true, + "Pc": true, + "Pd": true, + "Pe": true, + "Pf": true, + "Phags_Pa": true, + "Phoenician": true, + "Pi": true, + "Po": true, + "Ps": true, + "Psalter_Pahlavi": true, + "Quotation_Mark": true, + "Radical": true, + "Rejang": true, + "Runic": true, + "S": true, + "STerm": true, + "Samaritan": true, + "Saurashtra": true, + "Sc": true, + "Sharada": true, + "Shavian": true, + "Siddham": true, + "Sinhala": true, + "Sk": true, + "Sm": true, + "So": true, + "Soft_Dotted": true, + "Sora_Sompeng": true, + "Sundanese": true, + "Syloti_Nagri": true, + "Syriac": true, + "Tagalog": true, + "Tagbanwa": true, + "Tai_Le": true, + "Tai_Tham": true, + "Tai_Viet": true, + "Takri": true, + "Tamil": true, + "Telugu": true, + "Terminal_Punctuation": true, + "Thaana": true, + "Thai": true, + "Tibetan": true, + "Tifinagh": true, + "Tirhuta": true, + "Ugaritic": true, + "Unified_Ideograph": true, + "Vai": true, + "Variation_Selector": true, + "Warang_Citi": true, + "White_Space": true, + "Yi": true, + "Z": true, + "Zl": true, + "Zp": true, + "Zs": true, +} diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go 
b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go new file mode 100644 index 00000000000..150f887e7a4 --- /dev/null +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be +// compiled the first time it is needed. +type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. 
+func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go new file mode 100644 index 00000000000..2a364b229b9 --- /dev/null +++ b/vendor/golang.org/x/mod/module/module.go @@ -0,0 +1,841 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type along with support code. +// +// The [module.Version] type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably [Check], verify that +// a particular path, version pair is valid. +// +// # Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. 
This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. +// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// # Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. 
+// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/mod/semver" +) + +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. +type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". + Path string + + // Version is usually a semantic version in canonical form. + // There are three exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. 
+ Version string `json:",omitempty"` +} + +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). +func (m Version) String() string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a [ModuleError] derived from a [Version] and error, +// or err itself if it is already such an error. +func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. 
+func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + +// An InvalidPathError indicates a module, import, or file path doesn't +// satisfy all naming constraints. See [CheckPath], [CheckImportPath], +// and [CheckFilePath] for specific restrictions. +type InvalidPathError struct { + Kind string // "module", "import", or "file" + Path string + Err error +} + +func (e *InvalidPathError) Error() string { + return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err) +} + +func (e *InvalidPathError) Unwrap() error { return e.Err } + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return &ModuleError{ + Path: path, + Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, + } + } + _, pathMajor, _ := SplitPathVersion(path) + if err := CheckPathMajor(version, pathMajor); err != nil { + return &ModuleError{Path: path, Err: err} + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' || + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// modPathOK reports whether r can appear in a module path element. 
+// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// +// This matches what "go get" has historically recognized in import paths, +// and avoids confusing sequences like '%20' or '+' that would change meaning +// if used in a URL. +// +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see "escaped paths" above). +func modPathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// importPathOK reports whether r can appear in a package import path element. +// +// Import paths are intermediate between module paths and file paths: we allow +// disallow characters that would be confusing or ambiguous as arguments to +// 'go get' (such as '@' and ' ' ), but allow certain characters that are +// otherwise-unambiguous on the command line and historically used for some +// binary names (such as '++' as a suffix for compiler binaries and wrappers). +func importPathOK(r rune) bool { + return modPathOK(r) || r == '+' +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "escaped paths" above. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). 
+ // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + return strings.ContainsRune(allowed, r) + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +// A valid module path is a valid import path, as checked by [CheckImportPath], +// with three additional constraints. +// First, the leading path element (up to the first slash, if any), +// by convention a domain name, must contain only lower-case ASCII letters, +// ASCII digits, dots (U+002E), and dashes (U+002D); +// it must contain at least one dot and cannot start with a dash. +// Second, for a final path element of the form /vN, where N looks numeric +// (ASCII digits and dots) must not begin with a leading zero, must not be /v1, +// and must not contain any dots. For paths beginning with "gopkg.in/", +// this second requirement is replaced by a requirement that the path +// follow the gopkg.in server's conventions. +// Third, no path element may begin with a dot. 
+func CheckPath(path string) (err error) { + defer func() { + if err != nil { + err = &InvalidPathError{Kind: "module", Path: path, Err: err} + } + }() + + if err := checkPath(path, modulePath); err != nil { + return err + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("leading slash") + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("missing dot in first path element") + } + if path[0] == '-' { + return fmt.Errorf("leading dash in first path element") + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("invalid char %q in first path element", r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("invalid version") + } + return nil +} + +// CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) +// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// It must not end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). The element +// must not have a suffix of a tilde followed by one or more ASCII digits +// (to exclude paths elements that look like Windows short-names). +// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckImportPath(path string) error { + if err := checkPath(path, importPath); err != nil { + return &InvalidPathError{Kind: "import", Path: path, Err: err} + } + return nil +} + +// pathKind indicates what kind of path we're checking. Module paths, +// import paths, and file paths have different restrictions. 
+type pathKind int + +const ( + modulePath pathKind = iota + importPath + filePath +) + +// checkPath checks that a general path is valid. kind indicates what +// specific constraints should be applied. +// +// checkPath returns an error describing why the path is not valid. +// Because these checks apply to module, import, and file paths, +// and because other checks may be applied, the caller is expected to wrap +// this error with [InvalidPathError]. +func checkPath(path string, kind pathKind) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if path[0] == '-' && kind != filePath { + return fmt.Errorf("leading dash") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], kind); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], kind); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +func checkElem(elem string, kind pathKind) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && kind == modulePath { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + for _, r := range elem { + ok := false + switch kind { + case modulePath: + ok = modPathOK(r) + case importPath: + ok = importPathOK(r) + case filePath: + ok = fileNameOK(r) + default: + panic(fmt.Sprintf("internal error: invalid kind %v", kind)) + } + if !ok { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. 
+ // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("%q disallowed as path element component on Windows", short) + } + } + + if kind == filePath { + // don't check for Windows short-names in file names. They're + // only an issue for import paths. + return nil + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + + return nil +} + +// CheckFilePath checks that a slash-separated file path is valid. +// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckFilePath(path string) error { + if err := checkPath(path, filePath); err != nil { + return &InvalidPathError{Kind: "file", Path: path, Err: err} + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. 
+// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' { + dot = true + } + i-- + } + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. 
+ return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +// +// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil. +func MatchPathMajor(v, pathMajor string) bool { + return CheckPathMajor(v, pathMajor) == nil +} + +// CheckPathMajor returns a non-nil error if the semantic version v +// does not match the path major version pathMajor. +func CheckPathMajor(v, pathMajor string) error { + // TODO(jayconrod): return errors or panic for invalid inputs. This function + // (and others) was covered by integration tests for cmd/go, and surrounding + // code protected against invalid inputs like non-canonical versions. + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. + // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. + return nil + } + m := semver.Major(v) + if pathMajor == "" { + if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { + return nil + } + pathMajor = "v0 or v1" + } else if pathMajor[0] == '/' || pathMajor[0] == '.' { + if m == pathMajor[1:] { + return nil + } + pathMajor = pathMajor[1:] + } + return &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), + } +} + +// PathMajorPrefix returns the major-version tag prefix implied by pathMajor. +// An empty PathMajorPrefix allows either v0 or v1. 
+// +// Note that [MatchPathMajor] may accept some versions that do not actually begin +// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' +// pathMajor, even though that pathMajor implies 'v1' tagging. +func PathMajorPrefix(pathMajor string) string { + if pathMajor == "" { + return "" + } + if pathMajor[0] != '/' && pathMajor[0] != '.' { + panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") + } + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + m := pathMajor[1:] + if m != semver.Major(m) { + panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") + } + return m +} + +// CanonicalVersion returns the canonical form of the version string v. +// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing [Version] fields. +// The Version fields are interpreted as semantic versions (using [semver.Compare]) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. 
+ vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// EscapePath returns the escaped form of the given module path. +// It fails if the module path is invalid. +func EscapePath(path string) (escaped string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return escapeString(path) +} + +// EscapeVersion returns the escaped form of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EscapeVersion(v string) (escaped string, err error) { + if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } + } + return escapeString(v) +} + +func escapeString(s string) (escaped string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the escaping loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EscapePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. 
+func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped module path %q", escaped) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) + } + return path, nil +} + +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped version %q", escaped) + } + if err := checkElem(v, filePath); err != nil { + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) + } + return v, nil +} + +func unescapeString(escaped string) (string, bool) { + var buf []byte + + bang := false + for _, r := range escaped { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} + +// MatchPrefixPatterns reports whether any path prefix of target matches one of +// the glob patterns (as defined by [path.Match]) in the comma-separated globs +// list. This implements the algorithm used when matching a module path to the +// GOPRIVATE environment variable, as described by 'go help module-private'. +// +// It ignores any empty or malformed patterns in the list. +// Trailing slashes on patterns are ignored. +func MatchPrefixPatterns(globs, target string) bool { + for globs != "" { + // Extract next non-empty glob in comma-separated list. 
+ var glob string + if i := strings.Index(globs, ","); i >= 0 { + glob, globs = globs[:i], globs[i+1:] + } else { + glob, globs = globs, "" + } + glob = strings.TrimSuffix(glob, "/") + if glob == "" { + continue + } + + // A glob with N+1 path elements (N slashes) needs to be matched + // against the first N+1 path elements of target, + // which end just before the N+1'th slash. + n := strings.Count(glob, "/") + prefix := target + // Walk target, counting slashes, truncating at the N+1'th slash. + for i := 0; i < len(target); i++ { + if target[i] == '/' { + if n == 0 { + prefix = target[:i] + break + } + n-- + } + } + if n > 0 { + // Not enough prefix elements. + continue + } + matched, _ := path.Match(glob, prefix) + if matched { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go new file mode 100644 index 00000000000..9cf19d3254e --- /dev/null +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Pseudo-versions +// +// Code authors are expected to tag the revisions they want users to use, +// including prereleases. However, not all authors tag versions at all, +// and not all commits a user might want to try will have tags. +// A pseudo-version is a version with a special form that allows us to +// address an untagged commit and order that version with respect to +// other versions we might encounter. 
+// +// A pseudo-version takes one of the general forms: +// +// (1) vX.0.0-yyyymmddhhmmss-abcdef123456 +// (2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 +// (3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible +// (4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 +// (5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible +// +// If there is no recently tagged version with the right major version vX, +// then form (1) is used, creating a space of pseudo-versions at the bottom +// of the vX version range, less than any tagged version, including the unlikely v0.0.0. +// +// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible, +// then the pseudo-version uses form (2) or (3), making it a prerelease for the next +// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string +// ensures that the pseudo-version compares less than possible future explicit prereleases +// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1. +// +// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible, +// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease. + +package module + +import ( + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/semver" +) + +var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`) + +const PseudoVersionTimestampFormat = "20060102150405" + +// PseudoVersion returns a pseudo-version for the given major version ("v1") +// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time, +// and revision identifier (usually a 12-byte commit hash prefix). 
+func PseudoVersion(major, older string, t time.Time, rev string) string { + if major == "" { + major = "v0" + } + segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev) + build := semver.Build(older) + older = semver.Canonical(older) + if older == "" { + return major + ".0.0-" + segment // form (1) + } + if semver.Prerelease(older) != "" { + return older + ".0." + segment + build // form (4), (5) + } + + // Form (2), (3). + // Extract patch from vMAJOR.MINOR.PATCH + i := strings.LastIndex(older, ".") + 1 + v, patch := older[:i], older[i:] + + // Reassemble. + return v + incDecimal(patch) + "-0." + segment + build +} + +// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and +// revision, which may be used as a placeholder. +func ZeroPseudoVersion(major string) string { + return PseudoVersion(major, "", time.Time{}, "000000000000") +} + +// incDecimal returns the decimal string incremented by 1. +func incDecimal(decimal string) string { + // Scan right to left turning 9s to 0s until you find a digit to increment. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '9'; i-- { + digits[i] = '0' + } + if i >= 0 { + digits[i]++ + } else { + // digits is all zeros + digits[0] = '1' + digits = append(digits, '0') + } + return string(digits) +} + +// decDecimal returns the decimal string decremented by 1, or the empty string +// if the decimal is all zeroes. +func decDecimal(decimal string) string { + // Scan right to left turning 0s to 9s until you find a digit to decrement. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '0'; i-- { + digits[i] = '9' + } + if i < 0 { + // decimal is all zeros + return "" + } + if i == 0 && digits[i] == '1' && len(digits) > 1 { + digits = digits[1:] + } else { + digits[i]-- + } + return string(digits) +} + +// IsPseudoVersion reports whether v is a pseudo-version. 
+func IsPseudoVersion(v string) bool { + return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v) +} + +// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, +// timestamp, and revision, as returned by [ZeroPseudoVersion]. +func IsZeroPseudoVersion(v string) bool { + return v == ZeroPseudoVersion(semver.Major(v)) +} + +// PseudoVersionTime returns the time stamp of the pseudo-version v. +// It returns an error if v is not a pseudo-version or if the time stamp +// embedded in the pseudo-version is not a valid time. +func PseudoVersionTime(v string) (time.Time, error) { + _, timestamp, _, _, err := parsePseudoVersion(v) + if err != nil { + return time.Time{}, err + } + t, err := time.Parse("20060102150405", timestamp) + if err != nil { + return time.Time{}, &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("malformed time %q", timestamp), + } + } + return t, nil +} + +// PseudoVersionRev returns the revision identifier of the pseudo-version v. +// It returns an error if v is not a pseudo-version. +func PseudoVersionRev(v string) (rev string, err error) { + _, _, rev, _, err = parsePseudoVersion(v) + return +} + +// PseudoVersionBase returns the canonical parent version, if any, upon which +// the pseudo-version v is based. +// +// If v has no parent version (that is, if it is "vX.0.0-[…]"), +// PseudoVersionBase returns the empty string and a nil error. 
+func PseudoVersionBase(v string) (string, error) { + base, _, _, build, err := parsePseudoVersion(v) + if err != nil { + return "", err + } + + switch pre := semver.Prerelease(base); pre { + case "": + // vX.0.0-yyyymmddhhmmss-abcdef123456 → "" + if build != "" { + // Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible + // are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag, + // but the "+incompatible" suffix implies that the major version of + // the parent tag is not compatible with the module's import path. + // + // There are a few such entries in the index generated by proxy.golang.org, + // but we believe those entries were generated by the proxy itself. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("lacks base version, but has build metadata %q", build), + } + } + return "", nil + + case "-0": + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible + base = strings.TrimSuffix(base, pre) + i := strings.LastIndexByte(base, '.') + if i < 0 { + panic("base from parsePseudoVersion missing patch number: " + base) + } + patch := decDecimal(base[i+1:]) + if patch == "" { + // vX.0.0-0 is invalid, but has been observed in the wild in the index + // generated by requests to proxy.golang.org. + // + // NOTE(bcmills): I cannot find a historical bug that accounts for + // pseudo-versions of this form, nor have I seen such versions in any + // actual go.mod files. If we find actual examples of this form and a + // reasonable theory of how they came into existence, it seems fine to + // treat them as equivalent to vX.0.0 (especially since the invalid + // pseudo-versions have lower precedence than the real ones). For now, we + // reject them. 
+ return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("version before %s would have negative patch number", base), + } + } + return base[:i+1] + patch + build, nil + + default: + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible + if !strings.HasSuffix(base, ".0") { + panic(`base from parsePseudoVersion missing ".0" before date: ` + base) + } + return strings.TrimSuffix(base, ".0") + build, nil + } +} + +var errPseudoSyntax = errors.New("syntax error") + +func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) { + if !IsPseudoVersion(v) { + return "", "", "", "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: errPseudoSyntax, + } + } + build = semver.Build(v) + v = strings.TrimSuffix(v, build) + j := strings.LastIndex(v, "-") + v, rev = v[:j], v[j+1:] + i := strings.LastIndex(v, "-") + if j := strings.LastIndex(v, "."); j > i { + base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0" + timestamp = v[j+1:] + } else { + base = v[:i] // "vX.0.0" + timestamp = v[i+1:] + } + return base, timestamp, rev, build, nil +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000000..9fa5aa192c2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,636 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" + + "golang.org/x/tools/internal/typeparams" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. 
+// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). 
+ var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) 
+ if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + // Ensure [start,end) is nondecreasing. + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. 
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + 
// nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), // or len("[") + tok(n.Closing, len(")"))) // or len("]") + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := typeparams.ForFuncType(n.Type); tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
+ + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *typeparams.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? 
+ + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. 
+func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function 
type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *typeparams.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000000..18d1adb05dd --- /dev/null +++ 
b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,485 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. 
+ if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. 
+ insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) 
+ i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. 
+ if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. 
+func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000000..f430b21b9b9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,488 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" + + "golang.org/x/tools/internal/typeparams" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. 
+// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. 
+// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. 
+func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + 
a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *typeparams.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := typeparams.ForFuncType(n); tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case 
*ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, 
n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := typeparams.ForTypeSpec(n); tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000000..919d5305ab4 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go new file mode 100644 index 00000000000..d2547c74338 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. 
+package imports // import "golang.org/x/tools/imports" + +import ( + "io/ioutil" + "log" + + "golang.org/x/tools/internal/gocommand" + intimp "golang.org/x/tools/internal/imports" +) + +// Options specifies options for processing files. +type Options struct { + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Debug controls verbose logging. +var Debug = false + +// LocalPrefix is a comma-separated string of import path prefixes, which, if +// set, instructs Process to sort the import paths with the given prefixes +// into another group after 3rd-party packages. +var LocalPrefix string + +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used, and if src is nil the source +// is read from the filesystem. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data “as if” it were in filename, pass the data as a non-nil src. 
+func Process(filename string, src []byte, opt *Options) ([]byte, error) { + var err error + if src == nil { + src, err = ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + } + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + intopt := &intimp.Options{ + Env: &intimp.ProcessEnv{ + GocmdRunner: &gocommand.Runner{}, + }, + LocalPrefix: LocalPrefix, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + if Debug { + intopt.Env.Logf = log.Printf + } + return intimp.Process(filename, src, intopt) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go new file mode 100644 index 00000000000..c40c7e93106 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -0,0 +1,196 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fastwalk provides a faster version of [filepath.Walk] for file system +// scanning tools. +package fastwalk + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the +// symlink named in the call may be traversed. +var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") + +// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the +// callback should not be called for any other files in the current directory. +// Child directories will still be traversed. 
+var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") + +// Walk is a faster implementation of [filepath.Walk]. +// +// [filepath.Walk]'s design necessarily calls [os.Lstat] on each file, +// even if the caller needs less info. +// Many tools need only the type of each file. +// On some platforms, this information is provided directly by the readdir +// system call, avoiding the need to stat each file individually. +// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399. +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If Walk returns [filepath.SkipDir], the directory is skipped. +// +// Unlike [filepath.Walk]: +// - file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// - multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// - Walk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// Walk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. 
+ var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. 
+func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == ErrTraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. + return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go new file mode 100644 index 00000000000..0ca55e0d56f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go @@ -0,0 +1,119 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin && cgo +// +build darwin,cgo + +package fastwalk + +/* +#include + +// fastwalk_readdir_r wraps readdir_r so that we don't have to pass a dirent** +// result pointer which triggers CGO's "Go pointer to Go pointer" check unless +// we allocat the result dirent* with malloc. +// +// fastwalk_readdir_r returns 0 on success, -1 upon reaching the end of the +// directory, or a positive error number to indicate failure. +static int fastwalk_readdir_r(DIR *fd, struct dirent *entry) { + struct dirent *result; + int ret = readdir_r(fd, entry, &result); + if (ret == 0 && result == NULL) { + ret = -1; // EOF + } + return ret; +} +*/ +import "C" + +import ( + "os" + "syscall" + "unsafe" +) + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := openDir(dirName) + if err != nil { + return &os.PathError{Op: "opendir", Path: dirName, Err: err} + } + defer C.closedir(fd) + + skipFiles := false + var dirent syscall.Dirent + for { + ret := int(C.fastwalk_readdir_r(fd, (*C.struct_dirent)(unsafe.Pointer(&dirent)))) + if ret != 0 { + if ret == -1 { + break // EOF + } + if ret == int(syscall.EINTR) { + continue + } + return &os.PathError{Op: "readdir", Path: dirName, Err: syscall.Errno(ret)} + } + if dirent.Ino == 0 { + continue + } + typ := dtToType(dirent.Type) + if skipFiles && typ.IsRegular() { + continue + } + name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:] + name = name[:dirent.Namlen] + for i, c := range name { + if c == 0 { + name = name[:i] + break + } + } + // Check for useless names before allocating a string. + if string(name) == "." || string(name) == ".." 
{ + continue + } + if err := fn(dirName, string(name), typ); err != nil { + if err != ErrSkipFiles { + return err + } + skipFiles = true + } + } + + return nil +} + +func dtToType(typ uint8) os.FileMode { + switch typ { + case syscall.DT_BLK: + return os.ModeDevice + case syscall.DT_CHR: + return os.ModeDevice | os.ModeCharDevice + case syscall.DT_DIR: + return os.ModeDir + case syscall.DT_FIFO: + return os.ModeNamedPipe + case syscall.DT_LNK: + return os.ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return os.ModeSocket + } + return ^os.FileMode(0) +} + +// openDir wraps opendir(3) and handles any EINTR errors. The returned *DIR +// needs to be closed with closedir(3). +func openDir(path string) (*C.DIR, error) { + name, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + for { + fd, err := C.opendir((*C.char)(unsafe.Pointer(name))) + if err != syscall.EINTR { + return fd, err + } + } +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 00000000000..d58595dbd3f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd || openbsd || netbsd +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 00000000000..d3922890b0b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (linux || (darwin && !cgo)) && !appengine +// +build linux darwin,!cgo +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return dirent.Ino +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 00000000000..38a4db6af3a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin && !cgo) || freebsd || openbsd || netbsd +// +build darwin,!cgo freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 00000000000..c82e57df85e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux && !appengine +// +build linux,!appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 00000000000..085d311600b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build appengine || (!linux && !darwin && !freebsd && !openbsd && !netbsd) +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. 
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == ErrSkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 00000000000..f12f1a734cc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,153 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (linux || freebsd || openbsd || netbsd || (darwin && !cgo)) && !appengine +// +build linux freebsd openbsd netbsd darwin,!cgo +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. 
+ buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = readDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. + if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == ErrSkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/37269 + dirent := &syscall.Dirent{} + copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. 
+ return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} + +// According to https://golang.org/doc/go1.14#runtime +// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS +// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases. +// +// This causes syscall.Open and syscall.ReadDirent sometimes fail with EINTR errors. +// We need to retry in this case. 
+func open(path string, mode int, perm uint32) (fd int, err error) { + for { + fd, err := syscall.Open(path, mode, perm) + if err != syscall.EINTR { + return fd, err + } + } +} + +func readDirent(fd int, buf []byte) (n int, err error) { + for { + nbuf, err := syscall.ReadDirent(fd, buf) + if err != syscall.EINTR { + return nbuf, err + } + } +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 00000000000..452e342c559 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,260 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "log" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. + ModulesEnabled bool +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. 
+func Walk(roots []Root, add func(root Root, dir string), opts Options) { + WalkSkip(roots, add, func(Root, string) bool { return false }, opts) +} + +// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// For each directory that will be scanned, skip will be called (concurrently) +// with the absolute paths of the containing source directory and the directory. +// If skip returns false on a directory it will be processed. +// add will be called concurrently. +// skip will be called concurrently. +func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { + for _, root := range roots { + walkDir(root, add, skip, opts) + } +} + +// walkDir creates a walker and starts fastwalk with this walker. +func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Logf != nil { + opts.Logf("skipping nonexistent directory: %v", root.Path) + } + return + } + start := time.Now() + if opts.Logf != nil { + opts.Logf("scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + skip: skip, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + logf := opts.Logf + if logf == nil { + logf = log.Printf + } + logf("scanning directory %v: %v", root.Path, err) + } + + if opts.Logf != nil { + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. 
+ opts Options // Options passed to Walk by the user. + + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options +func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Logf != nil { + w.opts.Logf("Directory added to ignore list: %s", full) + } + } else if w.opts.Logf != nil { + w.opts.Logf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. +func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := os.ReadFile(file) + if w.opts.Logf != nil { + if err != nil { + w.opts.Logf("%v", err) + } else { + w.opts.Logf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +// shouldSkipDir reports whether the file should be skipped or not. +func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + if w.skip != nil { + // Check with the user specified callback. 
+ return w.skip(w.root, dir) + } + return false +} + +// walk walks through the given path. +func (w *walker) walk(path string, typ os.FileMode) error { + if typ.IsRegular() { + dir := filepath.Dir(path) + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + return fastwalk.ErrSkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.ErrSkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi, path) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + if w.shouldTraverse(path) { + return fastwalk.ErrTraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(path string) bool { + ts, err := os.Stat(path) + if err != nil { + logf := w.opts.Logf + if logf == nil { + logf = log.Printf + } + logf("%v", err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts, filepath.Dir(path)) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. 
+ return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go new file mode 100644 index 00000000000..d4f1b4e8a0f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -0,0 +1,1766 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" +) + +// importToGroup is a list of functions which map from an import path to +// a group number. 
+var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { + return + } + for _, p := range strings.Split(localPrefix, ",") { + if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { + return 3, true + } + } + return + }, + func(_, importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(_, importPath string) (num int, ok bool) { + firstComponent := strings.Split(importPath, "/")[0] + if strings.Contains(firstComponent, ".") { + return 1, true + } + return + }, +} + +func importGroup(localPrefix, importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(localPrefix, importPath); ok { + return n + } + } + return 0 +} + +type ImportFixType int + +const ( + AddImport ImportFixType = iota + DeleteImport + SetImportName +) + +type ImportFix struct { + // StmtInfo represents the import statement this fix will add, remove, or change. + StmtInfo ImportInfo + // IdentName is the identifier that this fix will add or remove. + IdentName string + // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). + FixType ImportFixType + Relevance float64 // see pkg +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A packageInfo represents what's known about a package. +type packageInfo struct { + name string // real package name, if known. + exports map[string]bool // known exports. +} + +// parseOtherFiles parses all the Go files in srcDir except filename, including +// test files if filename looks like a test. 
+func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { + // This could use go/packages but it doesn't buy much, and it fails + // with https://golang.org/issue/26296 in LoadFiles mode in some cases. + considerTests := strings.HasSuffix(filename, "_test.go") + + fileBase := filepath.Base(filename) + packageFileInfos, err := ioutil.ReadDir(srcDir) + if err != nil { + return nil + } + + var files []*ast.File + for _, fi := range packageFileInfos { + if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { + continue + } + + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + if err != nil { + continue + } + + files = append(files, f) + } + + return files +} + +// addGlobals puts the names of package vars into the provided map. +func addGlobals(f *ast.File, globals map[string]bool) { + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range genDecl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + globals[valueSpec.Names[0].Name] = true + } + } +} + +// collectReferences builds a map of selector expressions, from +// left hand side (X) to a set of right hand sides (Sel). +func collectReferences(f *ast.File) references { + refs := references{} + + var visitor visitFn + visitor = func(node ast.Node) ast.Visitor { + if node == nil { + return visitor + } + switch v := node.(type) { + case *ast.SelectorExpr: + xident, ok := v.X.(*ast.Ident) + if !ok { + break + } + if xident.Obj != nil { + // If the parser can resolve it, it's not a package ref. + break + } + if !ast.IsExported(v.Sel.Name) { + // Whatever this is, it's not exported from a package. 
+ break + } + pkgName := xident.Name + r := refs[pkgName] + if r == nil { + r = make(map[string]bool) + refs[pkgName] = r + } + r[v.Sel.Name] = true + } + return visitor + } + ast.Walk(visitor, f) + return refs +} + +// collectImports returns all the imports in f. +// Unnamed imports (., _) and "C" are ignored. +func collectImports(f *ast.File) []*ImportInfo { + var imports []*ImportInfo + for _, imp := range f.Imports { + var name string + if imp.Name != nil { + name = imp.Name.Name + } + if imp.Path.Value == `"C"` || name == "_" || name == "." { + continue + } + path := strings.Trim(imp.Path.Value, `"`) + imports = append(imports, &ImportInfo{ + Name: name, + ImportPath: path, + }) + } + return imports +} + +// findMissingImport searches pass's candidates for an import that provides +// pkg, containing all of syms. +func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { + for _, candidate := range p.candidates { + pkgInfo, ok := p.knownPackages[candidate.ImportPath] + if !ok { + continue + } + if p.importIdentifier(candidate) != pkg { + continue + } + + allFound := true + for right := range syms { + if !pkgInfo.exports[right] { + allFound = false + break + } + } + + if allFound { + return candidate + } + } + return nil +} + +// references is set of references found in a Go file. The first map key is the +// left hand side of a selector expression, the second key is the right hand +// side, and the value should always be true. +type references map[string]map[string]bool + +// A pass contains all the inputs and state necessary to fix a file's imports. +// It can be modified in some ways during use; see comments below. +type pass struct { + // Inputs. These must be set before a call to load, and not modified after. + fset *token.FileSet // fset used to parse f and its siblings. + f *ast.File // the file being fixed. + srcDir string // the directory containing f. + env *ProcessEnv // the environment to use for go commands, etc. 
+ loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + + // Intermediate state, generated by load. + existingImports map[string]*ImportInfo + allRefs references + missingRefs references + + // Inputs to fix. These can be augmented between successive fix calls. + lastTry bool // indicates that this is the last call and fix should clean up as best it can. + candidates []*ImportInfo // candidate imports in priority order. + knownPackages map[string]*packageInfo // information about all known packages. +} + +// loadPackageNames saves the package names for everything referenced by imports. +func (p *pass) loadPackageNames(imports []*ImportInfo) error { + if p.env.Logf != nil { + p.env.Logf("loading package names for %v packages", len(imports)) + defer func() { + p.env.Logf("done loading package names for %v packages", len(imports)) + }() + } + var unknown []string + for _, imp := range imports { + if _, ok := p.knownPackages[imp.ImportPath]; ok { + continue + } + unknown = append(unknown, imp.ImportPath) + } + + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) + if err != nil { + return err + } + + for path, name := range names { + p.knownPackages[path] = &packageInfo{ + name: name, + exports: map[string]bool{}, + } + } + return nil +} + +// importIdentifier returns the identifier that imp will introduce. It will +// guess if the package name has not been loaded, e.g. because the source +// is not available. +func (p *pass) importIdentifier(imp *ImportInfo) string { + if imp.Name != "" { + return imp.Name + } + known := p.knownPackages[imp.ImportPath] + if known != nil && known.name != "" { + return known.name + } + return ImportPathToAssumedName(imp.ImportPath) +} + +// load reads in everything necessary to run a pass, and reports whether the +// file already has all the imports it needs. 
It fills in p.missingRefs with the +// file's missing symbols, if any, or removes unused imports if not. +func (p *pass) load() ([]*ImportFix, bool) { + p.knownPackages = map[string]*packageInfo{} + p.missingRefs = references{} + p.existingImports = map[string]*ImportInfo{} + + // Load basic information about the file in question. + p.allRefs = collectReferences(p.f) + + // Load stuff from other files in the same package: + // global variables so we know they don't need resolving, and imports + // that we might want to mimic. + globals := map[string]bool{} + for _, otherFile := range p.otherFiles { + // Don't load globals from files that are in the same directory + // but a different package. Using them to suggest imports is OK. + if p.f.Name.Name == otherFile.Name.Name { + addGlobals(otherFile, globals) + } + p.candidates = append(p.candidates, collectImports(otherFile)...) + } + + // Resolve all the import paths we've seen to package names, and store + // f's imports by the identifier they introduce. + imports := collectImports(p.f) + if p.loadRealPackageNames { + err := p.loadPackageNames(append(imports, p.candidates...)) + if err != nil { + if p.env.Logf != nil { + p.env.Logf("loading package names: %v", err) + } + return nil, false + } + } + for _, imp := range imports { + p.existingImports[p.importIdentifier(imp)] = imp + } + + // Find missing references. + for left, rights := range p.allRefs { + if globals[left] { + continue + } + _, ok := p.existingImports[left] + if !ok { + p.missingRefs[left] = rights + continue + } + } + if len(p.missingRefs) != 0 { + return nil, false + } + + return p.fix() +} + +// fix attempts to satisfy missing imports using p.candidates. If it finds +// everything, or if p.lastTry is true, it updates fixes to add the imports it found, +// delete anything unused, and update import names, and returns true. +func (p *pass) fix() ([]*ImportFix, bool) { + // Find missing imports. 
+ var selected []*ImportInfo + for left, rights := range p.missingRefs { + if imp := p.findMissingImport(left, rights); imp != nil { + selected = append(selected, imp) + } + } + + if !p.lastTry && len(selected) != len(p.missingRefs) { + return nil, false + } + + // Found everything, or giving up. Add the new imports and remove any unused. + var fixes []*ImportFix + for _, imp := range p.existingImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } + + // An existing import may need to update its import name to be correct. + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } + } + // Collecting fixes involved map iteration, so sort for stability. See + // golang/go#59976. + sortFixes(fixes) + + // collect selected fixes in a separate slice, so that it can be sorted + // separately. Note that these fixes must occur after fixes to existing + // imports. TODO(rfindley): figure out why. 
+ var selectedFixes []*ImportFix + for _, imp := range selected { + selectedFixes = append(selectedFixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: p.importSpecName(imp), + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: AddImport, + }) + } + sortFixes(selectedFixes) + + return append(fixes, selectedFixes...), true +} + +func sortFixes(fixes []*ImportFix) { + sort.Slice(fixes, func(i, j int) bool { + fi, fj := fixes[i], fixes[j] + if fi.StmtInfo.ImportPath != fj.StmtInfo.ImportPath { + return fi.StmtInfo.ImportPath < fj.StmtInfo.ImportPath + } + if fi.StmtInfo.Name != fj.StmtInfo.Name { + return fi.StmtInfo.Name < fj.StmtInfo.Name + } + if fi.IdentName != fj.IdentName { + return fi.IdentName < fj.IdentName + } + return fi.FixType < fj.FixType + }) +} + +// importSpecName gets the import name of imp in the import spec. +// +// When the import identifier matches the assumed import name, the import name does +// not appear in the import spec. +func (p *pass) importSpecName(imp *ImportInfo) string { + // If we did not load the real package names, or the name is already set, + // we just return the existing name. + if !p.loadRealPackageNames || imp.Name != "" { + return imp.Name + } + + ident := p.importIdentifier(imp) + if ident == ImportPathToAssumedName(imp.ImportPath) { + return "" // ident not needed since the assumed and real names are the same. + } + return ident +} + +// apply will perform the fixes on f in order. +func apply(fset *token.FileSet, f *ast.File, fixes []*ImportFix) { + for _, fix := range fixes { + switch fix.FixType { + case DeleteImport: + astutil.DeleteNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case AddImport: + astutil.AddNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case SetImportName: + // Find the matching import path and change the name. 
+ for _, spec := range f.Imports { + path := strings.Trim(spec.Path.Value, `"`) + if path == fix.StmtInfo.ImportPath { + spec.Name = &ast.Ident{ + Name: fix.StmtInfo.Name, + NamePos: spec.Pos(), + } + } + } + } + } +} + +// assumeSiblingImportsValid assumes that siblings' use of packages is valid, +// adding the exports they use. +func (p *pass) assumeSiblingImportsValid() { + for _, f := range p.otherFiles { + refs := collectReferences(f) + imports := collectImports(f) + importsByName := map[string]*ImportInfo{} + for _, imp := range imports { + importsByName[p.importIdentifier(imp)] = imp + } + for left, rights := range refs { + if imp, ok := importsByName[left]; ok { + if m, ok := stdlib[imp.ImportPath]; ok { + // We have the stdlib in memory; no need to guess. + rights = copyExports(m) + } + p.addCandidate(imp, &packageInfo{ + // no name; we already know it. + exports: rights, + }) + } + } + } +} + +// addCandidate adds a candidate import to p, and merges in the information +// in pkg. +func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { + p.candidates = append(p.candidates, imp) + if existing, ok := p.knownPackages[imp.ImportPath]; ok { + if existing.name == "" { + existing.name = pkg.name + } + for export := range pkg.exports { + existing.exports[export] = true + } + } else { + p.knownPackages[imp.ImportPath] = pkg + } +} + +// fixImports adds and removes imports from f so that all its references are +// satisfied and there are no unused imports. +// +// This is declared as a variable rather than a function so goimports can +// easily be extended by adding a file with an init function. 
+var fixImports = fixImportsDefault + +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { + fixes, err := getFixes(context.Background(), fset, f, filename, env) + if err != nil { + return err + } + apply(fset, f, fixes) + return err +} + +// getFixes gets the import fixes that need to be made to f in order to fix the imports. +// It does not modify the ast. +func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + if env.Logf != nil { + env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + } + + // First pass: looking only at f, and using the naive algorithm to + // derive package names from import paths, see if the file is already + // complete. We can't add any imports yet, because we don't know + // if missing references are actually package vars. + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} + if fixes, done := p.load(); done { + return fixes, nil + } + + otherFiles := parseOtherFiles(fset, srcDir, filename) + + // Second pass: add information from other files in the same package, + // like their package vars and imports. + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + // Now we can try adding imports from the stdlib. + p.assumeSiblingImportsValid() + addStdlibCandidates(p, p.missingRefs) + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Third pass: get real package names where we had previously used + // the naive algorithm. 
+ p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p.loadRealPackageNames = true + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + if err := addStdlibCandidates(p, p.missingRefs); err != nil { + return nil, err + } + p.assumeSiblingImportsValid() + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Go look for candidates in $GOPATH, etc. We don't necessarily load + // the real exports of sibling imports, so keep assuming their contents. + if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil { + return nil, err + } + + p.lastTry = true + fixes, _ := p.fix() + return fixes, nil +} + +// MaxRelevance is the highest relevance, used for the standard library. +// Chosen arbitrarily to match pre-existing gopls code. +const MaxRelevance = 7.0 + +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. +func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } + goenv, err := env.goEnv() + if err != nil { + return err + } + + var mu sync.Mutex // to guard asynchronous access to dupCheck + dupCheck := map[string]struct{}{} + + // Start off with the standard library. 
+ for importPath, exports := range stdlib { + p := &pkg{ + dir: filepath.Join(goenv["GOROOT"], "src", importPath), + importPathShort: importPath, + packageName: path.Base(importPath), + relevance: MaxRelevance, + } + dupCheck[importPath] = struct{}{} + if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { + wrappedCallback.exportsLoaded(p, exports) + } + } + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. + return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []string) { + // If we're an x_test, load the package under test's test variant. + if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, + } + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) +} + +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]float64, error) { + result := make(map[string]float64) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } + for _, path := range paths { + result[path] = resolver.scoreImportPath(ctx, path) + } + return result, nil +} + +func PrimeCache(ctx context.Context, env *ProcessEnv) error { + // Fully scan the disk for directories, but don't actually read any Go files. 
+ callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return false + }, + packageNameLoaded: func(pkg *pkg) bool { + return false + }, + } + return getCandidatePkgs(ctx, callback, "", "", env) +} + +func candidateImportName(pkg *pkg) string { + if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { + return pkg.packageName + } + return "" +} + +// GetAllCandidates calls wrapped for each package whose name starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +// +// Beware that the wrapped function may be called multiple times concurrently. +// TODO(adonovan): encapsulate the concurrency. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. + return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// GetImportPaths calls wrapped for each package whose import path starts with +// searchPrefix, and can be imported from filename with the package name filePkg. 
+func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + return strings.HasPrefix(pkg.importPathShort, searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// A PackageExport is a package and its exports. +type PackageExport struct { + Fix *ImportFix + Exports []string +} + +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + return pkg.packageName == searchPkg + }, + exportsLoaded: func(pkg *pkg, exports []string) { + sort.Strings(exports) + wrapped(PackageExport{ + Fix: &ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }, + Exports: exports, + }) + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} + +// ProcessEnv contains environment variables and settings that affect the use 
of +// the go command, the go/build package, etc. +type ProcessEnv struct { + GocmdRunner *gocommand.Runner + + BuildFlags []string + ModFlag string + ModFile string + + // SkipPathInScan returns true if the path should be skipped from scans of + // the RootCurrentModule root type. The function argument is a clean, + // absolute path. + SkipPathInScan func(string) bool + + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. + // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + Env map[string]string + + WorkingDir string + + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...interface{}) + + initialized bool + + resolver Resolver +} + +func (e *ProcessEnv) goEnv() (map[string]string, error) { + if err := e.init(); err != nil { + return nil, err + } + return e.Env, nil +} + +func (e *ProcessEnv) matchFile(dir, name string) (bool, error) { + bctx, err := e.buildContext() + if err != nil { + return false, err + } + return bctx.MatchFile(dir, name) +} + +// CopyConfig copies the env's configuration into a new env. 
+func (e *ProcessEnv) CopyConfig() *ProcessEnv { + copy := &ProcessEnv{ + GocmdRunner: e.GocmdRunner, + initialized: e.initialized, + BuildFlags: e.BuildFlags, + Logf: e.Logf, + WorkingDir: e.WorkingDir, + resolver: nil, + Env: map[string]string{}, + } + for k, v := range e.Env { + copy.Env[k] = v + } + return copy +} + +func (e *ProcessEnv) init() error { + if e.initialized { + return nil + } + + foundAllRequired := true + for _, k := range requiredGoEnvVars { + if _, ok := e.Env[k]; !ok { + foundAllRequired = false + break + } + } + if foundAllRequired { + e.initialized = true + return nil + } + + if e.Env == nil { + e.Env = map[string]string{} + } + + goEnv := map[string]string{} + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, requiredGoEnvVars...)...) + if err != nil { + return err + } + if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { + return err + } + for k, v := range goEnv { + e.Env[k] = v + } + e.initialized = true + return nil +} + +func (e *ProcessEnv) env() []string { + var env []string // the gocommand package will prepend os.Environ. + for k, v := range e.Env { + env = append(env, k+"="+v) + } + return env +} + +func (e *ProcessEnv) GetResolver() (Resolver, error) { + if e.resolver != nil { + return e.resolver, nil + } + if err := e.init(); err != nil { + return nil, err + } + if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + e.resolver = newGopathResolver(e) + return e.resolver, nil + } + e.resolver = newModuleResolver(e) + return e.resolver, nil +} + +func (e *ProcessEnv) buildContext() (*build.Context, error) { + ctx := build.Default + goenv, err := e.goEnv() + if err != nil { + return nil, err + } + ctx.GOROOT = goenv["GOROOT"] + ctx.GOPATH = goenv["GOPATH"] + + // As of Go 1.14, build.Context has a Dir field + // (see golang.org/issue/34860). + // Populate it only if present. 
+ rc := reflect.ValueOf(&ctx).Elem() + dir := rc.FieldByName("Dir") + if dir.IsValid() && dir.Kind() == reflect.String { + dir.SetString(e.WorkingDir) + } + + // Since Go 1.11, go/build.Context.Import may invoke 'go list' depending on + // the value in GO111MODULE in the process's environment. We always want to + // run in GOPATH mode when calling Import, so we need to prevent this from + // happening. In Go 1.16, GO111MODULE defaults to "on", so this problem comes + // up more frequently. + // + // HACK: setting any of the Context I/O hooks prevents Import from invoking + // 'go list', regardless of GO111MODULE. This is undocumented, but it's + // unlikely to change before GOPATH support is removed. + ctx.ReadDir = ioutil.ReadDir + + return &ctx, nil +} + +func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) { + inv := gocommand.Invocation{ + Verb: verb, + Args: args, + BuildFlags: e.BuildFlags, + Env: e.env(), + Logf: e.Logf, + WorkingDir: e.WorkingDir, + } + return e.GocmdRunner.Run(ctx, inv) +} + +func addStdlibCandidates(pass *pass, refs references) error { + goenv, err := pass.env.goEnv() + if err != nil { + return err + } + add := func(pkg string) { + // Prevent self-imports. + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { + return + } + exports := copyExports(stdlib[pkg]) + pass.addCandidate( + &ImportInfo{ImportPath: pkg}, + &packageInfo{name: path.Base(pkg), exports: exports}) + } + for left := range refs { + if left == "rand" { + // Make sure we try crypto/rand before math/rand. + add("crypto/rand") + add("math/rand") + continue + } + for importPath := range stdlib { + if path.Base(importPath) == left { + add(importPath) + } + } + } + return nil +} + +// A Resolver does the build-system-specific parts of goimports. +type Resolver interface { + // loadPackageNames loads the package names in importPaths. 
+ loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + // scan works with callback to search for packages. See scanCallback for details. + scan(ctx context.Context, callback *scanCallback) error + // loadExports returns the set of exported symbols in the package at dir. + // loadExports may be called concurrently. + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. + scoreImportPath(ctx context.Context, path string) float64 + + ClearForNewScan() +} + +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []string) +} + +func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { + ctx, done := event.Start(ctx, "imports.addExternalCandidates") + defer done() + + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. 
+ }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := pass.env.GetResolver() + if err != nil { + return err + } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } + + // Search for imports matching potential package references. + type result struct { + imp *ImportInfo + pkg *packageInfo + } + results := make(chan result, len(refs)) + + ctx, cancel := context.WithCancel(context.TODO()) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + var ( + firstErr error + firstErrOnce sync.Once + ) + for pkgName, symbols := range refs { + wg.Add(1) + go func(pkgName string, symbols map[string]bool) { + defer wg.Done() + + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) + + if err != nil { + firstErrOnce.Do(func() { + firstErr = err + cancel() + }) + return + } + + if found == nil { + return // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + + pkg := &packageInfo{ + name: pkgName, + exports: symbols, + } + results <- result{imp, pkg} + }(pkgName, symbols) + } + go func() { + wg.Wait() + close(results) + }() + + for result := range results { + pass.addCandidate(result.imp, result.pkg) + } + return firstErr +} + +// notIdentifier reports whether ch is an invalid identifier character. 
+func notIdentifier(ch rune) bool { + return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || + '0' <= ch && ch <= '9' || + ch == '_' || + ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) +} + +// ImportPathToAssumedName returns the assumed package name of an import path. +// It does this using only string parsing of the import path. +// It picks the last element of the path that does not look like a major +// version, and then picks the valid identifier off the start of that element. +// It is used to determine if a local rename should be added to an import for +// clarity. +// This function could be moved to a standard package and exported if we want +// for use in other tools. +func ImportPathToAssumedName(importPath string) string { + base := path.Base(importPath) + if strings.HasPrefix(base, "v") { + if _, err := strconv.Atoi(base[1:]); err == nil { + dir := path.Dir(importPath) + if dir != "." { + base = path.Base(dir) + } + } + } + base = strings.TrimPrefix(base, "go-") + if i := strings.IndexFunc(base, notIdentifier); i >= 0 { + base = base[:i] + } + return base +} + +// gopathResolver implements resolver for GOPATH workspaces. +type gopathResolver struct { + env *ProcessEnv + walked bool + cache *dirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. 
+} + +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + }, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *gopathResolver) ClearForNewScan() { + <-r.scanSema + r.cache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.walked = false + r.scanSema <- struct{}{} +} + +func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + bctx, err := r.env.buildContext() + if err != nil { + return nil, err + } + for _, path := range importPaths { + names[path] = importPathToName(bctx, path, srcDir) + } + return names, nil +} + +// importPathToName finds out the actual package name, as declared in its .go files. +func importPathToName(bctx *build.Context, importPath, srcDir string) string { + // Fast path for standard library without going to disk. + if _, ok := stdlib[importPath]; ok { + return path.Base(importPath) // stdlib packages always match their paths. + } + + buildPkg, err := bctx.Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "" + } + pkgName, err := packageDirToName(buildPkg.Dir) + if err != nil { + return "" + } + return pkgName +} + +// packageDirToName is a faster version of build.Import if +// the only thing desired is the package name. Given a directory, +// packageDirToName then only parses one file in the package, +// trusting that the files in the directory are consistent. 
+func packageDirToName(dir string) (packageName string, err error) { + d, err := os.Open(dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. + continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +type pkg struct { + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance float64 // a weakly-defined score of how relevant a package is. 0 is most relevant. +} + +type pkgDistance struct { + pkg *pkg + distance int // relative distance to target +} + +// byDistanceOrImportPathShortLength sorts by relative distance breaking ties +// on the short import path length and then the import string itself. 
+type byDistanceOrImportPathShortLength []pkgDistance + +func (s byDistanceOrImportPathShortLength) Len() int { return len(s) } +func (s byDistanceOrImportPathShortLength) Less(i, j int) bool { + di, dj := s[i].distance, s[j].distance + if di == -1 { + return false + } + if dj == -1 { + return true + } + if di != dj { + return di < dj + } + + vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort + if len(vi) != len(vj) { + return len(vi) < len(vj) + } + return vi < vj +} +func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func distance(basepath, targetpath string) int { + p, err := filepath.Rel(basepath, targetpath) + if err != nil { + return -1 + } + if p == "." { + return 0 + } + return strings.Count(p, string(filepath.Separator)) + 1 +} + +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { + add := func(root gopathwalk.Root, dir string) { + // We assume cached directories have not changed. We can skip them and their + // children. + if _, ok := r.cache.Load(dir); ok { + return + } + + importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) + info := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: VendorlessPath(importpath), + } + r.cache.Store(dir, info) + } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. 
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + + p := &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + relevance: MaxRelevance - 1, + } + if info.rootType == gopathwalk.RootGOROOT { + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) + } + } + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + var roots []gopathwalk.Root + roots = append(roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "src"), Type: gopathwalk.RootGOROOT}) + for _, p := range filepath.SplitList(goenv["GOPATH"]) { + roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "src"), Type: gopathwalk.RootGOPATH}) + } + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. + roots = filterRoots(roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. 
+ scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + return MaxRelevance - 1 +} + +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { + var result []gopathwalk.Root + for _, root := range roots { + if !include(root) { + continue + } + result = append(result, root) + } + return result +} + +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { + return r.cache.CacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { + // Look for non-test, buildable .go files which could provide exports. 
+ all, err := ioutil.ReadDir(dir) + if err != nil { + return "", nil, err + } + var files []os.FileInfo + for _, fi := range all { + name := fi.Name() + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { + continue + } + match, err := env.matchFile(dir, fi.Name()) + if err != nil || !match { + continue + } + files = append(files, fi) + } + + if len(files) == 0 { + return "", nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir) + } + + var pkgName string + var exports []string + fset := token.NewFileSet() + for _, fi := range files { + select { + case <-ctx.Done(): + return "", nil, ctx.Err() + default: + } + + fullFile := filepath.Join(dir, fi.Name()) + f, err := parser.ParseFile(fset, fullFile, nil, 0) + if err != nil { + if env.Logf != nil { + env.Logf("error parsing %v: %v", fullFile, err) + } + continue + } + if f.Name.Name == "documentation" { + // Special case from go/build.ImportDir, not + // handled by MatchFile above. + continue + } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } + pkgName = f.Name.Name + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports = append(exports, name) + } + } + } + + if env.Logf != nil { + sortedExports := append([]string(nil), exports...) + sort.Strings(sortedExports) + env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) + } + return pkgName, exports, nil +} + +// findImport searches for a package with the given symbols. +// If no package is found, findImport returns ("", false, nil) +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { + // Sort the candidates by their import package length, + // assuming that shorter package names are better than long + // ones. 
Note that this sorts by the de-vendored name, so + // there's no "penalty" for vendoring. + sort.Sort(byDistanceOrImportPathShortLength(candidates)) + if pass.env.Logf != nil { + for i, c := range candidates { + pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + } + } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } + + // Collect exports for packages with matching names. + rescv := make([]chan *pkg, len(candidates)) + for i := range candidates { + rescv[i] = make(chan *pkg, 1) + } + const maxConcurrentPackageImport = 4 + loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + + ctx, cancel := context.WithCancel(ctx) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + for i, c := range candidates { + select { + case loadExportsSem <- struct{}{}: + case <-ctx.Done(): + return + } + + wg.Add(1) + go func(c pkgDistance, resc chan<- *pkg) { + defer func() { + <-loadExportsSem + wg.Done() + }() + + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + } + // If we're an x_test, load the package under test's test variant. + includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) + if err != nil { + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + } + resc <- nil + return + } + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym] = true + } + + // If it doesn't have the right + // symbols, send nil to mean no match. 
+ for symbol := range symbols { + if !exportsMap[symbol] { + resc <- nil + return + } + } + resc <- c.pkg + }(c, rescv[i]) + } + }() + + for _, resc := range rescv { + pkg := <-resc + if pkg == nil { + continue + } + return pkg, nil + } + return nil, nil +} + +// pkgIsCandidate reports whether pkg is a candidate for satisfying the +// finding which package pkgIdent in the file named by filename is trying +// to refer to. +// +// This check is purely lexical and is meant to be as fast as possible +// because it's run over all $GOPATH directories to filter out poor +// candidates in order to limit the CPU and I/O later parsing the +// exports in candidate packages. +// +// filename is the file being formatted. +// pkgIdent is the package being searched for, like "client" (if +// searching for "client.New") +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { + // Check "internal" and "vendor" visibility: + if !canUse(filename, pkg.dir) { + return false + } + + // Speed optimization to minimize disk I/O: + // the last two components on disk must contain the + // package name somewhere. + // + // This permits mismatch naming like directory + // "go-foo" being package "foo", or "pkg.v3" being "pkg", + // or directory "google.golang.org/api/cloudbilling/v1" + // being package "cloudbilling", but doesn't + // permit a directory "foo" to be package + // "bar", which is strongly discouraged + // anyway. There's no reason goimports needs + // to be slow just to accommodate that. 
+ for pkgIdent := range refs { + lastTwo := lastTwoComponents(pkg.importPathShort) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } + } + return false +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// canUse reports whether the package in dir is usable from filename, +// respecting the Go "internal" and "vendor" visibility rules. +func canUse(filename, dir string) bool { + // Fast path check, before any allocations. If it doesn't contain vendor + // or internal, it's not tricky: + // Note that this can false-negative on directories like "notinternal", + // but we check it correctly below. This is just a fast path. + if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. + // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. 
+ absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} + +func copyExports(pkg []string) map[string]bool { + m := make(map[string]bool, len(pkg)) + for _, v := range pkg { + m[v] = true + } + return m +} diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go new file mode 100644 index 00000000000..58e637b90f2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -0,0 +1,356 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mkstdlib.go + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. 
+package imports + +import ( + "bufio" + "bytes" + "context" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" +) + +// Options is golang.org/x/tools/imports.Options with extra internal-only options. +type Options struct { + Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. + LocalPrefix string + + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. +func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { + fileSet := token.NewFileSet() + file, adjust, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + if !opt.FormatOnly { + if err := fixImports(fileSet, file, filename, opt.Env); err != nil { + return nil, err + } + } + return formatFile(fileSet, file, src, adjust, opt) +} + +// FixImports returns a list of fixes to the imports that, when applied, +// will leave the imports in the same state as Process. src and opt must +// be specified. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. 
+func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + ctx, done := event.Start(ctx, "imports.FixImports") + defer done() + + fileSet := token.NewFileSet() + file, _, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + return getFixes(ctx, fileSet, file, filename, opt.Env) +} + +// ApplyFixes applies all of the fixes to the file and formats it. extraMode +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { + // Don't use parse() -- we don't care about fragments or statement lists + // here, and we need to work with unparseable files. + fileSet := token.NewFileSet() + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + parserMode |= extraMode + + file, err := parser.ParseFile(fileSet, filename, src, parserMode) + if file == nil { + return nil, err + } + + // Apply the fixes to the file. + apply(fileSet, file, fixes) + + return formatFile(fileSet, file, src, nil, opt) +} + +// formatFile formats the file syntax tree. +// It may mutate the token.FileSet. +// +// If an adjust function is provided, it is called after formatting +// with the original source (formatFile's src parameter) and the +// formatted file, and returns the postpocessed result. +func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { + mergeImports(file) + sortImports(opt.LocalPrefix, fset.File(file.Pos()), file) + var spacesBefore []string // import paths we need spaces before + for _, impSection := range astutil.Imports(fset, file) { + // Within each block of contiguous imports, see if any + // import lines are in different group numbers. 
If so, + // we'll need to put a space between them so it's + // compatible with gofmt. + lastGroup := -1 + for _, importSpec := range impSection { + importPath, _ := strconv.Unquote(importSpec.Path.Value) + groupNum := importGroup(opt.LocalPrefix, importPath) + if groupNum != lastGroup && lastGroup != -1 { + spacesBefore = append(spacesBefore, importPath) + } + lastGroup = groupNum + } + + } + + printerMode := printer.UseSpaces + if opt.TabIndent { + printerMode |= printer.TabIndent + } + printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} + + var buf bytes.Buffer + err := printConfig.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + out := buf.Bytes() + if adjust != nil { + out = adjust(src, out) + } + if len(spacesBefore) > 0 { + out, err = addImportSpaces(bytes.NewReader(out), spacesBefore) + if err != nil { + return nil, err + } + } + + out, err = format.Source(out) + if err != nil { + return nil, err + } + return out, nil +} + +// parse parses src, which was read from filename, +// as a Go source file or statement list. +func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + + // Try as whole source file. + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err == nil { + return file, nil, nil + } + // If the error is that the source file didn't begin with a + // package line and we accept fragmented input, fall through to + // try as a source fragment. Stop and return on any other error. + if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + return nil, nil, err + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that parse errors are on + // the correct line. 
+ const prefix = "package main;" + psrc := append([]byte(prefix), src...) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + // Gofmt will turn the ; into a \n. + // Do that ourselves now and update the file contents, + // so that positions and line numbers are correct going forward. + psrc[len(prefix)-1] = '\n' + fset.File(file.Package).SetLinesForContent(psrc) + + // If a main function exists, we will assume this is a main + // package and leave the file. + if containsMainFunc(file) { + return file, nil, nil + } + + adjust := func(orig, src []byte) []byte { + // Remove the package clause. + src = src[len(prefix):] + return matchSpace(orig, src) + } + return file, adjust, nil + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return nil, nil, err + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. + fsrc := append(append([]byte("package p; func _() {"), src...), '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + adjust := func(orig, src []byte) []byte { + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + src = src[len("package p\n\nfunc _() {"):] + src = src[:len(src)-len("}\n")] + // Gofmt has also indented the function body one level. + // Remove that indent. + src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + return matchSpace(orig, src) + } + return file, adjust, nil + } + + // Failed, and out of options. 
+ return nil, nil, err +} + +// containsMainFunc checks if a file contains a function declaration with the +// function signature 'func main()' +func containsMainFunc(file *ast.File) bool { + for _, decl := range file.Decls { + if f, ok := decl.(*ast.FuncDecl); ok { + if f.Name.Name != "main" { + continue + } + + if len(f.Type.Params.List) != 0 { + continue + } + + if f.Type.Results != nil && len(f.Type.Results.List) != 0 { + continue + } + + return true + } + } + + return false +} + +func cutSpace(b []byte) (before, middle, after []byte) { + i := 0 + for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { + i++ + } + j := len(b) + for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { + j-- + } + if i <= j { + return b[:i], b[i:j], b[j:] + } + return nil, nil, b[j:] +} + +// matchSpace reformats src to use the same space context as orig. +// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src. +// 2. matchSpace copies the indentation of the first non-blank line in orig +// to every non-blank line in src. +// 3. matchSpace copies the trailing space from orig and uses it in place +// of src's trailing space. 
+func matchSpace(orig []byte, src []byte) []byte { + before, _, after := cutSpace(orig) + i := bytes.LastIndex(before, []byte{'\n'}) + before, indent := before[:i+1], before[i+1:] + + _, src, _ = cutSpace(src) + + var b bytes.Buffer + b.Write(before) + for len(src) > 0 { + line := src + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, src = line[:i+1], line[i+1:] + } else { + src = nil + } + if len(line) > 0 && line[0] != '\n' { // not blank + b.Write(indent) + } + b.Write(line) + } + b.Write(after) + return b.Bytes() +} + +var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+?)"`) + +func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) { + var out bytes.Buffer + in := bufio.NewReader(r) + inImports := false + done := false + for { + s, err := in.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + if !inImports && !done && strings.HasPrefix(s, "import") { + inImports = true + } + if inImports && (strings.HasPrefix(s, "var") || + strings.HasPrefix(s, "func") || + strings.HasPrefix(s, "const") || + strings.HasPrefix(s, "type")) { + done = true + inImports = false + } + if inImports && len(breaks) > 0 { + if m := impLine.FindStringSubmatch(s); m != nil { + if m[1] == breaks[0] { + out.WriteByte('\n') + breaks = breaks[1:] + } + } + } + + fmt.Fprint(&out, s) + } + return out.Bytes(), nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go new file mode 100644 index 00000000000..977d2389da1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -0,0 +1,724 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/mod/module" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" +) + +// ModuleResolver implements resolver for modules using the go command as little +// as feasible. +type ModuleResolver struct { + env *ProcessEnv + moduleCacheDir string + dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + roots []gopathwalk.Root + scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. + scannedRoots map[gopathwalk.Root]bool + + initialized bool + mains []*gocommand.ModuleJSON + mainByDir map[string]*gocommand.ModuleJSON + modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir. + + // moduleCacheCache stores information about the module cache. 
+ moduleCacheCache *dirInfoCache + otherCache *dirInfoCache +} + +func newModuleResolver(e *ProcessEnv) *ModuleResolver { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *ModuleResolver) init() error { + if r.initialized { + return nil + } + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + ModFlag: r.env.ModFlag, + ModFile: r.env.ModFile, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + + vendorEnabled := false + var mainModVendor *gocommand.ModuleJSON + + // Module vendor directories are ignored in workspace mode: + // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md + if len(r.env.Env["GOWORK"]) == 0 { + vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return err + } + } + + if mainModVendor != nil && vendorEnabled { + // Vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = []*gocommand.ModuleJSON{mainModVendor} + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(mainModVendor.Dir, "vendor"), + } + r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + } else { + // Vendor mode is off, so run go list -m ... to find everything. + err := r.initAllMods() + // We expect an error when running outside of a module with + // GO111MODULE=on. Other errors are fatal. 
+ if err != nil { + if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { + return err + } + } + } + + if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { + r.moduleCacheDir = gmc + } else { + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return fmt.Errorf("empty GOPATH") + } + r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") + } + + sort.Slice(r.modsByModPath, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByModPath[x].Path, "/") + } + return count(j) < count(i) // descending order + }) + sort.Slice(r.modsByDir, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator)) + } + return count(j) < count(i) // descending order + }) + + r.roots = []gopathwalk.Root{ + {Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}, + } + r.mainByDir = make(map[string]*gocommand.ModuleJSON) + for _, main := range r.mains { + r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule}) + r.mainByDir[main.Dir] = main + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther}) + } else { + addDep := func(mod *gocommand.ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough. + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. 
+ for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} + if r.moduleCacheCache == nil { + r.moduleCacheCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + } + if r.otherCache == nil { + r.otherCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + } + r.initialized = true + return nil +} + +func (r *ModuleResolver) initAllMods() error { + stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-e", "-json", "...") + if err != nil { + return err + } + for dec := json.NewDecoder(stdout); dec.More(); { + mod := &gocommand.ModuleJSON{} + if err := dec.Decode(mod); err != nil { + return err + } + if mod.Dir == "" { + if r.env.Logf != nil { + r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) + } + // Can't do anything with a module that's not downloaded. + continue + } + // golang/go#36193: the go command doesn't always clean paths. 
+ mod.Dir = filepath.Clean(mod.Dir) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) + if mod.Main { + r.mains = append(r.mains, mod) + } + } + return nil +} + +func (r *ModuleResolver) ClearForNewScan() { + <-r.scanSema + r.scannedRoots = map[gopathwalk.Root]bool{} + r.otherCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.scanSema <- struct{}{} +} + +func (r *ModuleResolver) ClearForNewMod() { + <-r.scanSema + *r = ModuleResolver{ + env: r.env, + moduleCacheCache: r.moduleCacheCache, + otherCache: r.otherCache, + scanSema: r.scanSema, + } + r.init() + r.scanSema <- struct{}{} +} + +// findPackage returns the module and directory that contains the package at +// the given import path, or returns nil, "" if no module is in scope. +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { + // This can't find packages in the stdlib, but that's harmless for all + // the existing code paths. + for _, m := range r.modsByModPath { + if !strings.HasPrefix(importPath, m.Path) { + continue + } + pathInModule := importPath[len(m.Path):] + pkgDir := filepath.Join(m.Dir, pathInModule) + if r.dirIsNestedModule(pkgDir, m) { + continue + } + + if info, ok := r.cacheLoad(pkgDir); ok { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + if err != nil { + continue // No package in this dir. + } + return m, pkgDir + } + if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil { + continue // Dir is unreadable, etc. + } + // This is slightly wrong: a directory doesn't have to have an + // importable package to count as a package for package-to-module + // resolution. package main or _test files should count but + // don't. + // TODO(heschi): fix this. + if _, err := r.cachePackageName(info); err == nil { + return m, pkgDir + } + } + + // Not cached. Read the filesystem. 
+ pkgFiles, err := ioutil.ReadDir(pkgDir) + if err != nil { + continue + } + // A module only contains a package if it has buildable go + // files in that directory. If not, it could be provided by an + // outer module. See #29736. + for _, fi := range pkgFiles { + if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok { + return m, pkgDir + } + } + } + return nil, "" +} + +func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) { + if info, ok := r.moduleCacheCache.Load(dir); ok { + return info, ok + } + return r.otherCache.Load(dir) +} + +func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { + if info.rootType == gopathwalk.RootModuleCache { + r.moduleCacheCache.Store(info.dir, info) + } else { + r.otherCache.Store(info.dir, info) + } +} + +func (r *ModuleResolver) cacheKeys() []string { + return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) +} + +// cachePackageName caches the package name for a dir already in the cache. +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CachePackageName(info) + } + return r.otherCache.CachePackageName(info) +} + +func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CacheExports(ctx, env, info) + } + return r.otherCache.CacheExports(ctx, env, info) +} + +// findModuleByDir returns the module that contains dir, or nil if no such +// module is in scope. +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { + // This is quite tricky and may not be correct. dir could be: + // - a package in the main module. + // - a replace target underneath the main module's directory. + // - a nested module in the above. + // - a replace target somewhere totally random. + // - a nested module in the above. 
+ // - in the mod cache. + // - in /vendor/ in -mod=vendor mode. + // - nested module? Dunno. + // Rumor has it that replace targets cannot contain other replace targets. + // + // Note that it is critical here that modsByDir is sorted to have deeper dirs + // first. This ensures that findModuleByDir finds the innermost module. + // See also golang/go#56291. + for _, m := range r.modsByDir { + if !strings.HasPrefix(dir, m.Dir) { + continue + } + + if r.dirIsNestedModule(dir, m) { + continue + } + + return m + } + return nil +} + +// dirIsNestedModule reports if dir is contained in a nested module underneath +// mod, not actually in mod. +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { + if !strings.HasPrefix(dir, mod.Dir) { + return false + } + if r.dirInModuleCache(dir) { + // Nested modules in the module cache are pruned, + // so it cannot be a nested module. + return false + } + if mod != nil && mod == r.dummyVendorMod { + // The /vendor pseudomodule is flattened and doesn't actually count. 
+ return false + } + modDir, _ := r.modInfo(dir) + if modDir == "" { + return false + } + return modDir != mod.Dir +} + +func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { + readModName := func(modFile string) string { + modBytes, err := ioutil.ReadFile(modFile) + if err != nil { + return "" + } + return modulePath(modBytes) + } + + if r.dirInModuleCache(dir) { + if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + return modDir, readModName(filepath.Join(modDir, "go.mod")) + } + } + for { + if info, ok := r.cacheLoad(dir); ok { + return info.moduleDir, info.moduleName + } + f := filepath.Join(dir, "go.mod") + info, err := os.Stat(f) + if err == nil && !info.IsDir() { + return dir, readModName(f) + } + + d := filepath.Dir(dir) + if len(d) >= len(dir) { + return "", "" // reached top of file system, no go.mod + } + dir = d + } +} + +func (r *ModuleResolver) dirInModuleCache(dir string) bool { + if r.moduleCacheDir == "" { + return false + } + return strings.HasPrefix(dir, r.moduleCacheDir) +} + +func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + if err := r.init(); err != nil { + return nil, err + } + names := map[string]string{} + for _, path := range importPaths { + _, packageDir := r.findPackage(path) + if packageDir == "" { + continue + } + name, err := packageDirToName(packageDir) + if err != nil { + continue + } + names[path] = name + } + return names, nil +} + +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") + defer done() + + if err := r.init(); err != nil { + return err + } + + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. 
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + + if !callback.dirFound(pkg) { + return + } + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(pkg) { + return + } + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } + + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() + + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. + skip := func(root gopathwalk.Root, dir string) bool { + if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule { + if root.Path == dir { + return false + } + + if r.env.SkipPathInScan(filepath.Clean(dir)) { + return true + } + } + + info, ok := r.cacheLoad(dir) + if !ok { + return false + } + // This directory can be skipped as long as we have already scanned it. + // Packages with errors will continue to have errors, so there is no need + // to rescan them. + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + // Add anything new to the cache, and process it if we're still listening. + add := func(root gopathwalk.Root, dir string) { + r.cacheStore(r.scanDirForPackage(root, dir)) + } + + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. + roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. 
+ scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } + + if r.scannedRoots[root] { + continue + } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true}) + r.scannedRoots[root] = true + } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} + +func modRelevance(mod *gocommand.ModuleJSON) float64 { + var relevance float64 + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + relevance = MaxRelevance - 3 + case !mod.Main: + relevance = MaxRelevance - 2 + default: + relevance = MaxRelevance - 1 // main module ties with stdlib + } + + _, versionString, ok := module.SplitPathVersion(mod.Path) + if ok { + index := strings.Index(versionString, "v") + if index == -1 { + return relevance + } + if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil { + relevance += versionNumber / 1000 + } + } + + return relevance +} + +// canonicalize gets the result of canonicalizing the packages using the results +// of initializing the resolver from 'go list -m'. +func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { + // Packages in GOROOT are already canonical, regardless of the std/cmd modules. 
+ if info.rootType == gopathwalk.RootGOROOT { + return &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + packageName: path.Base(info.nonCanonicalImportPath), + relevance: MaxRelevance, + }, nil + } + + importPath := info.nonCanonicalImportPath + mod := r.findModuleByDir(info.dir) + // Check if the directory is underneath a module that's in scope. + if mod != nil { + // It is. If dir is the target of a replace directive, + // our guessed import path is wrong. Use the real one. + if mod.Dir == info.dir { + importPath = mod.Path + } else { + dirInMod := info.dir[len(mod.Dir)+len("/"):] + importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) + } + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. + return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) + } + + res := &pkg{ + importPathShort: importPath, + dir: info.dir, + relevance: modRelevance(mod), + } + // We may have discovered a package that has a different version + // in scope already. Canonicalize to that one if possible. 
+ if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { + res.dir = canonicalDir + } + return res, nil +} + +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if err := r.init(); err != nil { + return "", nil, err + } + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { + return r.cacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + importPath := filepath.ToSlash(subdir) + if strings.HasPrefix(importPath, "vendor/") { + // Only enter vendor directories if they're explicitly requested as a root. + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("unwanted vendor directory"), + } + } + switch root.Type { + case gopathwalk.RootCurrentModule: + importPath = path.Join(r.mainByDir[root.Path].Path, filepath.ToSlash(subdir)) + case gopathwalk.RootModuleCache: + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if r.env.Logf != nil { + r.env.Logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath = path.Join(modPath, filepath.ToSlash(matches[3])) + } + + modDir, modName := r.modInfo(dir) + result := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + if root.Type == gopathwalk.RootGOROOT { + // stdlib packages are always 
in scope, despite the confusing go.mod + return result + } + return result +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// modulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +// +// Copied from cmd/go/internal/modfile. +func modulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go new file mode 100644 index 00000000000..45690abbb4f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -0,0 +1,236 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "fmt" + "sync" + + "golang.org/x/tools/internal/gopathwalk" +) + +// To find packages to import, the resolver needs to know about all of +// the packages that could be imported. 
This includes packages that are +// already in modules that are in (1) the current module, (2) replace targets, +// and (3) packages in the module cache. Packages in (1) and (2) may change over +// time, as the client may edit the current module and locally replaced modules. +// The module cache (which includes all of the packages in (3)) can only +// ever be added to. +// +// The resolver can thus save state about packages in the module cache +// and guarantee that this will not change over time. To obtain information +// about new modules added to the module cache, the module cache should be +// rescanned. +// +// It is OK to serve information about modules that have been deleted, +// as they do still exist. +// TODO(suzmue): can we share information with the caller about +// what module needs to be downloaded to import this package? + +type directoryPackageStatus int + +const ( + _ directoryPackageStatus = iota + directoryScanned + nameLoaded + exportsLoaded +) + +type directoryPackageInfo struct { + // status indicates the extent to which this struct has been filled in. + status directoryPackageStatus + // err is non-nil when there was an error trying to reach status. + err error + + // Set when status >= directoryScanned. + + // dir is the absolute directory of this package. + dir string + rootType gopathwalk.RootType + // nonCanonicalImportPath is the package's expected import path. It may + // not actually be importable at that path. + nonCanonicalImportPath string + + // Module-related information. + moduleDir string // The directory that is the module root of this dir. + moduleName string // The module name that contains this dir. + + // Set when status >= nameLoaded. + + packageName string // the package name, as declared in the source. + + // Set when status >= exportsLoaded. + + exports []string +} + +// reachedStatus returns true when info has a status at least target and any error associated with +// an attempt to reach target. 
+func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) { + if info.err == nil { + return info.status >= target, nil + } + if info.status == target { + return true, info.err + } + return true, nil +} + +// dirInfoCache is a concurrency safe map for storing information about +// directories that may contain packages. +// +// The information in this cache is built incrementally. Entries are initialized in scan. +// No new keys should be added in any other functions, as all directories containing +// packages are identified in scan. +// +// Other functions, including loadExports and findPackage, may update entries in this cache +// as they discover new things about the directory. +// +// The information in the cache is not expected to change for the cache's +// lifetime, so there is no protection against competing writes. Users should +// take care not to hold the cache across changes to the underlying files. +// +// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) +type dirInfoCache struct { + mu sync.Mutex + // dirs stores information about packages in directories, keyed by absolute path. + dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. +func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { + ctx, cancel := context.WithCancel(ctx) + + // Flushing out all the callbacks is tricky without knowing how many there + // are going to be. Setting an arbitrary limit makes it much easier. + const maxInFlight = 10 + sema := make(chan struct{}, maxInFlight) + for i := 0; i < maxInFlight; i++ { + sema <- struct{}{} + } + + cookie := new(int) // A unique ID we can use for the listener. 
+ + // We can't hold mu while calling the listener. + d.mu.Lock() + var keys []string + for key := range d.dirs { + keys = append(keys, key) + } + d.listeners[cookie] = func(info directoryPackageInfo) { + select { + case <-ctx.Done(): + return + case <-sema: + } + listener(info) + sema <- struct{}{} + } + d.mu.Unlock() + + stop := func() { + cancel() + d.mu.Lock() + delete(d.listeners, cookie) + d.mu.Unlock() + for i := 0; i < maxInFlight; i++ { + <-sema + } + } + + // Process the pre-existing keys. + for _, k := range keys { + select { + case <-ctx.Done(): + return stop + default: + } + if v, ok := d.Load(k); ok { + listener(v) + } + } + + return stop +} + +// Store stores the package info for dir. +func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { + d.mu.Lock() + _, old := d.dirs[dir] + d.dirs[dir] = &info + var listeners []cacheListener + for _, l := range d.listeners { + listeners = append(listeners, l) + } + d.mu.Unlock() + + if !old { + for _, l := range listeners { + l(info) + } + } +} + +// Load returns a copy of the directoryPackageInfo for absolute directory dir. +func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { + d.mu.Lock() + defer d.mu.Unlock() + info, ok := d.dirs[dir] + if !ok { + return directoryPackageInfo{}, false + } + return *info, true +} + +// Keys returns the keys currently present in d. 
+func (d *dirInfoCache) Keys() (keys []string) { + d.mu.Lock() + defer d.mu.Unlock() + for key := range d.dirs { + keys = append(keys, key) + } + return keys +} + +func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + return info.packageName, err + } + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return "", fmt.Errorf("cannot read package name, scan error: %v", err) + } + info.packageName, info.err = packageDirToName(info.dir) + info.status = nameLoaded + d.Store(info.dir, info) + return info.packageName, info.err +} + +func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { + if reached, _ := info.reachedStatus(exportsLoaded); reached { + return info.packageName, info.exports, info.err + } + if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { + return "", nil, err + } + info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false) + if info.err == context.Canceled || info.err == context.DeadlineExceeded { + return info.packageName, info.exports, info.err + } + // The cache structure wants things to proceed linearly. We can skip a + // step here, but only if we succeed. + if info.status == nameLoaded || info.err == nil { + info.status = exportsLoaded + } else { + info.status = nameLoaded + } + d.Store(info.dir, info) + return info.packageName, info.exports, info.err +} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go new file mode 100644 index 00000000000..1a0a7ebd9e4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -0,0 +1,297 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Hacked up copy of go/ast/import.go +// Modified to use a single token.File in preference to a FileSet. + +package imports + +import ( + "go/ast" + "go/token" + "log" + "sort" + "strconv" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. +// It also removes duplicate imports when it is possible to do so without data loss. +// +// It may mutate the token.File. +func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { + for i, d := range f.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + break + } + + if len(d.Specs) == 0 { + // Empty import block, remove it. + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + } + + if !d.Lparen.IsValid() { + // Not a block: sorted by default. + continue + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) { + // j begins a new run. End this one. + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...) + i = j + } + } + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...) + d.Specs = specs + + // Deduping can leave a blank line before the rparen; clean that up. + // Ignore line directives. + if len(d.Specs) > 0 { + lastSpec := d.Specs[len(d.Specs)-1] + lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line + if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 { + tokFile.MergeLine(rParenLine - 1) // has side effects! + } + } + } +} + +// mergeImports merges all the import declarations into the first one. +// Taken from golang.org/x/tools/ast/astutil. +// This does not adjust line numbers properly +func mergeImports(f *ast.File) { + if len(f.Decls) <= 1 { + return + } + + // Merge all the import declarations into the first one. 
+ var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } +} + +// declImports reports whether gen contains an import of path. +// Taken from golang.org/x/tools/ast/astutil. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +func importPath(s ast.Spec) string { + t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s ast.Spec) string { + n := s.(*ast.ImportSpec).Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s ast.Spec) string { + c := s.(*ast.ImportSpec).Comment + if c == nil { + return "" + } + return c.Text() +} + +// collapse indicates whether prev may be removed, leaving only next. +func collapse(prev, next ast.Spec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + return prev.(*ast.ImportSpec).Comment == nil +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +// sortSpecs sorts the import specs within each import decl. +// It may mutate the token.File. 
+func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Identify comments in this range. + // Any comment from pos[0].Start to the final line counts. + lastLine := tokFile.Line(pos[len(pos)-1].End) + cstart := len(f.Comments) + cend := len(f.Comments) + for i, g := range f.Comments { + if g.Pos() < pos[0].Start { + continue + } + if i < cstart { + cstart = i + } + if tokFile.Line(g.End()) > lastLine { + cend = i + break + } + } + comments := f.Comments[cstart:cend] + + // Assign each comment to the import spec preceding it. + importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} + specIndex := 0 + for _, g := range comments { + for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { + specIndex++ + } + s := specs[specIndex].(*ast.ImportSpec) + importComment[s] = append(importComment[s], g) + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec{localPrefix, specs}) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. + deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } else { + p := s.Pos() + tokFile.MergeLine(tokFile.Line(p)) // has side effects! 
+ } + } + specs = deduped + + // Fix up comment positions + for i, s := range specs { + s := s.(*ast.ImportSpec) + if s.Name != nil { + s.Name.NamePos = pos[i].Start + } + s.Path.ValuePos = pos[i].Start + s.EndPos = pos[i].End + nextSpecPos := pos[i].End + + for _, g := range importComment[s] { + for _, c := range g.List { + c.Slash = pos[i].End + nextSpecPos = c.End() + } + } + if i < len(specs)-1 { + pos[i+1].Start = nextSpecPos + pos[i+1].End = nextSpecPos + } + } + + sort.Sort(byCommentPos(comments)) + + // Fixup comments can insert blank lines, because import specs are on different lines. + // We remove those blank lines here by merging import spec to the first import spec line. + firstSpecLine := tokFile.Line(specs[0].Pos()) + for _, s := range specs[1:] { + p := s.Pos() + line := tokFile.Line(p) + for previousLine := line - 1; previousLine >= firstSpecLine; { + // MergeLine can panic. Avoid the panic at the cost of not removing the blank line + // golang/go#50329 + if previousLine > 0 && previousLine < tokFile.LineCount() { + tokFile.MergeLine(previousLine) // has side effects! + previousLine-- + } else { + // try to gather some data to diagnose how this could happen + req := "Please report what the imports section of your go file looked like." + log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. 
%s", + firstSpecLine, line, previousLine, tokFile.LineCount(), req) + } + } + } + return specs +} + +type byImportSpec struct { + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec +} + +func (x byImportSpec) Len() int { return len(x.specs) } +func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x.specs[i]) + jpath := importPath(x.specs[j]) + + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) + if igroup != jgroup { + return igroup < jgroup + } + + if ipath != jpath { + return ipath < jpath + } + iname := importName(x.specs[i]) + jname := importName(x.specs[j]) + + if iname != jname { + return iname < jname + } + return importComment(x.specs[i]) < importComment(x.specs[j]) +} + +type byCommentPos []*ast.CommentGroup + +func (x byCommentPos) Len() int { return len(x) } +func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go new file mode 100644 index 00000000000..31a75949cdc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -0,0 +1,11115 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by mkstdlib.go. DO NOT EDIT. 
+ +package imports + +var stdlib = map[string][]string{ + "archive/tar": { + "ErrFieldTooLong", + "ErrHeader", + "ErrInsecurePath", + "ErrWriteAfterClose", + "ErrWriteTooLong", + "FileInfoHeader", + "Format", + "FormatGNU", + "FormatPAX", + "FormatUSTAR", + "FormatUnknown", + "Header", + "NewReader", + "NewWriter", + "Reader", + "TypeBlock", + "TypeChar", + "TypeCont", + "TypeDir", + "TypeFifo", + "TypeGNULongLink", + "TypeGNULongName", + "TypeGNUSparse", + "TypeLink", + "TypeReg", + "TypeRegA", + "TypeSymlink", + "TypeXGlobalHeader", + "TypeXHeader", + "Writer", + }, + "archive/zip": { + "Compressor", + "Decompressor", + "Deflate", + "ErrAlgorithm", + "ErrChecksum", + "ErrFormat", + "ErrInsecurePath", + "File", + "FileHeader", + "FileInfoHeader", + "NewReader", + "NewWriter", + "OpenReader", + "ReadCloser", + "Reader", + "RegisterCompressor", + "RegisterDecompressor", + "Store", + "Writer", + }, + "bufio": { + "ErrAdvanceTooFar", + "ErrBadReadCount", + "ErrBufferFull", + "ErrFinalToken", + "ErrInvalidUnreadByte", + "ErrInvalidUnreadRune", + "ErrNegativeAdvance", + "ErrNegativeCount", + "ErrTooLong", + "MaxScanTokenSize", + "NewReadWriter", + "NewReader", + "NewReaderSize", + "NewScanner", + "NewWriter", + "NewWriterSize", + "ReadWriter", + "Reader", + "ScanBytes", + "ScanLines", + "ScanRunes", + "ScanWords", + "Scanner", + "SplitFunc", + "Writer", + }, + "bytes": { + "Buffer", + "Clone", + "Compare", + "Contains", + "ContainsAny", + "ContainsRune", + "Count", + "Cut", + "CutPrefix", + "CutSuffix", + "Equal", + "EqualFold", + "ErrTooLarge", + "Fields", + "FieldsFunc", + "HasPrefix", + "HasSuffix", + "Index", + "IndexAny", + "IndexByte", + "IndexFunc", + "IndexRune", + "Join", + "LastIndex", + "LastIndexAny", + "LastIndexByte", + "LastIndexFunc", + "Map", + "MinRead", + "NewBuffer", + "NewBufferString", + "NewReader", + "Reader", + "Repeat", + "Replace", + "ReplaceAll", + "Runes", + "Split", + "SplitAfter", + "SplitAfterN", + "SplitN", + "Title", + "ToLower", + 
"ToLowerSpecial", + "ToTitle", + "ToTitleSpecial", + "ToUpper", + "ToUpperSpecial", + "ToValidUTF8", + "Trim", + "TrimFunc", + "TrimLeft", + "TrimLeftFunc", + "TrimPrefix", + "TrimRight", + "TrimRightFunc", + "TrimSpace", + "TrimSuffix", + }, + "compress/bzip2": { + "NewReader", + "StructuralError", + }, + "compress/flate": { + "BestCompression", + "BestSpeed", + "CorruptInputError", + "DefaultCompression", + "HuffmanOnly", + "InternalError", + "NewReader", + "NewReaderDict", + "NewWriter", + "NewWriterDict", + "NoCompression", + "ReadError", + "Reader", + "Resetter", + "WriteError", + "Writer", + }, + "compress/gzip": { + "BestCompression", + "BestSpeed", + "DefaultCompression", + "ErrChecksum", + "ErrHeader", + "Header", + "HuffmanOnly", + "NewReader", + "NewWriter", + "NewWriterLevel", + "NoCompression", + "Reader", + "Writer", + }, + "compress/lzw": { + "LSB", + "MSB", + "NewReader", + "NewWriter", + "Order", + "Reader", + "Writer", + }, + "compress/zlib": { + "BestCompression", + "BestSpeed", + "DefaultCompression", + "ErrChecksum", + "ErrDictionary", + "ErrHeader", + "HuffmanOnly", + "NewReader", + "NewReaderDict", + "NewWriter", + "NewWriterLevel", + "NewWriterLevelDict", + "NoCompression", + "Resetter", + "Writer", + }, + "container/heap": { + "Fix", + "Init", + "Interface", + "Pop", + "Push", + "Remove", + }, + "container/list": { + "Element", + "List", + "New", + }, + "container/ring": { + "New", + "Ring", + }, + "context": { + "Background", + "CancelCauseFunc", + "CancelFunc", + "Canceled", + "Cause", + "Context", + "DeadlineExceeded", + "TODO", + "WithCancel", + "WithCancelCause", + "WithDeadline", + "WithTimeout", + "WithValue", + }, + "crypto": { + "BLAKE2b_256", + "BLAKE2b_384", + "BLAKE2b_512", + "BLAKE2s_256", + "Decrypter", + "DecrypterOpts", + "Hash", + "MD4", + "MD5", + "MD5SHA1", + "PrivateKey", + "PublicKey", + "RIPEMD160", + "RegisterHash", + "SHA1", + "SHA224", + "SHA256", + "SHA384", + "SHA3_224", + "SHA3_256", + "SHA3_384", + "SHA3_512", + 
"SHA512", + "SHA512_224", + "SHA512_256", + "Signer", + "SignerOpts", + }, + "crypto/aes": { + "BlockSize", + "KeySizeError", + "NewCipher", + }, + "crypto/cipher": { + "AEAD", + "Block", + "BlockMode", + "NewCBCDecrypter", + "NewCBCEncrypter", + "NewCFBDecrypter", + "NewCFBEncrypter", + "NewCTR", + "NewGCM", + "NewGCMWithNonceSize", + "NewGCMWithTagSize", + "NewOFB", + "Stream", + "StreamReader", + "StreamWriter", + }, + "crypto/des": { + "BlockSize", + "KeySizeError", + "NewCipher", + "NewTripleDESCipher", + }, + "crypto/dsa": { + "ErrInvalidPublicKey", + "GenerateKey", + "GenerateParameters", + "L1024N160", + "L2048N224", + "L2048N256", + "L3072N256", + "ParameterSizes", + "Parameters", + "PrivateKey", + "PublicKey", + "Sign", + "Verify", + }, + "crypto/ecdh": { + "Curve", + "P256", + "P384", + "P521", + "PrivateKey", + "PublicKey", + "X25519", + }, + "crypto/ecdsa": { + "GenerateKey", + "PrivateKey", + "PublicKey", + "Sign", + "SignASN1", + "Verify", + "VerifyASN1", + }, + "crypto/ed25519": { + "GenerateKey", + "NewKeyFromSeed", + "Options", + "PrivateKey", + "PrivateKeySize", + "PublicKey", + "PublicKeySize", + "SeedSize", + "Sign", + "SignatureSize", + "Verify", + "VerifyWithOptions", + }, + "crypto/elliptic": { + "Curve", + "CurveParams", + "GenerateKey", + "Marshal", + "MarshalCompressed", + "P224", + "P256", + "P384", + "P521", + "Unmarshal", + "UnmarshalCompressed", + }, + "crypto/hmac": { + "Equal", + "New", + }, + "crypto/md5": { + "BlockSize", + "New", + "Size", + "Sum", + }, + "crypto/rand": { + "Int", + "Prime", + "Read", + "Reader", + }, + "crypto/rc4": { + "Cipher", + "KeySizeError", + "NewCipher", + }, + "crypto/rsa": { + "CRTValue", + "DecryptOAEP", + "DecryptPKCS1v15", + "DecryptPKCS1v15SessionKey", + "EncryptOAEP", + "EncryptPKCS1v15", + "ErrDecryption", + "ErrMessageTooLong", + "ErrVerification", + "GenerateKey", + "GenerateMultiPrimeKey", + "OAEPOptions", + "PKCS1v15DecryptOptions", + "PSSOptions", + "PSSSaltLengthAuto", + 
"PSSSaltLengthEqualsHash", + "PrecomputedValues", + "PrivateKey", + "PublicKey", + "SignPKCS1v15", + "SignPSS", + "VerifyPKCS1v15", + "VerifyPSS", + }, + "crypto/sha1": { + "BlockSize", + "New", + "Size", + "Sum", + }, + "crypto/sha256": { + "BlockSize", + "New", + "New224", + "Size", + "Size224", + "Sum224", + "Sum256", + }, + "crypto/sha512": { + "BlockSize", + "New", + "New384", + "New512_224", + "New512_256", + "Size", + "Size224", + "Size256", + "Size384", + "Sum384", + "Sum512", + "Sum512_224", + "Sum512_256", + }, + "crypto/subtle": { + "ConstantTimeByteEq", + "ConstantTimeCompare", + "ConstantTimeCopy", + "ConstantTimeEq", + "ConstantTimeLessOrEq", + "ConstantTimeSelect", + "XORBytes", + }, + "crypto/tls": { + "Certificate", + "CertificateRequestInfo", + "CertificateVerificationError", + "CipherSuite", + "CipherSuiteName", + "CipherSuites", + "Client", + "ClientAuthType", + "ClientHelloInfo", + "ClientSessionCache", + "ClientSessionState", + "Config", + "Conn", + "ConnectionState", + "CurveID", + "CurveP256", + "CurveP384", + "CurveP521", + "Dial", + "DialWithDialer", + "Dialer", + "ECDSAWithP256AndSHA256", + "ECDSAWithP384AndSHA384", + "ECDSAWithP521AndSHA512", + "ECDSAWithSHA1", + "Ed25519", + "InsecureCipherSuites", + "Listen", + "LoadX509KeyPair", + "NewLRUClientSessionCache", + "NewListener", + "NoClientCert", + "PKCS1WithSHA1", + "PKCS1WithSHA256", + "PKCS1WithSHA384", + "PKCS1WithSHA512", + "PSSWithSHA256", + "PSSWithSHA384", + "PSSWithSHA512", + "RecordHeaderError", + "RenegotiateFreelyAsClient", + "RenegotiateNever", + "RenegotiateOnceAsClient", + "RenegotiationSupport", + "RequestClientCert", + "RequireAndVerifyClientCert", + "RequireAnyClientCert", + "Server", + "SignatureScheme", + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + 
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + "TLS_FALLBACK_SCSV", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_RC4_128_SHA", + "VerifyClientCertIfGiven", + "VersionSSL30", + "VersionTLS10", + "VersionTLS11", + "VersionTLS12", + "VersionTLS13", + "X25519", + "X509KeyPair", + }, + "crypto/x509": { + "CANotAuthorizedForExtKeyUsage", + "CANotAuthorizedForThisName", + "CertPool", + "Certificate", + "CertificateInvalidError", + "CertificateRequest", + "ConstraintViolationError", + "CreateCertificate", + "CreateCertificateRequest", + "CreateRevocationList", + "DSA", + "DSAWithSHA1", + "DSAWithSHA256", + "DecryptPEMBlock", + "ECDSA", + "ECDSAWithSHA1", + "ECDSAWithSHA256", + "ECDSAWithSHA384", + "ECDSAWithSHA512", + "Ed25519", + "EncryptPEMBlock", + "ErrUnsupportedAlgorithm", + "Expired", + "ExtKeyUsage", + "ExtKeyUsageAny", + "ExtKeyUsageClientAuth", + "ExtKeyUsageCodeSigning", + "ExtKeyUsageEmailProtection", + "ExtKeyUsageIPSECEndSystem", + "ExtKeyUsageIPSECTunnel", + "ExtKeyUsageIPSECUser", + "ExtKeyUsageMicrosoftCommercialCodeSigning", + "ExtKeyUsageMicrosoftKernelCodeSigning", + "ExtKeyUsageMicrosoftServerGatedCrypto", + "ExtKeyUsageNetscapeServerGatedCrypto", + "ExtKeyUsageOCSPSigning", + "ExtKeyUsageServerAuth", + "ExtKeyUsageTimeStamping", + "HostnameError", + "IncompatibleUsage", + 
"IncorrectPasswordError", + "InsecureAlgorithmError", + "InvalidReason", + "IsEncryptedPEMBlock", + "KeyUsage", + "KeyUsageCRLSign", + "KeyUsageCertSign", + "KeyUsageContentCommitment", + "KeyUsageDataEncipherment", + "KeyUsageDecipherOnly", + "KeyUsageDigitalSignature", + "KeyUsageEncipherOnly", + "KeyUsageKeyAgreement", + "KeyUsageKeyEncipherment", + "MD2WithRSA", + "MD5WithRSA", + "MarshalECPrivateKey", + "MarshalPKCS1PrivateKey", + "MarshalPKCS1PublicKey", + "MarshalPKCS8PrivateKey", + "MarshalPKIXPublicKey", + "NameConstraintsWithoutSANs", + "NameMismatch", + "NewCertPool", + "NotAuthorizedToSign", + "PEMCipher", + "PEMCipher3DES", + "PEMCipherAES128", + "PEMCipherAES192", + "PEMCipherAES256", + "PEMCipherDES", + "ParseCRL", + "ParseCertificate", + "ParseCertificateRequest", + "ParseCertificates", + "ParseDERCRL", + "ParseECPrivateKey", + "ParsePKCS1PrivateKey", + "ParsePKCS1PublicKey", + "ParsePKCS8PrivateKey", + "ParsePKIXPublicKey", + "ParseRevocationList", + "PublicKeyAlgorithm", + "PureEd25519", + "RSA", + "RevocationList", + "SHA1WithRSA", + "SHA256WithRSA", + "SHA256WithRSAPSS", + "SHA384WithRSA", + "SHA384WithRSAPSS", + "SHA512WithRSA", + "SHA512WithRSAPSS", + "SetFallbackRoots", + "SignatureAlgorithm", + "SystemCertPool", + "SystemRootsError", + "TooManyConstraints", + "TooManyIntermediates", + "UnconstrainedName", + "UnhandledCriticalExtension", + "UnknownAuthorityError", + "UnknownPublicKeyAlgorithm", + "UnknownSignatureAlgorithm", + "VerifyOptions", + }, + "crypto/x509/pkix": { + "AlgorithmIdentifier", + "AttributeTypeAndValue", + "AttributeTypeAndValueSET", + "CertificateList", + "Extension", + "Name", + "RDNSequence", + "RelativeDistinguishedNameSET", + "RevokedCertificate", + "TBSCertificateList", + }, + "database/sql": { + "ColumnType", + "Conn", + "DB", + "DBStats", + "Drivers", + "ErrConnDone", + "ErrNoRows", + "ErrTxDone", + "IsolationLevel", + "LevelDefault", + "LevelLinearizable", + "LevelReadCommitted", + "LevelReadUncommitted", + 
"LevelRepeatableRead", + "LevelSerializable", + "LevelSnapshot", + "LevelWriteCommitted", + "Named", + "NamedArg", + "NullBool", + "NullByte", + "NullFloat64", + "NullInt16", + "NullInt32", + "NullInt64", + "NullString", + "NullTime", + "Open", + "OpenDB", + "Out", + "RawBytes", + "Register", + "Result", + "Row", + "Rows", + "Scanner", + "Stmt", + "Tx", + "TxOptions", + }, + "database/sql/driver": { + "Bool", + "ColumnConverter", + "Conn", + "ConnBeginTx", + "ConnPrepareContext", + "Connector", + "DefaultParameterConverter", + "Driver", + "DriverContext", + "ErrBadConn", + "ErrRemoveArgument", + "ErrSkip", + "Execer", + "ExecerContext", + "Int32", + "IsScanValue", + "IsValue", + "IsolationLevel", + "NamedValue", + "NamedValueChecker", + "NotNull", + "Null", + "Pinger", + "Queryer", + "QueryerContext", + "Result", + "ResultNoRows", + "Rows", + "RowsAffected", + "RowsColumnTypeDatabaseTypeName", + "RowsColumnTypeLength", + "RowsColumnTypeNullable", + "RowsColumnTypePrecisionScale", + "RowsColumnTypeScanType", + "RowsNextResultSet", + "SessionResetter", + "Stmt", + "StmtExecContext", + "StmtQueryContext", + "String", + "Tx", + "TxOptions", + "Validator", + "Value", + "ValueConverter", + "Valuer", + }, + "debug/buildinfo": { + "BuildInfo", + "Read", + "ReadFile", + }, + "debug/dwarf": { + "AddrType", + "ArrayType", + "Attr", + "AttrAbstractOrigin", + "AttrAccessibility", + "AttrAddrBase", + "AttrAddrClass", + "AttrAlignment", + "AttrAllocated", + "AttrArtificial", + "AttrAssociated", + "AttrBaseTypes", + "AttrBinaryScale", + "AttrBitOffset", + "AttrBitSize", + "AttrByteSize", + "AttrCallAllCalls", + "AttrCallAllSourceCalls", + "AttrCallAllTailCalls", + "AttrCallColumn", + "AttrCallDataLocation", + "AttrCallDataValue", + "AttrCallFile", + "AttrCallLine", + "AttrCallOrigin", + "AttrCallPC", + "AttrCallParameter", + "AttrCallReturnPC", + "AttrCallTailCall", + "AttrCallTarget", + "AttrCallTargetClobbered", + "AttrCallValue", + "AttrCalling", + "AttrCommonRef", + 
"AttrCompDir", + "AttrConstExpr", + "AttrConstValue", + "AttrContainingType", + "AttrCount", + "AttrDataBitOffset", + "AttrDataLocation", + "AttrDataMemberLoc", + "AttrDecimalScale", + "AttrDecimalSign", + "AttrDeclColumn", + "AttrDeclFile", + "AttrDeclLine", + "AttrDeclaration", + "AttrDefaultValue", + "AttrDefaulted", + "AttrDeleted", + "AttrDescription", + "AttrDigitCount", + "AttrDiscr", + "AttrDiscrList", + "AttrDiscrValue", + "AttrDwoName", + "AttrElemental", + "AttrEncoding", + "AttrEndianity", + "AttrEntrypc", + "AttrEnumClass", + "AttrExplicit", + "AttrExportSymbols", + "AttrExtension", + "AttrExternal", + "AttrFrameBase", + "AttrFriend", + "AttrHighpc", + "AttrIdentifierCase", + "AttrImport", + "AttrInline", + "AttrIsOptional", + "AttrLanguage", + "AttrLinkageName", + "AttrLocation", + "AttrLoclistsBase", + "AttrLowerBound", + "AttrLowpc", + "AttrMacroInfo", + "AttrMacros", + "AttrMainSubprogram", + "AttrMutable", + "AttrName", + "AttrNamelistItem", + "AttrNoreturn", + "AttrObjectPointer", + "AttrOrdering", + "AttrPictureString", + "AttrPriority", + "AttrProducer", + "AttrPrototyped", + "AttrPure", + "AttrRanges", + "AttrRank", + "AttrRecursive", + "AttrReference", + "AttrReturnAddr", + "AttrRnglistsBase", + "AttrRvalueReference", + "AttrSegment", + "AttrSibling", + "AttrSignature", + "AttrSmall", + "AttrSpecification", + "AttrStartScope", + "AttrStaticLink", + "AttrStmtList", + "AttrStrOffsetsBase", + "AttrStride", + "AttrStrideSize", + "AttrStringLength", + "AttrStringLengthBitSize", + "AttrStringLengthByteSize", + "AttrThreadsScaled", + "AttrTrampoline", + "AttrType", + "AttrUpperBound", + "AttrUseLocation", + "AttrUseUTF8", + "AttrVarParam", + "AttrVirtuality", + "AttrVisibility", + "AttrVtableElemLoc", + "BasicType", + "BoolType", + "CharType", + "Class", + "ClassAddrPtr", + "ClassAddress", + "ClassBlock", + "ClassConstant", + "ClassExprLoc", + "ClassFlag", + "ClassLinePtr", + "ClassLocList", + "ClassLocListPtr", + "ClassMacPtr", + 
"ClassRangeListPtr", + "ClassReference", + "ClassReferenceAlt", + "ClassReferenceSig", + "ClassRngList", + "ClassRngListsPtr", + "ClassStrOffsetsPtr", + "ClassString", + "ClassStringAlt", + "ClassUnknown", + "CommonType", + "ComplexType", + "Data", + "DecodeError", + "DotDotDotType", + "Entry", + "EnumType", + "EnumValue", + "ErrUnknownPC", + "Field", + "FloatType", + "FuncType", + "IntType", + "LineEntry", + "LineFile", + "LineReader", + "LineReaderPos", + "New", + "Offset", + "PtrType", + "QualType", + "Reader", + "StructField", + "StructType", + "Tag", + "TagAccessDeclaration", + "TagArrayType", + "TagAtomicType", + "TagBaseType", + "TagCallSite", + "TagCallSiteParameter", + "TagCatchDwarfBlock", + "TagClassType", + "TagCoarrayType", + "TagCommonDwarfBlock", + "TagCommonInclusion", + "TagCompileUnit", + "TagCondition", + "TagConstType", + "TagConstant", + "TagDwarfProcedure", + "TagDynamicType", + "TagEntryPoint", + "TagEnumerationType", + "TagEnumerator", + "TagFileType", + "TagFormalParameter", + "TagFriend", + "TagGenericSubrange", + "TagImmutableType", + "TagImportedDeclaration", + "TagImportedModule", + "TagImportedUnit", + "TagInheritance", + "TagInlinedSubroutine", + "TagInterfaceType", + "TagLabel", + "TagLexDwarfBlock", + "TagMember", + "TagModule", + "TagMutableType", + "TagNamelist", + "TagNamelistItem", + "TagNamespace", + "TagPackedType", + "TagPartialUnit", + "TagPointerType", + "TagPtrToMemberType", + "TagReferenceType", + "TagRestrictType", + "TagRvalueReferenceType", + "TagSetType", + "TagSharedType", + "TagSkeletonUnit", + "TagStringType", + "TagStructType", + "TagSubprogram", + "TagSubrangeType", + "TagSubroutineType", + "TagTemplateAlias", + "TagTemplateTypeParameter", + "TagTemplateValueParameter", + "TagThrownType", + "TagTryDwarfBlock", + "TagTypeUnit", + "TagTypedef", + "TagUnionType", + "TagUnspecifiedParameters", + "TagUnspecifiedType", + "TagVariable", + "TagVariant", + "TagVariantPart", + "TagVolatileType", + "TagWithStmt", + "Type", 
+ "TypedefType", + "UcharType", + "UintType", + "UnspecifiedType", + "UnsupportedType", + "VoidType", + }, + "debug/elf": { + "ARM_MAGIC_TRAMP_NUMBER", + "COMPRESS_HIOS", + "COMPRESS_HIPROC", + "COMPRESS_LOOS", + "COMPRESS_LOPROC", + "COMPRESS_ZLIB", + "Chdr32", + "Chdr64", + "Class", + "CompressionType", + "DF_BIND_NOW", + "DF_ORIGIN", + "DF_STATIC_TLS", + "DF_SYMBOLIC", + "DF_TEXTREL", + "DT_ADDRRNGHI", + "DT_ADDRRNGLO", + "DT_AUDIT", + "DT_AUXILIARY", + "DT_BIND_NOW", + "DT_CHECKSUM", + "DT_CONFIG", + "DT_DEBUG", + "DT_DEPAUDIT", + "DT_ENCODING", + "DT_FEATURE", + "DT_FILTER", + "DT_FINI", + "DT_FINI_ARRAY", + "DT_FINI_ARRAYSZ", + "DT_FLAGS", + "DT_FLAGS_1", + "DT_GNU_CONFLICT", + "DT_GNU_CONFLICTSZ", + "DT_GNU_HASH", + "DT_GNU_LIBLIST", + "DT_GNU_LIBLISTSZ", + "DT_GNU_PRELINKED", + "DT_HASH", + "DT_HIOS", + "DT_HIPROC", + "DT_INIT", + "DT_INIT_ARRAY", + "DT_INIT_ARRAYSZ", + "DT_JMPREL", + "DT_LOOS", + "DT_LOPROC", + "DT_MIPS_AUX_DYNAMIC", + "DT_MIPS_BASE_ADDRESS", + "DT_MIPS_COMPACT_SIZE", + "DT_MIPS_CONFLICT", + "DT_MIPS_CONFLICTNO", + "DT_MIPS_CXX_FLAGS", + "DT_MIPS_DELTA_CLASS", + "DT_MIPS_DELTA_CLASSSYM", + "DT_MIPS_DELTA_CLASSSYM_NO", + "DT_MIPS_DELTA_CLASS_NO", + "DT_MIPS_DELTA_INSTANCE", + "DT_MIPS_DELTA_INSTANCE_NO", + "DT_MIPS_DELTA_RELOC", + "DT_MIPS_DELTA_RELOC_NO", + "DT_MIPS_DELTA_SYM", + "DT_MIPS_DELTA_SYM_NO", + "DT_MIPS_DYNSTR_ALIGN", + "DT_MIPS_FLAGS", + "DT_MIPS_GOTSYM", + "DT_MIPS_GP_VALUE", + "DT_MIPS_HIDDEN_GOTIDX", + "DT_MIPS_HIPAGENO", + "DT_MIPS_ICHECKSUM", + "DT_MIPS_INTERFACE", + "DT_MIPS_INTERFACE_SIZE", + "DT_MIPS_IVERSION", + "DT_MIPS_LIBLIST", + "DT_MIPS_LIBLISTNO", + "DT_MIPS_LOCALPAGE_GOTIDX", + "DT_MIPS_LOCAL_GOTIDX", + "DT_MIPS_LOCAL_GOTNO", + "DT_MIPS_MSYM", + "DT_MIPS_OPTIONS", + "DT_MIPS_PERF_SUFFIX", + "DT_MIPS_PIXIE_INIT", + "DT_MIPS_PLTGOT", + "DT_MIPS_PROTECTED_GOTIDX", + "DT_MIPS_RLD_MAP", + "DT_MIPS_RLD_MAP_REL", + "DT_MIPS_RLD_TEXT_RESOLVE_ADDR", + "DT_MIPS_RLD_VERSION", + "DT_MIPS_RWPLT", + "DT_MIPS_SYMBOL_LIB", + 
"DT_MIPS_SYMTABNO", + "DT_MIPS_TIME_STAMP", + "DT_MIPS_UNREFEXTNO", + "DT_MOVEENT", + "DT_MOVESZ", + "DT_MOVETAB", + "DT_NEEDED", + "DT_NULL", + "DT_PLTGOT", + "DT_PLTPAD", + "DT_PLTPADSZ", + "DT_PLTREL", + "DT_PLTRELSZ", + "DT_POSFLAG_1", + "DT_PPC64_GLINK", + "DT_PPC64_OPD", + "DT_PPC64_OPDSZ", + "DT_PPC64_OPT", + "DT_PPC_GOT", + "DT_PPC_OPT", + "DT_PREINIT_ARRAY", + "DT_PREINIT_ARRAYSZ", + "DT_REL", + "DT_RELA", + "DT_RELACOUNT", + "DT_RELAENT", + "DT_RELASZ", + "DT_RELCOUNT", + "DT_RELENT", + "DT_RELSZ", + "DT_RPATH", + "DT_RUNPATH", + "DT_SONAME", + "DT_SPARC_REGISTER", + "DT_STRSZ", + "DT_STRTAB", + "DT_SYMBOLIC", + "DT_SYMENT", + "DT_SYMINENT", + "DT_SYMINFO", + "DT_SYMINSZ", + "DT_SYMTAB", + "DT_SYMTAB_SHNDX", + "DT_TEXTREL", + "DT_TLSDESC_GOT", + "DT_TLSDESC_PLT", + "DT_USED", + "DT_VALRNGHI", + "DT_VALRNGLO", + "DT_VERDEF", + "DT_VERDEFNUM", + "DT_VERNEED", + "DT_VERNEEDNUM", + "DT_VERSYM", + "Data", + "Dyn32", + "Dyn64", + "DynFlag", + "DynTag", + "EI_ABIVERSION", + "EI_CLASS", + "EI_DATA", + "EI_NIDENT", + "EI_OSABI", + "EI_PAD", + "EI_VERSION", + "ELFCLASS32", + "ELFCLASS64", + "ELFCLASSNONE", + "ELFDATA2LSB", + "ELFDATA2MSB", + "ELFDATANONE", + "ELFMAG", + "ELFOSABI_86OPEN", + "ELFOSABI_AIX", + "ELFOSABI_ARM", + "ELFOSABI_AROS", + "ELFOSABI_CLOUDABI", + "ELFOSABI_FENIXOS", + "ELFOSABI_FREEBSD", + "ELFOSABI_HPUX", + "ELFOSABI_HURD", + "ELFOSABI_IRIX", + "ELFOSABI_LINUX", + "ELFOSABI_MODESTO", + "ELFOSABI_NETBSD", + "ELFOSABI_NONE", + "ELFOSABI_NSK", + "ELFOSABI_OPENBSD", + "ELFOSABI_OPENVMS", + "ELFOSABI_SOLARIS", + "ELFOSABI_STANDALONE", + "ELFOSABI_TRU64", + "EM_386", + "EM_486", + "EM_56800EX", + "EM_68HC05", + "EM_68HC08", + "EM_68HC11", + "EM_68HC12", + "EM_68HC16", + "EM_68K", + "EM_78KOR", + "EM_8051", + "EM_860", + "EM_88K", + "EM_960", + "EM_AARCH64", + "EM_ALPHA", + "EM_ALPHA_STD", + "EM_ALTERA_NIOS2", + "EM_AMDGPU", + "EM_ARC", + "EM_ARCA", + "EM_ARC_COMPACT", + "EM_ARC_COMPACT2", + "EM_ARM", + "EM_AVR", + "EM_AVR32", + "EM_BA1", + "EM_BA2", 
+ "EM_BLACKFIN", + "EM_BPF", + "EM_C166", + "EM_CDP", + "EM_CE", + "EM_CLOUDSHIELD", + "EM_COGE", + "EM_COLDFIRE", + "EM_COOL", + "EM_COREA_1ST", + "EM_COREA_2ND", + "EM_CR", + "EM_CR16", + "EM_CRAYNV2", + "EM_CRIS", + "EM_CRX", + "EM_CSR_KALIMBA", + "EM_CUDA", + "EM_CYPRESS_M8C", + "EM_D10V", + "EM_D30V", + "EM_DSP24", + "EM_DSPIC30F", + "EM_DXP", + "EM_ECOG1", + "EM_ECOG16", + "EM_ECOG1X", + "EM_ECOG2", + "EM_ETPU", + "EM_EXCESS", + "EM_F2MC16", + "EM_FIREPATH", + "EM_FR20", + "EM_FR30", + "EM_FT32", + "EM_FX66", + "EM_H8S", + "EM_H8_300", + "EM_H8_300H", + "EM_H8_500", + "EM_HUANY", + "EM_IA_64", + "EM_INTEL205", + "EM_INTEL206", + "EM_INTEL207", + "EM_INTEL208", + "EM_INTEL209", + "EM_IP2K", + "EM_JAVELIN", + "EM_K10M", + "EM_KM32", + "EM_KMX16", + "EM_KMX32", + "EM_KMX8", + "EM_KVARC", + "EM_L10M", + "EM_LANAI", + "EM_LATTICEMICO32", + "EM_LOONGARCH", + "EM_M16C", + "EM_M32", + "EM_M32C", + "EM_M32R", + "EM_MANIK", + "EM_MAX", + "EM_MAXQ30", + "EM_MCHP_PIC", + "EM_MCST_ELBRUS", + "EM_ME16", + "EM_METAG", + "EM_MICROBLAZE", + "EM_MIPS", + "EM_MIPS_RS3_LE", + "EM_MIPS_RS4_BE", + "EM_MIPS_X", + "EM_MMA", + "EM_MMDSP_PLUS", + "EM_MMIX", + "EM_MN10200", + "EM_MN10300", + "EM_MOXIE", + "EM_MSP430", + "EM_NCPU", + "EM_NDR1", + "EM_NDS32", + "EM_NONE", + "EM_NORC", + "EM_NS32K", + "EM_OPEN8", + "EM_OPENRISC", + "EM_PARISC", + "EM_PCP", + "EM_PDP10", + "EM_PDP11", + "EM_PDSP", + "EM_PJ", + "EM_PPC", + "EM_PPC64", + "EM_PRISM", + "EM_QDSP6", + "EM_R32C", + "EM_RCE", + "EM_RH32", + "EM_RISCV", + "EM_RL78", + "EM_RS08", + "EM_RX", + "EM_S370", + "EM_S390", + "EM_SCORE7", + "EM_SEP", + "EM_SE_C17", + "EM_SE_C33", + "EM_SH", + "EM_SHARC", + "EM_SLE9X", + "EM_SNP1K", + "EM_SPARC", + "EM_SPARC32PLUS", + "EM_SPARCV9", + "EM_ST100", + "EM_ST19", + "EM_ST200", + "EM_ST7", + "EM_ST9PLUS", + "EM_STARCORE", + "EM_STM8", + "EM_STXP7X", + "EM_SVX", + "EM_TILE64", + "EM_TILEGX", + "EM_TILEPRO", + "EM_TINYJ", + "EM_TI_ARP32", + "EM_TI_C2000", + "EM_TI_C5500", + "EM_TI_C6000", + 
"EM_TI_PRU", + "EM_TMM_GPP", + "EM_TPC", + "EM_TRICORE", + "EM_TRIMEDIA", + "EM_TSK3000", + "EM_UNICORE", + "EM_V800", + "EM_V850", + "EM_VAX", + "EM_VIDEOCORE", + "EM_VIDEOCORE3", + "EM_VIDEOCORE5", + "EM_VISIUM", + "EM_VPP500", + "EM_X86_64", + "EM_XCORE", + "EM_XGATE", + "EM_XIMO16", + "EM_XTENSA", + "EM_Z80", + "EM_ZSP", + "ET_CORE", + "ET_DYN", + "ET_EXEC", + "ET_HIOS", + "ET_HIPROC", + "ET_LOOS", + "ET_LOPROC", + "ET_NONE", + "ET_REL", + "EV_CURRENT", + "EV_NONE", + "ErrNoSymbols", + "File", + "FileHeader", + "FormatError", + "Header32", + "Header64", + "ImportedSymbol", + "Machine", + "NT_FPREGSET", + "NT_PRPSINFO", + "NT_PRSTATUS", + "NType", + "NewFile", + "OSABI", + "Open", + "PF_MASKOS", + "PF_MASKPROC", + "PF_R", + "PF_W", + "PF_X", + "PT_AARCH64_ARCHEXT", + "PT_AARCH64_UNWIND", + "PT_ARM_ARCHEXT", + "PT_ARM_EXIDX", + "PT_DYNAMIC", + "PT_GNU_EH_FRAME", + "PT_GNU_MBIND_HI", + "PT_GNU_MBIND_LO", + "PT_GNU_PROPERTY", + "PT_GNU_RELRO", + "PT_GNU_STACK", + "PT_HIOS", + "PT_HIPROC", + "PT_INTERP", + "PT_LOAD", + "PT_LOOS", + "PT_LOPROC", + "PT_MIPS_ABIFLAGS", + "PT_MIPS_OPTIONS", + "PT_MIPS_REGINFO", + "PT_MIPS_RTPROC", + "PT_NOTE", + "PT_NULL", + "PT_OPENBSD_BOOTDATA", + "PT_OPENBSD_RANDOMIZE", + "PT_OPENBSD_WXNEEDED", + "PT_PAX_FLAGS", + "PT_PHDR", + "PT_S390_PGSTE", + "PT_SHLIB", + "PT_SUNWSTACK", + "PT_SUNW_EH_FRAME", + "PT_TLS", + "Prog", + "Prog32", + "Prog64", + "ProgFlag", + "ProgHeader", + "ProgType", + "R_386", + "R_386_16", + "R_386_32", + "R_386_32PLT", + "R_386_8", + "R_386_COPY", + "R_386_GLOB_DAT", + "R_386_GOT32", + "R_386_GOT32X", + "R_386_GOTOFF", + "R_386_GOTPC", + "R_386_IRELATIVE", + "R_386_JMP_SLOT", + "R_386_NONE", + "R_386_PC16", + "R_386_PC32", + "R_386_PC8", + "R_386_PLT32", + "R_386_RELATIVE", + "R_386_SIZE32", + "R_386_TLS_DESC", + "R_386_TLS_DESC_CALL", + "R_386_TLS_DTPMOD32", + "R_386_TLS_DTPOFF32", + "R_386_TLS_GD", + "R_386_TLS_GD_32", + "R_386_TLS_GD_CALL", + "R_386_TLS_GD_POP", + "R_386_TLS_GD_PUSH", + "R_386_TLS_GOTDESC", + 
"R_386_TLS_GOTIE", + "R_386_TLS_IE", + "R_386_TLS_IE_32", + "R_386_TLS_LDM", + "R_386_TLS_LDM_32", + "R_386_TLS_LDM_CALL", + "R_386_TLS_LDM_POP", + "R_386_TLS_LDM_PUSH", + "R_386_TLS_LDO_32", + "R_386_TLS_LE", + "R_386_TLS_LE_32", + "R_386_TLS_TPOFF", + "R_386_TLS_TPOFF32", + "R_390", + "R_390_12", + "R_390_16", + "R_390_20", + "R_390_32", + "R_390_64", + "R_390_8", + "R_390_COPY", + "R_390_GLOB_DAT", + "R_390_GOT12", + "R_390_GOT16", + "R_390_GOT20", + "R_390_GOT32", + "R_390_GOT64", + "R_390_GOTENT", + "R_390_GOTOFF", + "R_390_GOTOFF16", + "R_390_GOTOFF64", + "R_390_GOTPC", + "R_390_GOTPCDBL", + "R_390_GOTPLT12", + "R_390_GOTPLT16", + "R_390_GOTPLT20", + "R_390_GOTPLT32", + "R_390_GOTPLT64", + "R_390_GOTPLTENT", + "R_390_GOTPLTOFF16", + "R_390_GOTPLTOFF32", + "R_390_GOTPLTOFF64", + "R_390_JMP_SLOT", + "R_390_NONE", + "R_390_PC16", + "R_390_PC16DBL", + "R_390_PC32", + "R_390_PC32DBL", + "R_390_PC64", + "R_390_PLT16DBL", + "R_390_PLT32", + "R_390_PLT32DBL", + "R_390_PLT64", + "R_390_RELATIVE", + "R_390_TLS_DTPMOD", + "R_390_TLS_DTPOFF", + "R_390_TLS_GD32", + "R_390_TLS_GD64", + "R_390_TLS_GDCALL", + "R_390_TLS_GOTIE12", + "R_390_TLS_GOTIE20", + "R_390_TLS_GOTIE32", + "R_390_TLS_GOTIE64", + "R_390_TLS_IE32", + "R_390_TLS_IE64", + "R_390_TLS_IEENT", + "R_390_TLS_LDCALL", + "R_390_TLS_LDM32", + "R_390_TLS_LDM64", + "R_390_TLS_LDO32", + "R_390_TLS_LDO64", + "R_390_TLS_LE32", + "R_390_TLS_LE64", + "R_390_TLS_LOAD", + "R_390_TLS_TPOFF", + "R_AARCH64", + "R_AARCH64_ABS16", + "R_AARCH64_ABS32", + "R_AARCH64_ABS64", + "R_AARCH64_ADD_ABS_LO12_NC", + "R_AARCH64_ADR_GOT_PAGE", + "R_AARCH64_ADR_PREL_LO21", + "R_AARCH64_ADR_PREL_PG_HI21", + "R_AARCH64_ADR_PREL_PG_HI21_NC", + "R_AARCH64_CALL26", + "R_AARCH64_CONDBR19", + "R_AARCH64_COPY", + "R_AARCH64_GLOB_DAT", + "R_AARCH64_GOT_LD_PREL19", + "R_AARCH64_IRELATIVE", + "R_AARCH64_JUMP26", + "R_AARCH64_JUMP_SLOT", + "R_AARCH64_LD64_GOTOFF_LO15", + "R_AARCH64_LD64_GOTPAGE_LO15", + "R_AARCH64_LD64_GOT_LO12_NC", + 
"R_AARCH64_LDST128_ABS_LO12_NC", + "R_AARCH64_LDST16_ABS_LO12_NC", + "R_AARCH64_LDST32_ABS_LO12_NC", + "R_AARCH64_LDST64_ABS_LO12_NC", + "R_AARCH64_LDST8_ABS_LO12_NC", + "R_AARCH64_LD_PREL_LO19", + "R_AARCH64_MOVW_SABS_G0", + "R_AARCH64_MOVW_SABS_G1", + "R_AARCH64_MOVW_SABS_G2", + "R_AARCH64_MOVW_UABS_G0", + "R_AARCH64_MOVW_UABS_G0_NC", + "R_AARCH64_MOVW_UABS_G1", + "R_AARCH64_MOVW_UABS_G1_NC", + "R_AARCH64_MOVW_UABS_G2", + "R_AARCH64_MOVW_UABS_G2_NC", + "R_AARCH64_MOVW_UABS_G3", + "R_AARCH64_NONE", + "R_AARCH64_NULL", + "R_AARCH64_P32_ABS16", + "R_AARCH64_P32_ABS32", + "R_AARCH64_P32_ADD_ABS_LO12_NC", + "R_AARCH64_P32_ADR_GOT_PAGE", + "R_AARCH64_P32_ADR_PREL_LO21", + "R_AARCH64_P32_ADR_PREL_PG_HI21", + "R_AARCH64_P32_CALL26", + "R_AARCH64_P32_CONDBR19", + "R_AARCH64_P32_COPY", + "R_AARCH64_P32_GLOB_DAT", + "R_AARCH64_P32_GOT_LD_PREL19", + "R_AARCH64_P32_IRELATIVE", + "R_AARCH64_P32_JUMP26", + "R_AARCH64_P32_JUMP_SLOT", + "R_AARCH64_P32_LD32_GOT_LO12_NC", + "R_AARCH64_P32_LDST128_ABS_LO12_NC", + "R_AARCH64_P32_LDST16_ABS_LO12_NC", + "R_AARCH64_P32_LDST32_ABS_LO12_NC", + "R_AARCH64_P32_LDST64_ABS_LO12_NC", + "R_AARCH64_P32_LDST8_ABS_LO12_NC", + "R_AARCH64_P32_LD_PREL_LO19", + "R_AARCH64_P32_MOVW_SABS_G0", + "R_AARCH64_P32_MOVW_UABS_G0", + "R_AARCH64_P32_MOVW_UABS_G0_NC", + "R_AARCH64_P32_MOVW_UABS_G1", + "R_AARCH64_P32_PREL16", + "R_AARCH64_P32_PREL32", + "R_AARCH64_P32_RELATIVE", + "R_AARCH64_P32_TLSDESC", + "R_AARCH64_P32_TLSDESC_ADD_LO12_NC", + "R_AARCH64_P32_TLSDESC_ADR_PAGE21", + "R_AARCH64_P32_TLSDESC_ADR_PREL21", + "R_AARCH64_P32_TLSDESC_CALL", + "R_AARCH64_P32_TLSDESC_LD32_LO12_NC", + "R_AARCH64_P32_TLSDESC_LD_PREL19", + "R_AARCH64_P32_TLSGD_ADD_LO12_NC", + "R_AARCH64_P32_TLSGD_ADR_PAGE21", + "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", + "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", + "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", + "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", + 
"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", + "R_AARCH64_P32_TLS_DTPMOD", + "R_AARCH64_P32_TLS_DTPREL", + "R_AARCH64_P32_TLS_TPREL", + "R_AARCH64_P32_TSTBR14", + "R_AARCH64_PREL16", + "R_AARCH64_PREL32", + "R_AARCH64_PREL64", + "R_AARCH64_RELATIVE", + "R_AARCH64_TLSDESC", + "R_AARCH64_TLSDESC_ADD", + "R_AARCH64_TLSDESC_ADD_LO12_NC", + "R_AARCH64_TLSDESC_ADR_PAGE21", + "R_AARCH64_TLSDESC_ADR_PREL21", + "R_AARCH64_TLSDESC_CALL", + "R_AARCH64_TLSDESC_LD64_LO12_NC", + "R_AARCH64_TLSDESC_LDR", + "R_AARCH64_TLSDESC_LD_PREL19", + "R_AARCH64_TLSDESC_OFF_G0_NC", + "R_AARCH64_TLSDESC_OFF_G1", + "R_AARCH64_TLSGD_ADD_LO12_NC", + "R_AARCH64_TLSGD_ADR_PAGE21", + "R_AARCH64_TLSGD_ADR_PREL21", + "R_AARCH64_TLSGD_MOVW_G0_NC", + "R_AARCH64_TLSGD_MOVW_G1", + "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", + "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", + "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", + "R_AARCH64_TLSLD_ADR_PAGE21", + "R_AARCH64_TLSLD_ADR_PREL21", + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12", + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", + "R_AARCH64_TLSLE_ADD_TPREL_HI12", + "R_AARCH64_TLSLE_ADD_TPREL_LO12", + "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", + "R_AARCH64_TLSLE_LDST128_TPREL_LO12", + "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G0", + "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G1", + "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G2", + "R_AARCH64_TLS_DTPMOD64", + "R_AARCH64_TLS_DTPREL64", + "R_AARCH64_TLS_TPREL64", + "R_AARCH64_TSTBR14", + "R_ALPHA", + "R_ALPHA_BRADDR", + "R_ALPHA_COPY", + "R_ALPHA_GLOB_DAT", + "R_ALPHA_GPDISP", + "R_ALPHA_GPREL32", + "R_ALPHA_GPRELHIGH", + "R_ALPHA_GPRELLOW", + "R_ALPHA_GPVALUE", + "R_ALPHA_HINT", + "R_ALPHA_IMMED_BR_HI32", + "R_ALPHA_IMMED_GP_16", + "R_ALPHA_IMMED_GP_HI32", + "R_ALPHA_IMMED_LO32", + "R_ALPHA_IMMED_SCN_HI32", + 
"R_ALPHA_JMP_SLOT", + "R_ALPHA_LITERAL", + "R_ALPHA_LITUSE", + "R_ALPHA_NONE", + "R_ALPHA_OP_PRSHIFT", + "R_ALPHA_OP_PSUB", + "R_ALPHA_OP_PUSH", + "R_ALPHA_OP_STORE", + "R_ALPHA_REFLONG", + "R_ALPHA_REFQUAD", + "R_ALPHA_RELATIVE", + "R_ALPHA_SREL16", + "R_ALPHA_SREL32", + "R_ALPHA_SREL64", + "R_ARM", + "R_ARM_ABS12", + "R_ARM_ABS16", + "R_ARM_ABS32", + "R_ARM_ABS32_NOI", + "R_ARM_ABS8", + "R_ARM_ALU_PCREL_15_8", + "R_ARM_ALU_PCREL_23_15", + "R_ARM_ALU_PCREL_7_0", + "R_ARM_ALU_PC_G0", + "R_ARM_ALU_PC_G0_NC", + "R_ARM_ALU_PC_G1", + "R_ARM_ALU_PC_G1_NC", + "R_ARM_ALU_PC_G2", + "R_ARM_ALU_SBREL_19_12_NC", + "R_ARM_ALU_SBREL_27_20_CK", + "R_ARM_ALU_SB_G0", + "R_ARM_ALU_SB_G0_NC", + "R_ARM_ALU_SB_G1", + "R_ARM_ALU_SB_G1_NC", + "R_ARM_ALU_SB_G2", + "R_ARM_AMP_VCALL9", + "R_ARM_BASE_ABS", + "R_ARM_CALL", + "R_ARM_COPY", + "R_ARM_GLOB_DAT", + "R_ARM_GNU_VTENTRY", + "R_ARM_GNU_VTINHERIT", + "R_ARM_GOT32", + "R_ARM_GOTOFF", + "R_ARM_GOTOFF12", + "R_ARM_GOTPC", + "R_ARM_GOTRELAX", + "R_ARM_GOT_ABS", + "R_ARM_GOT_BREL12", + "R_ARM_GOT_PREL", + "R_ARM_IRELATIVE", + "R_ARM_JUMP24", + "R_ARM_JUMP_SLOT", + "R_ARM_LDC_PC_G0", + "R_ARM_LDC_PC_G1", + "R_ARM_LDC_PC_G2", + "R_ARM_LDC_SB_G0", + "R_ARM_LDC_SB_G1", + "R_ARM_LDC_SB_G2", + "R_ARM_LDRS_PC_G0", + "R_ARM_LDRS_PC_G1", + "R_ARM_LDRS_PC_G2", + "R_ARM_LDRS_SB_G0", + "R_ARM_LDRS_SB_G1", + "R_ARM_LDRS_SB_G2", + "R_ARM_LDR_PC_G1", + "R_ARM_LDR_PC_G2", + "R_ARM_LDR_SBREL_11_10_NC", + "R_ARM_LDR_SB_G0", + "R_ARM_LDR_SB_G1", + "R_ARM_LDR_SB_G2", + "R_ARM_ME_TOO", + "R_ARM_MOVT_ABS", + "R_ARM_MOVT_BREL", + "R_ARM_MOVT_PREL", + "R_ARM_MOVW_ABS_NC", + "R_ARM_MOVW_BREL", + "R_ARM_MOVW_BREL_NC", + "R_ARM_MOVW_PREL_NC", + "R_ARM_NONE", + "R_ARM_PC13", + "R_ARM_PC24", + "R_ARM_PLT32", + "R_ARM_PLT32_ABS", + "R_ARM_PREL31", + "R_ARM_PRIVATE_0", + "R_ARM_PRIVATE_1", + "R_ARM_PRIVATE_10", + "R_ARM_PRIVATE_11", + "R_ARM_PRIVATE_12", + "R_ARM_PRIVATE_13", + "R_ARM_PRIVATE_14", + "R_ARM_PRIVATE_15", + "R_ARM_PRIVATE_2", + "R_ARM_PRIVATE_3", + 
"R_ARM_PRIVATE_4", + "R_ARM_PRIVATE_5", + "R_ARM_PRIVATE_6", + "R_ARM_PRIVATE_7", + "R_ARM_PRIVATE_8", + "R_ARM_PRIVATE_9", + "R_ARM_RABS32", + "R_ARM_RBASE", + "R_ARM_REL32", + "R_ARM_REL32_NOI", + "R_ARM_RELATIVE", + "R_ARM_RPC24", + "R_ARM_RREL32", + "R_ARM_RSBREL32", + "R_ARM_RXPC25", + "R_ARM_SBREL31", + "R_ARM_SBREL32", + "R_ARM_SWI24", + "R_ARM_TARGET1", + "R_ARM_TARGET2", + "R_ARM_THM_ABS5", + "R_ARM_THM_ALU_ABS_G0_NC", + "R_ARM_THM_ALU_ABS_G1_NC", + "R_ARM_THM_ALU_ABS_G2_NC", + "R_ARM_THM_ALU_ABS_G3", + "R_ARM_THM_ALU_PREL_11_0", + "R_ARM_THM_GOT_BREL12", + "R_ARM_THM_JUMP11", + "R_ARM_THM_JUMP19", + "R_ARM_THM_JUMP24", + "R_ARM_THM_JUMP6", + "R_ARM_THM_JUMP8", + "R_ARM_THM_MOVT_ABS", + "R_ARM_THM_MOVT_BREL", + "R_ARM_THM_MOVT_PREL", + "R_ARM_THM_MOVW_ABS_NC", + "R_ARM_THM_MOVW_BREL", + "R_ARM_THM_MOVW_BREL_NC", + "R_ARM_THM_MOVW_PREL_NC", + "R_ARM_THM_PC12", + "R_ARM_THM_PC22", + "R_ARM_THM_PC8", + "R_ARM_THM_RPC22", + "R_ARM_THM_SWI8", + "R_ARM_THM_TLS_CALL", + "R_ARM_THM_TLS_DESCSEQ16", + "R_ARM_THM_TLS_DESCSEQ32", + "R_ARM_THM_XPC22", + "R_ARM_TLS_CALL", + "R_ARM_TLS_DESCSEQ", + "R_ARM_TLS_DTPMOD32", + "R_ARM_TLS_DTPOFF32", + "R_ARM_TLS_GD32", + "R_ARM_TLS_GOTDESC", + "R_ARM_TLS_IE12GP", + "R_ARM_TLS_IE32", + "R_ARM_TLS_LDM32", + "R_ARM_TLS_LDO12", + "R_ARM_TLS_LDO32", + "R_ARM_TLS_LE12", + "R_ARM_TLS_LE32", + "R_ARM_TLS_TPOFF32", + "R_ARM_V4BX", + "R_ARM_XPC25", + "R_INFO", + "R_INFO32", + "R_LARCH", + "R_LARCH_32", + "R_LARCH_32_PCREL", + "R_LARCH_64", + "R_LARCH_ABS64_HI12", + "R_LARCH_ABS64_LO20", + "R_LARCH_ABS_HI20", + "R_LARCH_ABS_LO12", + "R_LARCH_ADD16", + "R_LARCH_ADD24", + "R_LARCH_ADD32", + "R_LARCH_ADD64", + "R_LARCH_ADD8", + "R_LARCH_B16", + "R_LARCH_B21", + "R_LARCH_B26", + "R_LARCH_COPY", + "R_LARCH_GNU_VTENTRY", + "R_LARCH_GNU_VTINHERIT", + "R_LARCH_GOT64_HI12", + "R_LARCH_GOT64_LO20", + "R_LARCH_GOT64_PC_HI12", + "R_LARCH_GOT64_PC_LO20", + "R_LARCH_GOT_HI20", + "R_LARCH_GOT_LO12", + "R_LARCH_GOT_PC_HI20", + "R_LARCH_GOT_PC_LO12", + 
"R_LARCH_IRELATIVE", + "R_LARCH_JUMP_SLOT", + "R_LARCH_MARK_LA", + "R_LARCH_MARK_PCREL", + "R_LARCH_NONE", + "R_LARCH_PCALA64_HI12", + "R_LARCH_PCALA64_LO20", + "R_LARCH_PCALA_HI20", + "R_LARCH_PCALA_LO12", + "R_LARCH_RELATIVE", + "R_LARCH_RELAX", + "R_LARCH_SOP_ADD", + "R_LARCH_SOP_AND", + "R_LARCH_SOP_ASSERT", + "R_LARCH_SOP_IF_ELSE", + "R_LARCH_SOP_NOT", + "R_LARCH_SOP_POP_32_S_0_10_10_16_S2", + "R_LARCH_SOP_POP_32_S_0_5_10_16_S2", + "R_LARCH_SOP_POP_32_S_10_12", + "R_LARCH_SOP_POP_32_S_10_16", + "R_LARCH_SOP_POP_32_S_10_16_S2", + "R_LARCH_SOP_POP_32_S_10_5", + "R_LARCH_SOP_POP_32_S_5_20", + "R_LARCH_SOP_POP_32_U", + "R_LARCH_SOP_POP_32_U_10_12", + "R_LARCH_SOP_PUSH_ABSOLUTE", + "R_LARCH_SOP_PUSH_DUP", + "R_LARCH_SOP_PUSH_GPREL", + "R_LARCH_SOP_PUSH_PCREL", + "R_LARCH_SOP_PUSH_PLT_PCREL", + "R_LARCH_SOP_PUSH_TLS_GD", + "R_LARCH_SOP_PUSH_TLS_GOT", + "R_LARCH_SOP_PUSH_TLS_TPREL", + "R_LARCH_SOP_SL", + "R_LARCH_SOP_SR", + "R_LARCH_SOP_SUB", + "R_LARCH_SUB16", + "R_LARCH_SUB24", + "R_LARCH_SUB32", + "R_LARCH_SUB64", + "R_LARCH_SUB8", + "R_LARCH_TLS_DTPMOD32", + "R_LARCH_TLS_DTPMOD64", + "R_LARCH_TLS_DTPREL32", + "R_LARCH_TLS_DTPREL64", + "R_LARCH_TLS_GD_HI20", + "R_LARCH_TLS_GD_PC_HI20", + "R_LARCH_TLS_IE64_HI12", + "R_LARCH_TLS_IE64_LO20", + "R_LARCH_TLS_IE64_PC_HI12", + "R_LARCH_TLS_IE64_PC_LO20", + "R_LARCH_TLS_IE_HI20", + "R_LARCH_TLS_IE_LO12", + "R_LARCH_TLS_IE_PC_HI20", + "R_LARCH_TLS_IE_PC_LO12", + "R_LARCH_TLS_LD_HI20", + "R_LARCH_TLS_LD_PC_HI20", + "R_LARCH_TLS_LE64_HI12", + "R_LARCH_TLS_LE64_LO20", + "R_LARCH_TLS_LE_HI20", + "R_LARCH_TLS_LE_LO12", + "R_LARCH_TLS_TPREL32", + "R_LARCH_TLS_TPREL64", + "R_MIPS", + "R_MIPS_16", + "R_MIPS_26", + "R_MIPS_32", + "R_MIPS_64", + "R_MIPS_ADD_IMMEDIATE", + "R_MIPS_CALL16", + "R_MIPS_CALL_HI16", + "R_MIPS_CALL_LO16", + "R_MIPS_DELETE", + "R_MIPS_GOT16", + "R_MIPS_GOT_DISP", + "R_MIPS_GOT_HI16", + "R_MIPS_GOT_LO16", + "R_MIPS_GOT_OFST", + "R_MIPS_GOT_PAGE", + "R_MIPS_GPREL16", + "R_MIPS_GPREL32", + "R_MIPS_HI16", + 
"R_MIPS_HIGHER", + "R_MIPS_HIGHEST", + "R_MIPS_INSERT_A", + "R_MIPS_INSERT_B", + "R_MIPS_JALR", + "R_MIPS_LITERAL", + "R_MIPS_LO16", + "R_MIPS_NONE", + "R_MIPS_PC16", + "R_MIPS_PJUMP", + "R_MIPS_REL16", + "R_MIPS_REL32", + "R_MIPS_RELGOT", + "R_MIPS_SCN_DISP", + "R_MIPS_SHIFT5", + "R_MIPS_SHIFT6", + "R_MIPS_SUB", + "R_MIPS_TLS_DTPMOD32", + "R_MIPS_TLS_DTPMOD64", + "R_MIPS_TLS_DTPREL32", + "R_MIPS_TLS_DTPREL64", + "R_MIPS_TLS_DTPREL_HI16", + "R_MIPS_TLS_DTPREL_LO16", + "R_MIPS_TLS_GD", + "R_MIPS_TLS_GOTTPREL", + "R_MIPS_TLS_LDM", + "R_MIPS_TLS_TPREL32", + "R_MIPS_TLS_TPREL64", + "R_MIPS_TLS_TPREL_HI16", + "R_MIPS_TLS_TPREL_LO16", + "R_PPC", + "R_PPC64", + "R_PPC64_ADDR14", + "R_PPC64_ADDR14_BRNTAKEN", + "R_PPC64_ADDR14_BRTAKEN", + "R_PPC64_ADDR16", + "R_PPC64_ADDR16_DS", + "R_PPC64_ADDR16_HA", + "R_PPC64_ADDR16_HI", + "R_PPC64_ADDR16_HIGH", + "R_PPC64_ADDR16_HIGHA", + "R_PPC64_ADDR16_HIGHER", + "R_PPC64_ADDR16_HIGHER34", + "R_PPC64_ADDR16_HIGHERA", + "R_PPC64_ADDR16_HIGHERA34", + "R_PPC64_ADDR16_HIGHEST", + "R_PPC64_ADDR16_HIGHEST34", + "R_PPC64_ADDR16_HIGHESTA", + "R_PPC64_ADDR16_HIGHESTA34", + "R_PPC64_ADDR16_LO", + "R_PPC64_ADDR16_LO_DS", + "R_PPC64_ADDR24", + "R_PPC64_ADDR32", + "R_PPC64_ADDR64", + "R_PPC64_ADDR64_LOCAL", + "R_PPC64_COPY", + "R_PPC64_D28", + "R_PPC64_D34", + "R_PPC64_D34_HA30", + "R_PPC64_D34_HI30", + "R_PPC64_D34_LO", + "R_PPC64_DTPMOD64", + "R_PPC64_DTPREL16", + "R_PPC64_DTPREL16_DS", + "R_PPC64_DTPREL16_HA", + "R_PPC64_DTPREL16_HI", + "R_PPC64_DTPREL16_HIGH", + "R_PPC64_DTPREL16_HIGHA", + "R_PPC64_DTPREL16_HIGHER", + "R_PPC64_DTPREL16_HIGHERA", + "R_PPC64_DTPREL16_HIGHEST", + "R_PPC64_DTPREL16_HIGHESTA", + "R_PPC64_DTPREL16_LO", + "R_PPC64_DTPREL16_LO_DS", + "R_PPC64_DTPREL34", + "R_PPC64_DTPREL64", + "R_PPC64_ENTRY", + "R_PPC64_GLOB_DAT", + "R_PPC64_GNU_VTENTRY", + "R_PPC64_GNU_VTINHERIT", + "R_PPC64_GOT16", + "R_PPC64_GOT16_DS", + "R_PPC64_GOT16_HA", + "R_PPC64_GOT16_HI", + "R_PPC64_GOT16_LO", + "R_PPC64_GOT16_LO_DS", + 
"R_PPC64_GOT_DTPREL16_DS", + "R_PPC64_GOT_DTPREL16_HA", + "R_PPC64_GOT_DTPREL16_HI", + "R_PPC64_GOT_DTPREL16_LO_DS", + "R_PPC64_GOT_DTPREL_PCREL34", + "R_PPC64_GOT_PCREL34", + "R_PPC64_GOT_TLSGD16", + "R_PPC64_GOT_TLSGD16_HA", + "R_PPC64_GOT_TLSGD16_HI", + "R_PPC64_GOT_TLSGD16_LO", + "R_PPC64_GOT_TLSGD_PCREL34", + "R_PPC64_GOT_TLSLD16", + "R_PPC64_GOT_TLSLD16_HA", + "R_PPC64_GOT_TLSLD16_HI", + "R_PPC64_GOT_TLSLD16_LO", + "R_PPC64_GOT_TLSLD_PCREL34", + "R_PPC64_GOT_TPREL16_DS", + "R_PPC64_GOT_TPREL16_HA", + "R_PPC64_GOT_TPREL16_HI", + "R_PPC64_GOT_TPREL16_LO_DS", + "R_PPC64_GOT_TPREL_PCREL34", + "R_PPC64_IRELATIVE", + "R_PPC64_JMP_IREL", + "R_PPC64_JMP_SLOT", + "R_PPC64_NONE", + "R_PPC64_PCREL28", + "R_PPC64_PCREL34", + "R_PPC64_PCREL_OPT", + "R_PPC64_PLT16_HA", + "R_PPC64_PLT16_HI", + "R_PPC64_PLT16_LO", + "R_PPC64_PLT16_LO_DS", + "R_PPC64_PLT32", + "R_PPC64_PLT64", + "R_PPC64_PLTCALL", + "R_PPC64_PLTCALL_NOTOC", + "R_PPC64_PLTGOT16", + "R_PPC64_PLTGOT16_DS", + "R_PPC64_PLTGOT16_HA", + "R_PPC64_PLTGOT16_HI", + "R_PPC64_PLTGOT16_LO", + "R_PPC64_PLTGOT_LO_DS", + "R_PPC64_PLTREL32", + "R_PPC64_PLTREL64", + "R_PPC64_PLTSEQ", + "R_PPC64_PLTSEQ_NOTOC", + "R_PPC64_PLT_PCREL34", + "R_PPC64_PLT_PCREL34_NOTOC", + "R_PPC64_REL14", + "R_PPC64_REL14_BRNTAKEN", + "R_PPC64_REL14_BRTAKEN", + "R_PPC64_REL16", + "R_PPC64_REL16DX_HA", + "R_PPC64_REL16_HA", + "R_PPC64_REL16_HI", + "R_PPC64_REL16_HIGH", + "R_PPC64_REL16_HIGHA", + "R_PPC64_REL16_HIGHER", + "R_PPC64_REL16_HIGHER34", + "R_PPC64_REL16_HIGHERA", + "R_PPC64_REL16_HIGHERA34", + "R_PPC64_REL16_HIGHEST", + "R_PPC64_REL16_HIGHEST34", + "R_PPC64_REL16_HIGHESTA", + "R_PPC64_REL16_HIGHESTA34", + "R_PPC64_REL16_LO", + "R_PPC64_REL24", + "R_PPC64_REL24_NOTOC", + "R_PPC64_REL30", + "R_PPC64_REL32", + "R_PPC64_REL64", + "R_PPC64_RELATIVE", + "R_PPC64_SECTOFF", + "R_PPC64_SECTOFF_DS", + "R_PPC64_SECTOFF_HA", + "R_PPC64_SECTOFF_HI", + "R_PPC64_SECTOFF_LO", + "R_PPC64_SECTOFF_LO_DS", + "R_PPC64_TLS", + "R_PPC64_TLSGD", + "R_PPC64_TLSLD", 
+ "R_PPC64_TOC", + "R_PPC64_TOC16", + "R_PPC64_TOC16_DS", + "R_PPC64_TOC16_HA", + "R_PPC64_TOC16_HI", + "R_PPC64_TOC16_LO", + "R_PPC64_TOC16_LO_DS", + "R_PPC64_TOCSAVE", + "R_PPC64_TPREL16", + "R_PPC64_TPREL16_DS", + "R_PPC64_TPREL16_HA", + "R_PPC64_TPREL16_HI", + "R_PPC64_TPREL16_HIGH", + "R_PPC64_TPREL16_HIGHA", + "R_PPC64_TPREL16_HIGHER", + "R_PPC64_TPREL16_HIGHERA", + "R_PPC64_TPREL16_HIGHEST", + "R_PPC64_TPREL16_HIGHESTA", + "R_PPC64_TPREL16_LO", + "R_PPC64_TPREL16_LO_DS", + "R_PPC64_TPREL34", + "R_PPC64_TPREL64", + "R_PPC64_UADDR16", + "R_PPC64_UADDR32", + "R_PPC64_UADDR64", + "R_PPC_ADDR14", + "R_PPC_ADDR14_BRNTAKEN", + "R_PPC_ADDR14_BRTAKEN", + "R_PPC_ADDR16", + "R_PPC_ADDR16_HA", + "R_PPC_ADDR16_HI", + "R_PPC_ADDR16_LO", + "R_PPC_ADDR24", + "R_PPC_ADDR32", + "R_PPC_COPY", + "R_PPC_DTPMOD32", + "R_PPC_DTPREL16", + "R_PPC_DTPREL16_HA", + "R_PPC_DTPREL16_HI", + "R_PPC_DTPREL16_LO", + "R_PPC_DTPREL32", + "R_PPC_EMB_BIT_FLD", + "R_PPC_EMB_MRKREF", + "R_PPC_EMB_NADDR16", + "R_PPC_EMB_NADDR16_HA", + "R_PPC_EMB_NADDR16_HI", + "R_PPC_EMB_NADDR16_LO", + "R_PPC_EMB_NADDR32", + "R_PPC_EMB_RELSDA", + "R_PPC_EMB_RELSEC16", + "R_PPC_EMB_RELST_HA", + "R_PPC_EMB_RELST_HI", + "R_PPC_EMB_RELST_LO", + "R_PPC_EMB_SDA21", + "R_PPC_EMB_SDA2I16", + "R_PPC_EMB_SDA2REL", + "R_PPC_EMB_SDAI16", + "R_PPC_GLOB_DAT", + "R_PPC_GOT16", + "R_PPC_GOT16_HA", + "R_PPC_GOT16_HI", + "R_PPC_GOT16_LO", + "R_PPC_GOT_TLSGD16", + "R_PPC_GOT_TLSGD16_HA", + "R_PPC_GOT_TLSGD16_HI", + "R_PPC_GOT_TLSGD16_LO", + "R_PPC_GOT_TLSLD16", + "R_PPC_GOT_TLSLD16_HA", + "R_PPC_GOT_TLSLD16_HI", + "R_PPC_GOT_TLSLD16_LO", + "R_PPC_GOT_TPREL16", + "R_PPC_GOT_TPREL16_HA", + "R_PPC_GOT_TPREL16_HI", + "R_PPC_GOT_TPREL16_LO", + "R_PPC_JMP_SLOT", + "R_PPC_LOCAL24PC", + "R_PPC_NONE", + "R_PPC_PLT16_HA", + "R_PPC_PLT16_HI", + "R_PPC_PLT16_LO", + "R_PPC_PLT32", + "R_PPC_PLTREL24", + "R_PPC_PLTREL32", + "R_PPC_REL14", + "R_PPC_REL14_BRNTAKEN", + "R_PPC_REL14_BRTAKEN", + "R_PPC_REL24", + "R_PPC_REL32", + "R_PPC_RELATIVE", + 
"R_PPC_SDAREL16", + "R_PPC_SECTOFF", + "R_PPC_SECTOFF_HA", + "R_PPC_SECTOFF_HI", + "R_PPC_SECTOFF_LO", + "R_PPC_TLS", + "R_PPC_TPREL16", + "R_PPC_TPREL16_HA", + "R_PPC_TPREL16_HI", + "R_PPC_TPREL16_LO", + "R_PPC_TPREL32", + "R_PPC_UADDR16", + "R_PPC_UADDR32", + "R_RISCV", + "R_RISCV_32", + "R_RISCV_32_PCREL", + "R_RISCV_64", + "R_RISCV_ADD16", + "R_RISCV_ADD32", + "R_RISCV_ADD64", + "R_RISCV_ADD8", + "R_RISCV_ALIGN", + "R_RISCV_BRANCH", + "R_RISCV_CALL", + "R_RISCV_CALL_PLT", + "R_RISCV_COPY", + "R_RISCV_GNU_VTENTRY", + "R_RISCV_GNU_VTINHERIT", + "R_RISCV_GOT_HI20", + "R_RISCV_GPREL_I", + "R_RISCV_GPREL_S", + "R_RISCV_HI20", + "R_RISCV_JAL", + "R_RISCV_JUMP_SLOT", + "R_RISCV_LO12_I", + "R_RISCV_LO12_S", + "R_RISCV_NONE", + "R_RISCV_PCREL_HI20", + "R_RISCV_PCREL_LO12_I", + "R_RISCV_PCREL_LO12_S", + "R_RISCV_RELATIVE", + "R_RISCV_RELAX", + "R_RISCV_RVC_BRANCH", + "R_RISCV_RVC_JUMP", + "R_RISCV_RVC_LUI", + "R_RISCV_SET16", + "R_RISCV_SET32", + "R_RISCV_SET6", + "R_RISCV_SET8", + "R_RISCV_SUB16", + "R_RISCV_SUB32", + "R_RISCV_SUB6", + "R_RISCV_SUB64", + "R_RISCV_SUB8", + "R_RISCV_TLS_DTPMOD32", + "R_RISCV_TLS_DTPMOD64", + "R_RISCV_TLS_DTPREL32", + "R_RISCV_TLS_DTPREL64", + "R_RISCV_TLS_GD_HI20", + "R_RISCV_TLS_GOT_HI20", + "R_RISCV_TLS_TPREL32", + "R_RISCV_TLS_TPREL64", + "R_RISCV_TPREL_ADD", + "R_RISCV_TPREL_HI20", + "R_RISCV_TPREL_I", + "R_RISCV_TPREL_LO12_I", + "R_RISCV_TPREL_LO12_S", + "R_RISCV_TPREL_S", + "R_SPARC", + "R_SPARC_10", + "R_SPARC_11", + "R_SPARC_13", + "R_SPARC_16", + "R_SPARC_22", + "R_SPARC_32", + "R_SPARC_5", + "R_SPARC_6", + "R_SPARC_64", + "R_SPARC_7", + "R_SPARC_8", + "R_SPARC_COPY", + "R_SPARC_DISP16", + "R_SPARC_DISP32", + "R_SPARC_DISP64", + "R_SPARC_DISP8", + "R_SPARC_GLOB_DAT", + "R_SPARC_GLOB_JMP", + "R_SPARC_GOT10", + "R_SPARC_GOT13", + "R_SPARC_GOT22", + "R_SPARC_H44", + "R_SPARC_HH22", + "R_SPARC_HI22", + "R_SPARC_HIPLT22", + "R_SPARC_HIX22", + "R_SPARC_HM10", + "R_SPARC_JMP_SLOT", + "R_SPARC_L44", + "R_SPARC_LM22", + "R_SPARC_LO10", + 
"R_SPARC_LOPLT10", + "R_SPARC_LOX10", + "R_SPARC_M44", + "R_SPARC_NONE", + "R_SPARC_OLO10", + "R_SPARC_PC10", + "R_SPARC_PC22", + "R_SPARC_PCPLT10", + "R_SPARC_PCPLT22", + "R_SPARC_PCPLT32", + "R_SPARC_PC_HH22", + "R_SPARC_PC_HM10", + "R_SPARC_PC_LM22", + "R_SPARC_PLT32", + "R_SPARC_PLT64", + "R_SPARC_REGISTER", + "R_SPARC_RELATIVE", + "R_SPARC_UA16", + "R_SPARC_UA32", + "R_SPARC_UA64", + "R_SPARC_WDISP16", + "R_SPARC_WDISP19", + "R_SPARC_WDISP22", + "R_SPARC_WDISP30", + "R_SPARC_WPLT30", + "R_SYM32", + "R_SYM64", + "R_TYPE32", + "R_TYPE64", + "R_X86_64", + "R_X86_64_16", + "R_X86_64_32", + "R_X86_64_32S", + "R_X86_64_64", + "R_X86_64_8", + "R_X86_64_COPY", + "R_X86_64_DTPMOD64", + "R_X86_64_DTPOFF32", + "R_X86_64_DTPOFF64", + "R_X86_64_GLOB_DAT", + "R_X86_64_GOT32", + "R_X86_64_GOT64", + "R_X86_64_GOTOFF64", + "R_X86_64_GOTPC32", + "R_X86_64_GOTPC32_TLSDESC", + "R_X86_64_GOTPC64", + "R_X86_64_GOTPCREL", + "R_X86_64_GOTPCREL64", + "R_X86_64_GOTPCRELX", + "R_X86_64_GOTPLT64", + "R_X86_64_GOTTPOFF", + "R_X86_64_IRELATIVE", + "R_X86_64_JMP_SLOT", + "R_X86_64_NONE", + "R_X86_64_PC16", + "R_X86_64_PC32", + "R_X86_64_PC32_BND", + "R_X86_64_PC64", + "R_X86_64_PC8", + "R_X86_64_PLT32", + "R_X86_64_PLT32_BND", + "R_X86_64_PLTOFF64", + "R_X86_64_RELATIVE", + "R_X86_64_RELATIVE64", + "R_X86_64_REX_GOTPCRELX", + "R_X86_64_SIZE32", + "R_X86_64_SIZE64", + "R_X86_64_TLSDESC", + "R_X86_64_TLSDESC_CALL", + "R_X86_64_TLSGD", + "R_X86_64_TLSLD", + "R_X86_64_TPOFF32", + "R_X86_64_TPOFF64", + "Rel32", + "Rel64", + "Rela32", + "Rela64", + "SHF_ALLOC", + "SHF_COMPRESSED", + "SHF_EXECINSTR", + "SHF_GROUP", + "SHF_INFO_LINK", + "SHF_LINK_ORDER", + "SHF_MASKOS", + "SHF_MASKPROC", + "SHF_MERGE", + "SHF_OS_NONCONFORMING", + "SHF_STRINGS", + "SHF_TLS", + "SHF_WRITE", + "SHN_ABS", + "SHN_COMMON", + "SHN_HIOS", + "SHN_HIPROC", + "SHN_HIRESERVE", + "SHN_LOOS", + "SHN_LOPROC", + "SHN_LORESERVE", + "SHN_UNDEF", + "SHN_XINDEX", + "SHT_DYNAMIC", + "SHT_DYNSYM", + "SHT_FINI_ARRAY", + 
"SHT_GNU_ATTRIBUTES", + "SHT_GNU_HASH", + "SHT_GNU_LIBLIST", + "SHT_GNU_VERDEF", + "SHT_GNU_VERNEED", + "SHT_GNU_VERSYM", + "SHT_GROUP", + "SHT_HASH", + "SHT_HIOS", + "SHT_HIPROC", + "SHT_HIUSER", + "SHT_INIT_ARRAY", + "SHT_LOOS", + "SHT_LOPROC", + "SHT_LOUSER", + "SHT_MIPS_ABIFLAGS", + "SHT_NOBITS", + "SHT_NOTE", + "SHT_NULL", + "SHT_PREINIT_ARRAY", + "SHT_PROGBITS", + "SHT_REL", + "SHT_RELA", + "SHT_SHLIB", + "SHT_STRTAB", + "SHT_SYMTAB", + "SHT_SYMTAB_SHNDX", + "STB_GLOBAL", + "STB_HIOS", + "STB_HIPROC", + "STB_LOCAL", + "STB_LOOS", + "STB_LOPROC", + "STB_WEAK", + "STT_COMMON", + "STT_FILE", + "STT_FUNC", + "STT_HIOS", + "STT_HIPROC", + "STT_LOOS", + "STT_LOPROC", + "STT_NOTYPE", + "STT_OBJECT", + "STT_SECTION", + "STT_TLS", + "STV_DEFAULT", + "STV_HIDDEN", + "STV_INTERNAL", + "STV_PROTECTED", + "ST_BIND", + "ST_INFO", + "ST_TYPE", + "ST_VISIBILITY", + "Section", + "Section32", + "Section64", + "SectionFlag", + "SectionHeader", + "SectionIndex", + "SectionType", + "Sym32", + "Sym32Size", + "Sym64", + "Sym64Size", + "SymBind", + "SymType", + "SymVis", + "Symbol", + "Type", + "Version", + }, + "debug/gosym": { + "DecodingError", + "Func", + "LineTable", + "NewLineTable", + "NewTable", + "Obj", + "Sym", + "Table", + "UnknownFileError", + "UnknownLineError", + }, + "debug/macho": { + "ARM64_RELOC_ADDEND", + "ARM64_RELOC_BRANCH26", + "ARM64_RELOC_GOT_LOAD_PAGE21", + "ARM64_RELOC_GOT_LOAD_PAGEOFF12", + "ARM64_RELOC_PAGE21", + "ARM64_RELOC_PAGEOFF12", + "ARM64_RELOC_POINTER_TO_GOT", + "ARM64_RELOC_SUBTRACTOR", + "ARM64_RELOC_TLVP_LOAD_PAGE21", + "ARM64_RELOC_TLVP_LOAD_PAGEOFF12", + "ARM64_RELOC_UNSIGNED", + "ARM_RELOC_BR24", + "ARM_RELOC_HALF", + "ARM_RELOC_HALF_SECTDIFF", + "ARM_RELOC_LOCAL_SECTDIFF", + "ARM_RELOC_PAIR", + "ARM_RELOC_PB_LA_PTR", + "ARM_RELOC_SECTDIFF", + "ARM_RELOC_VANILLA", + "ARM_THUMB_32BIT_BRANCH", + "ARM_THUMB_RELOC_BR22", + "Cpu", + "Cpu386", + "CpuAmd64", + "CpuArm", + "CpuArm64", + "CpuPpc", + "CpuPpc64", + "Dylib", + "DylibCmd", + "Dysymtab", 
+ "DysymtabCmd", + "ErrNotFat", + "FatArch", + "FatArchHeader", + "FatFile", + "File", + "FileHeader", + "FlagAllModsBound", + "FlagAllowStackExecution", + "FlagAppExtensionSafe", + "FlagBindAtLoad", + "FlagBindsToWeak", + "FlagCanonical", + "FlagDeadStrippableDylib", + "FlagDyldLink", + "FlagForceFlat", + "FlagHasTLVDescriptors", + "FlagIncrLink", + "FlagLazyInit", + "FlagNoFixPrebinding", + "FlagNoHeapExecution", + "FlagNoMultiDefs", + "FlagNoReexportedDylibs", + "FlagNoUndefs", + "FlagPIE", + "FlagPrebindable", + "FlagPrebound", + "FlagRootSafe", + "FlagSetuidSafe", + "FlagSplitSegs", + "FlagSubsectionsViaSymbols", + "FlagTwoLevel", + "FlagWeakDefines", + "FormatError", + "GENERIC_RELOC_LOCAL_SECTDIFF", + "GENERIC_RELOC_PAIR", + "GENERIC_RELOC_PB_LA_PTR", + "GENERIC_RELOC_SECTDIFF", + "GENERIC_RELOC_TLV", + "GENERIC_RELOC_VANILLA", + "Load", + "LoadBytes", + "LoadCmd", + "LoadCmdDylib", + "LoadCmdDylinker", + "LoadCmdDysymtab", + "LoadCmdRpath", + "LoadCmdSegment", + "LoadCmdSegment64", + "LoadCmdSymtab", + "LoadCmdThread", + "LoadCmdUnixThread", + "Magic32", + "Magic64", + "MagicFat", + "NewFatFile", + "NewFile", + "Nlist32", + "Nlist64", + "Open", + "OpenFat", + "Regs386", + "RegsAMD64", + "Reloc", + "RelocTypeARM", + "RelocTypeARM64", + "RelocTypeGeneric", + "RelocTypeX86_64", + "Rpath", + "RpathCmd", + "Section", + "Section32", + "Section64", + "SectionHeader", + "Segment", + "Segment32", + "Segment64", + "SegmentHeader", + "Symbol", + "Symtab", + "SymtabCmd", + "Thread", + "Type", + "TypeBundle", + "TypeDylib", + "TypeExec", + "TypeObj", + "X86_64_RELOC_BRANCH", + "X86_64_RELOC_GOT", + "X86_64_RELOC_GOT_LOAD", + "X86_64_RELOC_SIGNED", + "X86_64_RELOC_SIGNED_1", + "X86_64_RELOC_SIGNED_2", + "X86_64_RELOC_SIGNED_4", + "X86_64_RELOC_SUBTRACTOR", + "X86_64_RELOC_TLV", + "X86_64_RELOC_UNSIGNED", + }, + "debug/pe": { + "COFFSymbol", + "COFFSymbolAuxFormat5", + "COFFSymbolSize", + "DataDirectory", + "File", + "FileHeader", + "FormatError", + 
"IMAGE_COMDAT_SELECT_ANY", + "IMAGE_COMDAT_SELECT_ASSOCIATIVE", + "IMAGE_COMDAT_SELECT_EXACT_MATCH", + "IMAGE_COMDAT_SELECT_LARGEST", + "IMAGE_COMDAT_SELECT_NODUPLICATES", + "IMAGE_COMDAT_SELECT_SAME_SIZE", + "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", + "IMAGE_DIRECTORY_ENTRY_BASERELOC", + "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", + "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", + "IMAGE_DIRECTORY_ENTRY_DEBUG", + "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", + "IMAGE_DIRECTORY_ENTRY_EXCEPTION", + "IMAGE_DIRECTORY_ENTRY_EXPORT", + "IMAGE_DIRECTORY_ENTRY_GLOBALPTR", + "IMAGE_DIRECTORY_ENTRY_IAT", + "IMAGE_DIRECTORY_ENTRY_IMPORT", + "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", + "IMAGE_DIRECTORY_ENTRY_RESOURCE", + "IMAGE_DIRECTORY_ENTRY_SECURITY", + "IMAGE_DIRECTORY_ENTRY_TLS", + "IMAGE_DLLCHARACTERISTICS_APPCONTAINER", + "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", + "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", + "IMAGE_DLLCHARACTERISTICS_GUARD_CF", + "IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", + "IMAGE_DLLCHARACTERISTICS_NO_BIND", + "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", + "IMAGE_DLLCHARACTERISTICS_NO_SEH", + "IMAGE_DLLCHARACTERISTICS_NX_COMPAT", + "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", + "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", + "IMAGE_FILE_32BIT_MACHINE", + "IMAGE_FILE_AGGRESIVE_WS_TRIM", + "IMAGE_FILE_BYTES_REVERSED_HI", + "IMAGE_FILE_BYTES_REVERSED_LO", + "IMAGE_FILE_DEBUG_STRIPPED", + "IMAGE_FILE_DLL", + "IMAGE_FILE_EXECUTABLE_IMAGE", + "IMAGE_FILE_LARGE_ADDRESS_AWARE", + "IMAGE_FILE_LINE_NUMS_STRIPPED", + "IMAGE_FILE_LOCAL_SYMS_STRIPPED", + "IMAGE_FILE_MACHINE_AM33", + "IMAGE_FILE_MACHINE_AMD64", + "IMAGE_FILE_MACHINE_ARM", + "IMAGE_FILE_MACHINE_ARM64", + "IMAGE_FILE_MACHINE_ARMNT", + "IMAGE_FILE_MACHINE_EBC", + "IMAGE_FILE_MACHINE_I386", + "IMAGE_FILE_MACHINE_IA64", + "IMAGE_FILE_MACHINE_LOONGARCH32", + "IMAGE_FILE_MACHINE_LOONGARCH64", + "IMAGE_FILE_MACHINE_M32R", + "IMAGE_FILE_MACHINE_MIPS16", + "IMAGE_FILE_MACHINE_MIPSFPU", + "IMAGE_FILE_MACHINE_MIPSFPU16", + 
"IMAGE_FILE_MACHINE_POWERPC", + "IMAGE_FILE_MACHINE_POWERPCFP", + "IMAGE_FILE_MACHINE_R4000", + "IMAGE_FILE_MACHINE_RISCV128", + "IMAGE_FILE_MACHINE_RISCV32", + "IMAGE_FILE_MACHINE_RISCV64", + "IMAGE_FILE_MACHINE_SH3", + "IMAGE_FILE_MACHINE_SH3DSP", + "IMAGE_FILE_MACHINE_SH4", + "IMAGE_FILE_MACHINE_SH5", + "IMAGE_FILE_MACHINE_THUMB", + "IMAGE_FILE_MACHINE_UNKNOWN", + "IMAGE_FILE_MACHINE_WCEMIPSV2", + "IMAGE_FILE_NET_RUN_FROM_SWAP", + "IMAGE_FILE_RELOCS_STRIPPED", + "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", + "IMAGE_FILE_SYSTEM", + "IMAGE_FILE_UP_SYSTEM_ONLY", + "IMAGE_SCN_CNT_CODE", + "IMAGE_SCN_CNT_INITIALIZED_DATA", + "IMAGE_SCN_CNT_UNINITIALIZED_DATA", + "IMAGE_SCN_LNK_COMDAT", + "IMAGE_SCN_MEM_DISCARDABLE", + "IMAGE_SCN_MEM_EXECUTE", + "IMAGE_SCN_MEM_READ", + "IMAGE_SCN_MEM_WRITE", + "IMAGE_SUBSYSTEM_EFI_APPLICATION", + "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", + "IMAGE_SUBSYSTEM_EFI_ROM", + "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", + "IMAGE_SUBSYSTEM_NATIVE", + "IMAGE_SUBSYSTEM_NATIVE_WINDOWS", + "IMAGE_SUBSYSTEM_OS2_CUI", + "IMAGE_SUBSYSTEM_POSIX_CUI", + "IMAGE_SUBSYSTEM_UNKNOWN", + "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", + "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", + "IMAGE_SUBSYSTEM_WINDOWS_CUI", + "IMAGE_SUBSYSTEM_WINDOWS_GUI", + "IMAGE_SUBSYSTEM_XBOX", + "ImportDirectory", + "NewFile", + "Open", + "OptionalHeader32", + "OptionalHeader64", + "Reloc", + "Section", + "SectionHeader", + "SectionHeader32", + "StringTable", + "Symbol", + }, + "debug/plan9obj": { + "ErrNoSymbols", + "File", + "FileHeader", + "Magic386", + "Magic64", + "MagicAMD64", + "MagicARM", + "NewFile", + "Open", + "Section", + "SectionHeader", + "Sym", + }, + "embed": { + "FS", + }, + "encoding": { + "BinaryMarshaler", + "BinaryUnmarshaler", + "TextMarshaler", + "TextUnmarshaler", + }, + "encoding/ascii85": { + "CorruptInputError", + "Decode", + "Encode", + "MaxEncodedLen", + "NewDecoder", + "NewEncoder", + }, + "encoding/asn1": { + "BitString", + "ClassApplication", + "ClassContextSpecific", + 
"ClassPrivate", + "ClassUniversal", + "Enumerated", + "Flag", + "Marshal", + "MarshalWithParams", + "NullBytes", + "NullRawValue", + "ObjectIdentifier", + "RawContent", + "RawValue", + "StructuralError", + "SyntaxError", + "TagBMPString", + "TagBitString", + "TagBoolean", + "TagEnum", + "TagGeneralString", + "TagGeneralizedTime", + "TagIA5String", + "TagInteger", + "TagNull", + "TagNumericString", + "TagOID", + "TagOctetString", + "TagPrintableString", + "TagSequence", + "TagSet", + "TagT61String", + "TagUTCTime", + "TagUTF8String", + "Unmarshal", + "UnmarshalWithParams", + }, + "encoding/base32": { + "CorruptInputError", + "Encoding", + "HexEncoding", + "NewDecoder", + "NewEncoder", + "NewEncoding", + "NoPadding", + "StdEncoding", + "StdPadding", + }, + "encoding/base64": { + "CorruptInputError", + "Encoding", + "NewDecoder", + "NewEncoder", + "NewEncoding", + "NoPadding", + "RawStdEncoding", + "RawURLEncoding", + "StdEncoding", + "StdPadding", + "URLEncoding", + }, + "encoding/binary": { + "AppendByteOrder", + "AppendUvarint", + "AppendVarint", + "BigEndian", + "ByteOrder", + "LittleEndian", + "MaxVarintLen16", + "MaxVarintLen32", + "MaxVarintLen64", + "PutUvarint", + "PutVarint", + "Read", + "ReadUvarint", + "ReadVarint", + "Size", + "Uvarint", + "Varint", + "Write", + }, + "encoding/csv": { + "ErrBareQuote", + "ErrFieldCount", + "ErrQuote", + "ErrTrailingComma", + "NewReader", + "NewWriter", + "ParseError", + "Reader", + "Writer", + }, + "encoding/gob": { + "CommonType", + "Decoder", + "Encoder", + "GobDecoder", + "GobEncoder", + "NewDecoder", + "NewEncoder", + "Register", + "RegisterName", + }, + "encoding/hex": { + "Decode", + "DecodeString", + "DecodedLen", + "Dump", + "Dumper", + "Encode", + "EncodeToString", + "EncodedLen", + "ErrLength", + "InvalidByteError", + "NewDecoder", + "NewEncoder", + }, + "encoding/json": { + "Compact", + "Decoder", + "Delim", + "Encoder", + "HTMLEscape", + "Indent", + "InvalidUTF8Error", + "InvalidUnmarshalError", + "Marshal", + 
"MarshalIndent", + "Marshaler", + "MarshalerError", + "NewDecoder", + "NewEncoder", + "Number", + "RawMessage", + "SyntaxError", + "Token", + "Unmarshal", + "UnmarshalFieldError", + "UnmarshalTypeError", + "Unmarshaler", + "UnsupportedTypeError", + "UnsupportedValueError", + "Valid", + }, + "encoding/pem": { + "Block", + "Decode", + "Encode", + "EncodeToMemory", + }, + "encoding/xml": { + "Attr", + "CharData", + "Comment", + "CopyToken", + "Decoder", + "Directive", + "Encoder", + "EndElement", + "Escape", + "EscapeText", + "HTMLAutoClose", + "HTMLEntity", + "Header", + "Marshal", + "MarshalIndent", + "Marshaler", + "MarshalerAttr", + "Name", + "NewDecoder", + "NewEncoder", + "NewTokenDecoder", + "ProcInst", + "StartElement", + "SyntaxError", + "TagPathError", + "Token", + "TokenReader", + "Unmarshal", + "UnmarshalError", + "Unmarshaler", + "UnmarshalerAttr", + "UnsupportedTypeError", + }, + "errors": { + "As", + "Is", + "Join", + "New", + "Unwrap", + }, + "expvar": { + "Do", + "Float", + "Func", + "Get", + "Handler", + "Int", + "KeyValue", + "Map", + "NewFloat", + "NewInt", + "NewMap", + "NewString", + "Publish", + "String", + "Var", + }, + "flag": { + "Arg", + "Args", + "Bool", + "BoolVar", + "CommandLine", + "ContinueOnError", + "Duration", + "DurationVar", + "ErrHelp", + "ErrorHandling", + "ExitOnError", + "Flag", + "FlagSet", + "Float64", + "Float64Var", + "Func", + "Getter", + "Int", + "Int64", + "Int64Var", + "IntVar", + "Lookup", + "NArg", + "NFlag", + "NewFlagSet", + "PanicOnError", + "Parse", + "Parsed", + "PrintDefaults", + "Set", + "String", + "StringVar", + "TextVar", + "Uint", + "Uint64", + "Uint64Var", + "UintVar", + "UnquoteUsage", + "Usage", + "Value", + "Var", + "Visit", + "VisitAll", + }, + "fmt": { + "Append", + "Appendf", + "Appendln", + "Errorf", + "FormatString", + "Formatter", + "Fprint", + "Fprintf", + "Fprintln", + "Fscan", + "Fscanf", + "Fscanln", + "GoStringer", + "Print", + "Printf", + "Println", + "Scan", + "ScanState", + "Scanf", + 
"Scanln", + "Scanner", + "Sprint", + "Sprintf", + "Sprintln", + "Sscan", + "Sscanf", + "Sscanln", + "State", + "Stringer", + }, + "go/ast": { + "ArrayType", + "AssignStmt", + "Bad", + "BadDecl", + "BadExpr", + "BadStmt", + "BasicLit", + "BinaryExpr", + "BlockStmt", + "BranchStmt", + "CallExpr", + "CaseClause", + "ChanDir", + "ChanType", + "CommClause", + "Comment", + "CommentGroup", + "CommentMap", + "CompositeLit", + "Con", + "Decl", + "DeclStmt", + "DeferStmt", + "Ellipsis", + "EmptyStmt", + "Expr", + "ExprStmt", + "Field", + "FieldFilter", + "FieldList", + "File", + "FileExports", + "Filter", + "FilterDecl", + "FilterFile", + "FilterFuncDuplicates", + "FilterImportDuplicates", + "FilterPackage", + "FilterUnassociatedComments", + "ForStmt", + "Fprint", + "Fun", + "FuncDecl", + "FuncLit", + "FuncType", + "GenDecl", + "GoStmt", + "Ident", + "IfStmt", + "ImportSpec", + "Importer", + "IncDecStmt", + "IndexExpr", + "IndexListExpr", + "Inspect", + "InterfaceType", + "IsExported", + "KeyValueExpr", + "LabeledStmt", + "Lbl", + "MapType", + "MergeMode", + "MergePackageFiles", + "NewCommentMap", + "NewIdent", + "NewObj", + "NewPackage", + "NewScope", + "Node", + "NotNilFilter", + "ObjKind", + "Object", + "Package", + "PackageExports", + "ParenExpr", + "Pkg", + "Print", + "RECV", + "RangeStmt", + "ReturnStmt", + "SEND", + "Scope", + "SelectStmt", + "SelectorExpr", + "SendStmt", + "SliceExpr", + "SortImports", + "Spec", + "StarExpr", + "Stmt", + "StructType", + "SwitchStmt", + "Typ", + "TypeAssertExpr", + "TypeSpec", + "TypeSwitchStmt", + "UnaryExpr", + "ValueSpec", + "Var", + "Visitor", + "Walk", + }, + "go/build": { + "AllowBinary", + "ArchChar", + "Context", + "Default", + "FindOnly", + "IgnoreVendor", + "Import", + "ImportComment", + "ImportDir", + "ImportMode", + "IsLocalImport", + "MultiplePackageError", + "NoGoError", + "Package", + "ToolDir", + }, + "go/build/constraint": { + "AndExpr", + "Expr", + "IsGoBuild", + "IsPlusBuild", + "NotExpr", + "OrExpr", + "Parse", + 
"PlusBuildLines", + "SyntaxError", + "TagExpr", + }, + "go/constant": { + "BinaryOp", + "BitLen", + "Bool", + "BoolVal", + "Bytes", + "Compare", + "Complex", + "Denom", + "Float", + "Float32Val", + "Float64Val", + "Imag", + "Int", + "Int64Val", + "Kind", + "Make", + "MakeBool", + "MakeFloat64", + "MakeFromBytes", + "MakeFromLiteral", + "MakeImag", + "MakeInt64", + "MakeString", + "MakeUint64", + "MakeUnknown", + "Num", + "Real", + "Shift", + "Sign", + "String", + "StringVal", + "ToComplex", + "ToFloat", + "ToInt", + "Uint64Val", + "UnaryOp", + "Unknown", + "Val", + "Value", + }, + "go/doc": { + "AllDecls", + "AllMethods", + "Example", + "Examples", + "Filter", + "Func", + "IllegalPrefixes", + "IsPredeclared", + "Mode", + "New", + "NewFromFiles", + "Note", + "Package", + "PreserveAST", + "Synopsis", + "ToHTML", + "ToText", + "Type", + "Value", + }, + "go/doc/comment": { + "Block", + "Code", + "DefaultLookupPackage", + "Doc", + "DocLink", + "Heading", + "Italic", + "Link", + "LinkDef", + "List", + "ListItem", + "Paragraph", + "Parser", + "Plain", + "Printer", + "Text", + }, + "go/format": { + "Node", + "Source", + }, + "go/importer": { + "Default", + "For", + "ForCompiler", + "Lookup", + }, + "go/parser": { + "AllErrors", + "DeclarationErrors", + "ImportsOnly", + "Mode", + "PackageClauseOnly", + "ParseComments", + "ParseDir", + "ParseExpr", + "ParseExprFrom", + "ParseFile", + "SkipObjectResolution", + "SpuriousErrors", + "Trace", + }, + "go/printer": { + "CommentedNode", + "Config", + "Fprint", + "Mode", + "RawFormat", + "SourcePos", + "TabIndent", + "UseSpaces", + }, + "go/scanner": { + "Error", + "ErrorHandler", + "ErrorList", + "Mode", + "PrintError", + "ScanComments", + "Scanner", + }, + "go/token": { + "ADD", + "ADD_ASSIGN", + "AND", + "AND_ASSIGN", + "AND_NOT", + "AND_NOT_ASSIGN", + "ARROW", + "ASSIGN", + "BREAK", + "CASE", + "CHAN", + "CHAR", + "COLON", + "COMMA", + "COMMENT", + "CONST", + "CONTINUE", + "DEC", + "DEFAULT", + "DEFER", + "DEFINE", + "ELLIPSIS", 
+ "ELSE", + "EOF", + "EQL", + "FALLTHROUGH", + "FLOAT", + "FOR", + "FUNC", + "File", + "FileSet", + "GEQ", + "GO", + "GOTO", + "GTR", + "HighestPrec", + "IDENT", + "IF", + "ILLEGAL", + "IMAG", + "IMPORT", + "INC", + "INT", + "INTERFACE", + "IsExported", + "IsIdentifier", + "IsKeyword", + "LAND", + "LBRACE", + "LBRACK", + "LEQ", + "LOR", + "LPAREN", + "LSS", + "Lookup", + "LowestPrec", + "MAP", + "MUL", + "MUL_ASSIGN", + "NEQ", + "NOT", + "NewFileSet", + "NoPos", + "OR", + "OR_ASSIGN", + "PACKAGE", + "PERIOD", + "Pos", + "Position", + "QUO", + "QUO_ASSIGN", + "RANGE", + "RBRACE", + "RBRACK", + "REM", + "REM_ASSIGN", + "RETURN", + "RPAREN", + "SELECT", + "SEMICOLON", + "SHL", + "SHL_ASSIGN", + "SHR", + "SHR_ASSIGN", + "STRING", + "STRUCT", + "SUB", + "SUB_ASSIGN", + "SWITCH", + "TILDE", + "TYPE", + "Token", + "UnaryPrec", + "VAR", + "XOR", + "XOR_ASSIGN", + }, + "go/types": { + "ArgumentError", + "Array", + "AssertableTo", + "AssignableTo", + "Basic", + "BasicInfo", + "BasicKind", + "Bool", + "Builtin", + "Byte", + "Chan", + "ChanDir", + "CheckExpr", + "Checker", + "Comparable", + "Complex128", + "Complex64", + "Config", + "Const", + "Context", + "ConvertibleTo", + "DefPredeclaredTestFuncs", + "Default", + "Error", + "Eval", + "ExprString", + "FieldVal", + "Float32", + "Float64", + "Func", + "Id", + "Identical", + "IdenticalIgnoreTags", + "Implements", + "ImportMode", + "Importer", + "ImporterFrom", + "Info", + "Initializer", + "Instance", + "Instantiate", + "Int", + "Int16", + "Int32", + "Int64", + "Int8", + "Interface", + "Invalid", + "IsBoolean", + "IsComplex", + "IsConstType", + "IsFloat", + "IsInteger", + "IsInterface", + "IsNumeric", + "IsOrdered", + "IsString", + "IsUnsigned", + "IsUntyped", + "Label", + "LookupFieldOrMethod", + "Map", + "MethodExpr", + "MethodSet", + "MethodVal", + "MissingMethod", + "Named", + "NewArray", + "NewChan", + "NewChecker", + "NewConst", + "NewContext", + "NewField", + "NewFunc", + "NewInterface", + "NewInterfaceType", + 
"NewLabel", + "NewMap", + "NewMethodSet", + "NewNamed", + "NewPackage", + "NewParam", + "NewPkgName", + "NewPointer", + "NewScope", + "NewSignature", + "NewSignatureType", + "NewSlice", + "NewStruct", + "NewTerm", + "NewTuple", + "NewTypeName", + "NewTypeParam", + "NewUnion", + "NewVar", + "Nil", + "Object", + "ObjectString", + "Package", + "PkgName", + "Pointer", + "Qualifier", + "RecvOnly", + "RelativeTo", + "Rune", + "Satisfies", + "Scope", + "Selection", + "SelectionKind", + "SelectionString", + "SendOnly", + "SendRecv", + "Signature", + "Sizes", + "SizesFor", + "Slice", + "StdSizes", + "String", + "Struct", + "Term", + "Tuple", + "Typ", + "Type", + "TypeAndValue", + "TypeList", + "TypeName", + "TypeParam", + "TypeParamList", + "TypeString", + "Uint", + "Uint16", + "Uint32", + "Uint64", + "Uint8", + "Uintptr", + "Union", + "Universe", + "Unsafe", + "UnsafePointer", + "UntypedBool", + "UntypedComplex", + "UntypedFloat", + "UntypedInt", + "UntypedNil", + "UntypedRune", + "UntypedString", + "Var", + "WriteExpr", + "WriteSignature", + "WriteType", + }, + "hash": { + "Hash", + "Hash32", + "Hash64", + }, + "hash/adler32": { + "Checksum", + "New", + "Size", + }, + "hash/crc32": { + "Castagnoli", + "Checksum", + "ChecksumIEEE", + "IEEE", + "IEEETable", + "Koopman", + "MakeTable", + "New", + "NewIEEE", + "Size", + "Table", + "Update", + }, + "hash/crc64": { + "Checksum", + "ECMA", + "ISO", + "MakeTable", + "New", + "Size", + "Table", + "Update", + }, + "hash/fnv": { + "New128", + "New128a", + "New32", + "New32a", + "New64", + "New64a", + }, + "hash/maphash": { + "Bytes", + "Hash", + "MakeSeed", + "Seed", + "String", + }, + "html": { + "EscapeString", + "UnescapeString", + }, + "html/template": { + "CSS", + "ErrAmbigContext", + "ErrBadHTML", + "ErrBranchEnd", + "ErrEndContext", + "ErrNoSuchTemplate", + "ErrOutputContext", + "ErrPartialCharset", + "ErrPartialEscape", + "ErrPredefinedEscaper", + "ErrRangeLoopReentry", + "ErrSlashAmbig", + "Error", + "ErrorCode", + 
"FuncMap", + "HTML", + "HTMLAttr", + "HTMLEscape", + "HTMLEscapeString", + "HTMLEscaper", + "IsTrue", + "JS", + "JSEscape", + "JSEscapeString", + "JSEscaper", + "JSStr", + "Must", + "New", + "OK", + "ParseFS", + "ParseFiles", + "ParseGlob", + "Srcset", + "Template", + "URL", + "URLQueryEscaper", + }, + "image": { + "Alpha", + "Alpha16", + "Black", + "CMYK", + "Config", + "Decode", + "DecodeConfig", + "ErrFormat", + "Gray", + "Gray16", + "Image", + "NRGBA", + "NRGBA64", + "NYCbCrA", + "NewAlpha", + "NewAlpha16", + "NewCMYK", + "NewGray", + "NewGray16", + "NewNRGBA", + "NewNRGBA64", + "NewNYCbCrA", + "NewPaletted", + "NewRGBA", + "NewRGBA64", + "NewUniform", + "NewYCbCr", + "Opaque", + "Paletted", + "PalettedImage", + "Point", + "Pt", + "RGBA", + "RGBA64", + "RGBA64Image", + "Rect", + "Rectangle", + "RegisterFormat", + "Transparent", + "Uniform", + "White", + "YCbCr", + "YCbCrSubsampleRatio", + "YCbCrSubsampleRatio410", + "YCbCrSubsampleRatio411", + "YCbCrSubsampleRatio420", + "YCbCrSubsampleRatio422", + "YCbCrSubsampleRatio440", + "YCbCrSubsampleRatio444", + "ZP", + "ZR", + }, + "image/color": { + "Alpha", + "Alpha16", + "Alpha16Model", + "AlphaModel", + "Black", + "CMYK", + "CMYKModel", + "CMYKToRGB", + "Color", + "Gray", + "Gray16", + "Gray16Model", + "GrayModel", + "Model", + "ModelFunc", + "NRGBA", + "NRGBA64", + "NRGBA64Model", + "NRGBAModel", + "NYCbCrA", + "NYCbCrAModel", + "Opaque", + "Palette", + "RGBA", + "RGBA64", + "RGBA64Model", + "RGBAModel", + "RGBToCMYK", + "RGBToYCbCr", + "Transparent", + "White", + "YCbCr", + "YCbCrModel", + "YCbCrToRGB", + }, + "image/color/palette": { + "Plan9", + "WebSafe", + }, + "image/draw": { + "Draw", + "DrawMask", + "Drawer", + "FloydSteinberg", + "Image", + "Op", + "Over", + "Quantizer", + "RGBA64Image", + "Src", + }, + "image/gif": { + "Decode", + "DecodeAll", + "DecodeConfig", + "DisposalBackground", + "DisposalNone", + "DisposalPrevious", + "Encode", + "EncodeAll", + "GIF", + "Options", + }, + "image/jpeg": { + 
"Decode", + "DecodeConfig", + "DefaultQuality", + "Encode", + "FormatError", + "Options", + "Reader", + "UnsupportedError", + }, + "image/png": { + "BestCompression", + "BestSpeed", + "CompressionLevel", + "Decode", + "DecodeConfig", + "DefaultCompression", + "Encode", + "Encoder", + "EncoderBuffer", + "EncoderBufferPool", + "FormatError", + "NoCompression", + "UnsupportedError", + }, + "index/suffixarray": { + "Index", + "New", + }, + "io": { + "ByteReader", + "ByteScanner", + "ByteWriter", + "Closer", + "Copy", + "CopyBuffer", + "CopyN", + "Discard", + "EOF", + "ErrClosedPipe", + "ErrNoProgress", + "ErrShortBuffer", + "ErrShortWrite", + "ErrUnexpectedEOF", + "LimitReader", + "LimitedReader", + "MultiReader", + "MultiWriter", + "NewOffsetWriter", + "NewSectionReader", + "NopCloser", + "OffsetWriter", + "Pipe", + "PipeReader", + "PipeWriter", + "ReadAll", + "ReadAtLeast", + "ReadCloser", + "ReadFull", + "ReadSeekCloser", + "ReadSeeker", + "ReadWriteCloser", + "ReadWriteSeeker", + "ReadWriter", + "Reader", + "ReaderAt", + "ReaderFrom", + "RuneReader", + "RuneScanner", + "SectionReader", + "SeekCurrent", + "SeekEnd", + "SeekStart", + "Seeker", + "StringWriter", + "TeeReader", + "WriteCloser", + "WriteSeeker", + "WriteString", + "Writer", + "WriterAt", + "WriterTo", + }, + "io/fs": { + "DirEntry", + "ErrClosed", + "ErrExist", + "ErrInvalid", + "ErrNotExist", + "ErrPermission", + "FS", + "File", + "FileInfo", + "FileInfoToDirEntry", + "FileMode", + "Glob", + "GlobFS", + "ModeAppend", + "ModeCharDevice", + "ModeDevice", + "ModeDir", + "ModeExclusive", + "ModeIrregular", + "ModeNamedPipe", + "ModePerm", + "ModeSetgid", + "ModeSetuid", + "ModeSocket", + "ModeSticky", + "ModeSymlink", + "ModeTemporary", + "ModeType", + "PathError", + "ReadDir", + "ReadDirFS", + "ReadDirFile", + "ReadFile", + "ReadFileFS", + "SkipAll", + "SkipDir", + "Stat", + "StatFS", + "Sub", + "SubFS", + "ValidPath", + "WalkDir", + "WalkDirFunc", + }, + "io/ioutil": { + "Discard", + "NopCloser", + 
"ReadAll", + "ReadDir", + "ReadFile", + "TempDir", + "TempFile", + "WriteFile", + }, + "log": { + "Default", + "Fatal", + "Fatalf", + "Fatalln", + "Flags", + "LUTC", + "Ldate", + "Llongfile", + "Lmicroseconds", + "Lmsgprefix", + "Logger", + "Lshortfile", + "LstdFlags", + "Ltime", + "New", + "Output", + "Panic", + "Panicf", + "Panicln", + "Prefix", + "Print", + "Printf", + "Println", + "SetFlags", + "SetOutput", + "SetPrefix", + "Writer", + }, + "log/syslog": { + "Dial", + "LOG_ALERT", + "LOG_AUTH", + "LOG_AUTHPRIV", + "LOG_CRIT", + "LOG_CRON", + "LOG_DAEMON", + "LOG_DEBUG", + "LOG_EMERG", + "LOG_ERR", + "LOG_FTP", + "LOG_INFO", + "LOG_KERN", + "LOG_LOCAL0", + "LOG_LOCAL1", + "LOG_LOCAL2", + "LOG_LOCAL3", + "LOG_LOCAL4", + "LOG_LOCAL5", + "LOG_LOCAL6", + "LOG_LOCAL7", + "LOG_LPR", + "LOG_MAIL", + "LOG_NEWS", + "LOG_NOTICE", + "LOG_SYSLOG", + "LOG_USER", + "LOG_UUCP", + "LOG_WARNING", + "New", + "NewLogger", + "Priority", + "Writer", + }, + "math": { + "Abs", + "Acos", + "Acosh", + "Asin", + "Asinh", + "Atan", + "Atan2", + "Atanh", + "Cbrt", + "Ceil", + "Copysign", + "Cos", + "Cosh", + "Dim", + "E", + "Erf", + "Erfc", + "Erfcinv", + "Erfinv", + "Exp", + "Exp2", + "Expm1", + "FMA", + "Float32bits", + "Float32frombits", + "Float64bits", + "Float64frombits", + "Floor", + "Frexp", + "Gamma", + "Hypot", + "Ilogb", + "Inf", + "IsInf", + "IsNaN", + "J0", + "J1", + "Jn", + "Ldexp", + "Lgamma", + "Ln10", + "Ln2", + "Log", + "Log10", + "Log10E", + "Log1p", + "Log2", + "Log2E", + "Logb", + "Max", + "MaxFloat32", + "MaxFloat64", + "MaxInt", + "MaxInt16", + "MaxInt32", + "MaxInt64", + "MaxInt8", + "MaxUint", + "MaxUint16", + "MaxUint32", + "MaxUint64", + "MaxUint8", + "Min", + "MinInt", + "MinInt16", + "MinInt32", + "MinInt64", + "MinInt8", + "Mod", + "Modf", + "NaN", + "Nextafter", + "Nextafter32", + "Phi", + "Pi", + "Pow", + "Pow10", + "Remainder", + "Round", + "RoundToEven", + "Signbit", + "Sin", + "Sincos", + "Sinh", + "SmallestNonzeroFloat32", + "SmallestNonzeroFloat64", + 
"Sqrt", + "Sqrt2", + "SqrtE", + "SqrtPhi", + "SqrtPi", + "Tan", + "Tanh", + "Trunc", + "Y0", + "Y1", + "Yn", + }, + "math/big": { + "Above", + "Accuracy", + "AwayFromZero", + "Below", + "ErrNaN", + "Exact", + "Float", + "Int", + "Jacobi", + "MaxBase", + "MaxExp", + "MaxPrec", + "MinExp", + "NewFloat", + "NewInt", + "NewRat", + "ParseFloat", + "Rat", + "RoundingMode", + "ToNearestAway", + "ToNearestEven", + "ToNegativeInf", + "ToPositiveInf", + "ToZero", + "Word", + }, + "math/bits": { + "Add", + "Add32", + "Add64", + "Div", + "Div32", + "Div64", + "LeadingZeros", + "LeadingZeros16", + "LeadingZeros32", + "LeadingZeros64", + "LeadingZeros8", + "Len", + "Len16", + "Len32", + "Len64", + "Len8", + "Mul", + "Mul32", + "Mul64", + "OnesCount", + "OnesCount16", + "OnesCount32", + "OnesCount64", + "OnesCount8", + "Rem", + "Rem32", + "Rem64", + "Reverse", + "Reverse16", + "Reverse32", + "Reverse64", + "Reverse8", + "ReverseBytes", + "ReverseBytes16", + "ReverseBytes32", + "ReverseBytes64", + "RotateLeft", + "RotateLeft16", + "RotateLeft32", + "RotateLeft64", + "RotateLeft8", + "Sub", + "Sub32", + "Sub64", + "TrailingZeros", + "TrailingZeros16", + "TrailingZeros32", + "TrailingZeros64", + "TrailingZeros8", + "UintSize", + }, + "math/cmplx": { + "Abs", + "Acos", + "Acosh", + "Asin", + "Asinh", + "Atan", + "Atanh", + "Conj", + "Cos", + "Cosh", + "Cot", + "Exp", + "Inf", + "IsInf", + "IsNaN", + "Log", + "Log10", + "NaN", + "Phase", + "Polar", + "Pow", + "Rect", + "Sin", + "Sinh", + "Sqrt", + "Tan", + "Tanh", + }, + "math/rand": { + "ExpFloat64", + "Float32", + "Float64", + "Int", + "Int31", + "Int31n", + "Int63", + "Int63n", + "Intn", + "New", + "NewSource", + "NewZipf", + "NormFloat64", + "Perm", + "Rand", + "Read", + "Seed", + "Shuffle", + "Source", + "Source64", + "Uint32", + "Uint64", + "Zipf", + }, + "mime": { + "AddExtensionType", + "BEncoding", + "ErrInvalidMediaParameter", + "ExtensionsByType", + "FormatMediaType", + "ParseMediaType", + "QEncoding", + "TypeByExtension", 
+ "WordDecoder", + "WordEncoder", + }, + "mime/multipart": { + "ErrMessageTooLarge", + "File", + "FileHeader", + "Form", + "NewReader", + "NewWriter", + "Part", + "Reader", + "Writer", + }, + "mime/quotedprintable": { + "NewReader", + "NewWriter", + "Reader", + "Writer", + }, + "net": { + "Addr", + "AddrError", + "Buffers", + "CIDRMask", + "Conn", + "DNSConfigError", + "DNSError", + "DefaultResolver", + "Dial", + "DialIP", + "DialTCP", + "DialTimeout", + "DialUDP", + "DialUnix", + "Dialer", + "ErrClosed", + "ErrWriteToConnected", + "Error", + "FileConn", + "FileListener", + "FilePacketConn", + "FlagBroadcast", + "FlagLoopback", + "FlagMulticast", + "FlagPointToPoint", + "FlagRunning", + "FlagUp", + "Flags", + "HardwareAddr", + "IP", + "IPAddr", + "IPConn", + "IPMask", + "IPNet", + "IPv4", + "IPv4Mask", + "IPv4allrouter", + "IPv4allsys", + "IPv4bcast", + "IPv4len", + "IPv4zero", + "IPv6interfacelocalallnodes", + "IPv6len", + "IPv6linklocalallnodes", + "IPv6linklocalallrouters", + "IPv6loopback", + "IPv6unspecified", + "IPv6zero", + "Interface", + "InterfaceAddrs", + "InterfaceByIndex", + "InterfaceByName", + "Interfaces", + "InvalidAddrError", + "JoinHostPort", + "Listen", + "ListenConfig", + "ListenIP", + "ListenMulticastUDP", + "ListenPacket", + "ListenTCP", + "ListenUDP", + "ListenUnix", + "ListenUnixgram", + "Listener", + "LookupAddr", + "LookupCNAME", + "LookupHost", + "LookupIP", + "LookupMX", + "LookupNS", + "LookupPort", + "LookupSRV", + "LookupTXT", + "MX", + "NS", + "OpError", + "PacketConn", + "ParseCIDR", + "ParseError", + "ParseIP", + "ParseMAC", + "Pipe", + "ResolveIPAddr", + "ResolveTCPAddr", + "ResolveUDPAddr", + "ResolveUnixAddr", + "Resolver", + "SRV", + "SplitHostPort", + "TCPAddr", + "TCPAddrFromAddrPort", + "TCPConn", + "TCPListener", + "UDPAddr", + "UDPAddrFromAddrPort", + "UDPConn", + "UnixAddr", + "UnixConn", + "UnixListener", + "UnknownNetworkError", + }, + "net/http": { + "AllowQuerySemicolons", + "CanonicalHeaderKey", + "Client", + 
"CloseNotifier", + "ConnState", + "Cookie", + "CookieJar", + "DefaultClient", + "DefaultMaxHeaderBytes", + "DefaultMaxIdleConnsPerHost", + "DefaultServeMux", + "DefaultTransport", + "DetectContentType", + "Dir", + "ErrAbortHandler", + "ErrBodyNotAllowed", + "ErrBodyReadAfterClose", + "ErrContentLength", + "ErrHandlerTimeout", + "ErrHeaderTooLong", + "ErrHijacked", + "ErrLineTooLong", + "ErrMissingBoundary", + "ErrMissingContentLength", + "ErrMissingFile", + "ErrNoCookie", + "ErrNoLocation", + "ErrNotMultipart", + "ErrNotSupported", + "ErrServerClosed", + "ErrShortBody", + "ErrSkipAltProtocol", + "ErrUnexpectedTrailer", + "ErrUseLastResponse", + "ErrWriteAfterFlush", + "Error", + "FS", + "File", + "FileServer", + "FileSystem", + "Flusher", + "Get", + "Handle", + "HandleFunc", + "Handler", + "HandlerFunc", + "Head", + "Header", + "Hijacker", + "ListenAndServe", + "ListenAndServeTLS", + "LocalAddrContextKey", + "MaxBytesError", + "MaxBytesHandler", + "MaxBytesReader", + "MethodConnect", + "MethodDelete", + "MethodGet", + "MethodHead", + "MethodOptions", + "MethodPatch", + "MethodPost", + "MethodPut", + "MethodTrace", + "NewFileTransport", + "NewRequest", + "NewRequestWithContext", + "NewResponseController", + "NewServeMux", + "NoBody", + "NotFound", + "NotFoundHandler", + "ParseHTTPVersion", + "ParseTime", + "Post", + "PostForm", + "ProtocolError", + "ProxyFromEnvironment", + "ProxyURL", + "PushOptions", + "Pusher", + "ReadRequest", + "ReadResponse", + "Redirect", + "RedirectHandler", + "Request", + "Response", + "ResponseController", + "ResponseWriter", + "RoundTripper", + "SameSite", + "SameSiteDefaultMode", + "SameSiteLaxMode", + "SameSiteNoneMode", + "SameSiteStrictMode", + "Serve", + "ServeContent", + "ServeFile", + "ServeMux", + "ServeTLS", + "Server", + "ServerContextKey", + "SetCookie", + "StateActive", + "StateClosed", + "StateHijacked", + "StateIdle", + "StateNew", + "StatusAccepted", + "StatusAlreadyReported", + "StatusBadGateway", + "StatusBadRequest", + 
"StatusConflict", + "StatusContinue", + "StatusCreated", + "StatusEarlyHints", + "StatusExpectationFailed", + "StatusFailedDependency", + "StatusForbidden", + "StatusFound", + "StatusGatewayTimeout", + "StatusGone", + "StatusHTTPVersionNotSupported", + "StatusIMUsed", + "StatusInsufficientStorage", + "StatusInternalServerError", + "StatusLengthRequired", + "StatusLocked", + "StatusLoopDetected", + "StatusMethodNotAllowed", + "StatusMisdirectedRequest", + "StatusMovedPermanently", + "StatusMultiStatus", + "StatusMultipleChoices", + "StatusNetworkAuthenticationRequired", + "StatusNoContent", + "StatusNonAuthoritativeInfo", + "StatusNotAcceptable", + "StatusNotExtended", + "StatusNotFound", + "StatusNotImplemented", + "StatusNotModified", + "StatusOK", + "StatusPartialContent", + "StatusPaymentRequired", + "StatusPermanentRedirect", + "StatusPreconditionFailed", + "StatusPreconditionRequired", + "StatusProcessing", + "StatusProxyAuthRequired", + "StatusRequestEntityTooLarge", + "StatusRequestHeaderFieldsTooLarge", + "StatusRequestTimeout", + "StatusRequestURITooLong", + "StatusRequestedRangeNotSatisfiable", + "StatusResetContent", + "StatusSeeOther", + "StatusServiceUnavailable", + "StatusSwitchingProtocols", + "StatusTeapot", + "StatusTemporaryRedirect", + "StatusText", + "StatusTooEarly", + "StatusTooManyRequests", + "StatusUnauthorized", + "StatusUnavailableForLegalReasons", + "StatusUnprocessableEntity", + "StatusUnsupportedMediaType", + "StatusUpgradeRequired", + "StatusUseProxy", + "StatusVariantAlsoNegotiates", + "StripPrefix", + "TimeFormat", + "TimeoutHandler", + "TrailerPrefix", + "Transport", + }, + "net/http/cgi": { + "Handler", + "Request", + "RequestFromMap", + "Serve", + }, + "net/http/cookiejar": { + "Jar", + "New", + "Options", + "PublicSuffixList", + }, + "net/http/fcgi": { + "ErrConnClosed", + "ErrRequestAborted", + "ProcessEnv", + "Serve", + }, + "net/http/httptest": { + "DefaultRemoteAddr", + "NewRecorder", + "NewRequest", + "NewServer", + 
"NewTLSServer", + "NewUnstartedServer", + "ResponseRecorder", + "Server", + }, + "net/http/httptrace": { + "ClientTrace", + "ContextClientTrace", + "DNSDoneInfo", + "DNSStartInfo", + "GotConnInfo", + "WithClientTrace", + "WroteRequestInfo", + }, + "net/http/httputil": { + "BufferPool", + "ClientConn", + "DumpRequest", + "DumpRequestOut", + "DumpResponse", + "ErrClosed", + "ErrLineTooLong", + "ErrPersistEOF", + "ErrPipeline", + "NewChunkedReader", + "NewChunkedWriter", + "NewClientConn", + "NewProxyClientConn", + "NewServerConn", + "NewSingleHostReverseProxy", + "ProxyRequest", + "ReverseProxy", + "ServerConn", + }, + "net/http/pprof": { + "Cmdline", + "Handler", + "Index", + "Profile", + "Symbol", + "Trace", + }, + "net/mail": { + "Address", + "AddressParser", + "ErrHeaderNotPresent", + "Header", + "Message", + "ParseAddress", + "ParseAddressList", + "ParseDate", + "ReadMessage", + }, + "net/netip": { + "Addr", + "AddrFrom16", + "AddrFrom4", + "AddrFromSlice", + "AddrPort", + "AddrPortFrom", + "IPv4Unspecified", + "IPv6LinkLocalAllNodes", + "IPv6LinkLocalAllRouters", + "IPv6Loopback", + "IPv6Unspecified", + "MustParseAddr", + "MustParseAddrPort", + "MustParsePrefix", + "ParseAddr", + "ParseAddrPort", + "ParsePrefix", + "Prefix", + "PrefixFrom", + }, + "net/rpc": { + "Accept", + "Call", + "Client", + "ClientCodec", + "DefaultDebugPath", + "DefaultRPCPath", + "DefaultServer", + "Dial", + "DialHTTP", + "DialHTTPPath", + "ErrShutdown", + "HandleHTTP", + "NewClient", + "NewClientWithCodec", + "NewServer", + "Register", + "RegisterName", + "Request", + "Response", + "ServeCodec", + "ServeConn", + "ServeRequest", + "Server", + "ServerCodec", + "ServerError", + }, + "net/rpc/jsonrpc": { + "Dial", + "NewClient", + "NewClientCodec", + "NewServerCodec", + "ServeConn", + }, + "net/smtp": { + "Auth", + "CRAMMD5Auth", + "Client", + "Dial", + "NewClient", + "PlainAuth", + "SendMail", + "ServerInfo", + }, + "net/textproto": { + "CanonicalMIMEHeaderKey", + "Conn", + "Dial", + 
"Error", + "MIMEHeader", + "NewConn", + "NewReader", + "NewWriter", + "Pipeline", + "ProtocolError", + "Reader", + "TrimBytes", + "TrimString", + "Writer", + }, + "net/url": { + "Error", + "EscapeError", + "InvalidHostError", + "JoinPath", + "Parse", + "ParseQuery", + "ParseRequestURI", + "PathEscape", + "PathUnescape", + "QueryEscape", + "QueryUnescape", + "URL", + "User", + "UserPassword", + "Userinfo", + "Values", + }, + "os": { + "Args", + "Chdir", + "Chmod", + "Chown", + "Chtimes", + "Clearenv", + "Create", + "CreateTemp", + "DevNull", + "DirEntry", + "DirFS", + "Environ", + "ErrClosed", + "ErrDeadlineExceeded", + "ErrExist", + "ErrInvalid", + "ErrNoDeadline", + "ErrNotExist", + "ErrPermission", + "ErrProcessDone", + "Executable", + "Exit", + "Expand", + "ExpandEnv", + "File", + "FileInfo", + "FileMode", + "FindProcess", + "Getegid", + "Getenv", + "Geteuid", + "Getgid", + "Getgroups", + "Getpagesize", + "Getpid", + "Getppid", + "Getuid", + "Getwd", + "Hostname", + "Interrupt", + "IsExist", + "IsNotExist", + "IsPathSeparator", + "IsPermission", + "IsTimeout", + "Kill", + "Lchown", + "Link", + "LinkError", + "LookupEnv", + "Lstat", + "Mkdir", + "MkdirAll", + "MkdirTemp", + "ModeAppend", + "ModeCharDevice", + "ModeDevice", + "ModeDir", + "ModeExclusive", + "ModeIrregular", + "ModeNamedPipe", + "ModePerm", + "ModeSetgid", + "ModeSetuid", + "ModeSocket", + "ModeSticky", + "ModeSymlink", + "ModeTemporary", + "ModeType", + "NewFile", + "NewSyscallError", + "O_APPEND", + "O_CREATE", + "O_EXCL", + "O_RDONLY", + "O_RDWR", + "O_SYNC", + "O_TRUNC", + "O_WRONLY", + "Open", + "OpenFile", + "PathError", + "PathListSeparator", + "PathSeparator", + "Pipe", + "ProcAttr", + "Process", + "ProcessState", + "ReadDir", + "ReadFile", + "Readlink", + "Remove", + "RemoveAll", + "Rename", + "SEEK_CUR", + "SEEK_END", + "SEEK_SET", + "SameFile", + "Setenv", + "Signal", + "StartProcess", + "Stat", + "Stderr", + "Stdin", + "Stdout", + "Symlink", + "SyscallError", + "TempDir", + "Truncate", 
+ "Unsetenv", + "UserCacheDir", + "UserConfigDir", + "UserHomeDir", + "WriteFile", + }, + "os/exec": { + "Cmd", + "Command", + "CommandContext", + "ErrDot", + "ErrNotFound", + "ErrWaitDelay", + "Error", + "ExitError", + "LookPath", + }, + "os/signal": { + "Ignore", + "Ignored", + "Notify", + "NotifyContext", + "Reset", + "Stop", + }, + "os/user": { + "Current", + "Group", + "Lookup", + "LookupGroup", + "LookupGroupId", + "LookupId", + "UnknownGroupError", + "UnknownGroupIdError", + "UnknownUserError", + "UnknownUserIdError", + "User", + }, + "path": { + "Base", + "Clean", + "Dir", + "ErrBadPattern", + "Ext", + "IsAbs", + "Join", + "Match", + "Split", + }, + "path/filepath": { + "Abs", + "Base", + "Clean", + "Dir", + "ErrBadPattern", + "EvalSymlinks", + "Ext", + "FromSlash", + "Glob", + "HasPrefix", + "IsAbs", + "IsLocal", + "Join", + "ListSeparator", + "Match", + "Rel", + "Separator", + "SkipAll", + "SkipDir", + "Split", + "SplitList", + "ToSlash", + "VolumeName", + "Walk", + "WalkDir", + "WalkFunc", + }, + "plugin": { + "Open", + "Plugin", + "Symbol", + }, + "reflect": { + "Append", + "AppendSlice", + "Array", + "ArrayOf", + "Bool", + "BothDir", + "Chan", + "ChanDir", + "ChanOf", + "Complex128", + "Complex64", + "Copy", + "DeepEqual", + "Float32", + "Float64", + "Func", + "FuncOf", + "Indirect", + "Int", + "Int16", + "Int32", + "Int64", + "Int8", + "Interface", + "Invalid", + "Kind", + "MakeChan", + "MakeFunc", + "MakeMap", + "MakeMapWithSize", + "MakeSlice", + "Map", + "MapIter", + "MapOf", + "Method", + "New", + "NewAt", + "Pointer", + "PointerTo", + "Ptr", + "PtrTo", + "RecvDir", + "Select", + "SelectCase", + "SelectDefault", + "SelectDir", + "SelectRecv", + "SelectSend", + "SendDir", + "Slice", + "SliceHeader", + "SliceOf", + "String", + "StringHeader", + "Struct", + "StructField", + "StructOf", + "StructTag", + "Swapper", + "Type", + "TypeOf", + "Uint", + "Uint16", + "Uint32", + "Uint64", + "Uint8", + "Uintptr", + "UnsafePointer", + "Value", + "ValueError", + 
"ValueOf", + "VisibleFields", + "Zero", + }, + "regexp": { + "Compile", + "CompilePOSIX", + "Match", + "MatchReader", + "MatchString", + "MustCompile", + "MustCompilePOSIX", + "QuoteMeta", + "Regexp", + }, + "regexp/syntax": { + "ClassNL", + "Compile", + "DotNL", + "EmptyBeginLine", + "EmptyBeginText", + "EmptyEndLine", + "EmptyEndText", + "EmptyNoWordBoundary", + "EmptyOp", + "EmptyOpContext", + "EmptyWordBoundary", + "ErrInternalError", + "ErrInvalidCharClass", + "ErrInvalidCharRange", + "ErrInvalidEscape", + "ErrInvalidNamedCapture", + "ErrInvalidPerlOp", + "ErrInvalidRepeatOp", + "ErrInvalidRepeatSize", + "ErrInvalidUTF8", + "ErrLarge", + "ErrMissingBracket", + "ErrMissingParen", + "ErrMissingRepeatArgument", + "ErrNestingDepth", + "ErrTrailingBackslash", + "ErrUnexpectedParen", + "Error", + "ErrorCode", + "Flags", + "FoldCase", + "Inst", + "InstAlt", + "InstAltMatch", + "InstCapture", + "InstEmptyWidth", + "InstFail", + "InstMatch", + "InstNop", + "InstOp", + "InstRune", + "InstRune1", + "InstRuneAny", + "InstRuneAnyNotNL", + "IsWordChar", + "Literal", + "MatchNL", + "NonGreedy", + "OneLine", + "Op", + "OpAlternate", + "OpAnyChar", + "OpAnyCharNotNL", + "OpBeginLine", + "OpBeginText", + "OpCapture", + "OpCharClass", + "OpConcat", + "OpEmptyMatch", + "OpEndLine", + "OpEndText", + "OpLiteral", + "OpNoMatch", + "OpNoWordBoundary", + "OpPlus", + "OpQuest", + "OpRepeat", + "OpStar", + "OpWordBoundary", + "POSIX", + "Parse", + "Perl", + "PerlX", + "Prog", + "Regexp", + "Simple", + "UnicodeGroups", + "WasDollar", + }, + "runtime": { + "BlockProfile", + "BlockProfileRecord", + "Breakpoint", + "CPUProfile", + "Caller", + "Callers", + "CallersFrames", + "Compiler", + "Error", + "Frame", + "Frames", + "Func", + "FuncForPC", + "GC", + "GOARCH", + "GOMAXPROCS", + "GOOS", + "GOROOT", + "Goexit", + "GoroutineProfile", + "Gosched", + "KeepAlive", + "LockOSThread", + "MemProfile", + "MemProfileRate", + "MemProfileRecord", + "MemStats", + "MutexProfile", + "NumCPU", + 
"NumCgoCall", + "NumGoroutine", + "ReadMemStats", + "ReadTrace", + "SetBlockProfileRate", + "SetCPUProfileRate", + "SetCgoTraceback", + "SetFinalizer", + "SetMutexProfileFraction", + "Stack", + "StackRecord", + "StartTrace", + "StopTrace", + "ThreadCreateProfile", + "TypeAssertionError", + "UnlockOSThread", + "Version", + }, + "runtime/cgo": { + "Handle", + "Incomplete", + "NewHandle", + }, + "runtime/coverage": { + "ClearCounters", + "WriteCounters", + "WriteCountersDir", + "WriteMeta", + "WriteMetaDir", + }, + "runtime/debug": { + "BuildInfo", + "BuildSetting", + "FreeOSMemory", + "GCStats", + "Module", + "ParseBuildInfo", + "PrintStack", + "ReadBuildInfo", + "ReadGCStats", + "SetGCPercent", + "SetMaxStack", + "SetMaxThreads", + "SetMemoryLimit", + "SetPanicOnFault", + "SetTraceback", + "Stack", + "WriteHeapDump", + }, + "runtime/metrics": { + "All", + "Description", + "Float64Histogram", + "KindBad", + "KindFloat64", + "KindFloat64Histogram", + "KindUint64", + "Read", + "Sample", + "Value", + "ValueKind", + }, + "runtime/pprof": { + "Do", + "ForLabels", + "Label", + "LabelSet", + "Labels", + "Lookup", + "NewProfile", + "Profile", + "Profiles", + "SetGoroutineLabels", + "StartCPUProfile", + "StopCPUProfile", + "WithLabels", + "WriteHeapProfile", + }, + "runtime/trace": { + "IsEnabled", + "Log", + "Logf", + "NewTask", + "Region", + "Start", + "StartRegion", + "Stop", + "Task", + "WithRegion", + }, + "sort": { + "Find", + "Float64Slice", + "Float64s", + "Float64sAreSorted", + "IntSlice", + "Interface", + "Ints", + "IntsAreSorted", + "IsSorted", + "Reverse", + "Search", + "SearchFloat64s", + "SearchInts", + "SearchStrings", + "Slice", + "SliceIsSorted", + "SliceStable", + "Sort", + "Stable", + "StringSlice", + "Strings", + "StringsAreSorted", + }, + "strconv": { + "AppendBool", + "AppendFloat", + "AppendInt", + "AppendQuote", + "AppendQuoteRune", + "AppendQuoteRuneToASCII", + "AppendQuoteRuneToGraphic", + "AppendQuoteToASCII", + "AppendQuoteToGraphic", + 
"AppendUint", + "Atoi", + "CanBackquote", + "ErrRange", + "ErrSyntax", + "FormatBool", + "FormatComplex", + "FormatFloat", + "FormatInt", + "FormatUint", + "IntSize", + "IsGraphic", + "IsPrint", + "Itoa", + "NumError", + "ParseBool", + "ParseComplex", + "ParseFloat", + "ParseInt", + "ParseUint", + "Quote", + "QuoteRune", + "QuoteRuneToASCII", + "QuoteRuneToGraphic", + "QuoteToASCII", + "QuoteToGraphic", + "QuotedPrefix", + "Unquote", + "UnquoteChar", + }, + "strings": { + "Builder", + "Clone", + "Compare", + "Contains", + "ContainsAny", + "ContainsRune", + "Count", + "Cut", + "CutPrefix", + "CutSuffix", + "EqualFold", + "Fields", + "FieldsFunc", + "HasPrefix", + "HasSuffix", + "Index", + "IndexAny", + "IndexByte", + "IndexFunc", + "IndexRune", + "Join", + "LastIndex", + "LastIndexAny", + "LastIndexByte", + "LastIndexFunc", + "Map", + "NewReader", + "NewReplacer", + "Reader", + "Repeat", + "Replace", + "ReplaceAll", + "Replacer", + "Split", + "SplitAfter", + "SplitAfterN", + "SplitN", + "Title", + "ToLower", + "ToLowerSpecial", + "ToTitle", + "ToTitleSpecial", + "ToUpper", + "ToUpperSpecial", + "ToValidUTF8", + "Trim", + "TrimFunc", + "TrimLeft", + "TrimLeftFunc", + "TrimPrefix", + "TrimRight", + "TrimRightFunc", + "TrimSpace", + "TrimSuffix", + }, + "sync": { + "Cond", + "Locker", + "Map", + "Mutex", + "NewCond", + "Once", + "Pool", + "RWMutex", + "WaitGroup", + }, + "sync/atomic": { + "AddInt32", + "AddInt64", + "AddUint32", + "AddUint64", + "AddUintptr", + "Bool", + "CompareAndSwapInt32", + "CompareAndSwapInt64", + "CompareAndSwapPointer", + "CompareAndSwapUint32", + "CompareAndSwapUint64", + "CompareAndSwapUintptr", + "Int32", + "Int64", + "LoadInt32", + "LoadInt64", + "LoadPointer", + "LoadUint32", + "LoadUint64", + "LoadUintptr", + "Pointer", + "StoreInt32", + "StoreInt64", + "StorePointer", + "StoreUint32", + "StoreUint64", + "StoreUintptr", + "SwapInt32", + "SwapInt64", + "SwapPointer", + "SwapUint32", + "SwapUint64", + "SwapUintptr", + "Uint32", + "Uint64", 
+ "Uintptr", + "Value", + }, + "syscall": { + "AF_ALG", + "AF_APPLETALK", + "AF_ARP", + "AF_ASH", + "AF_ATM", + "AF_ATMPVC", + "AF_ATMSVC", + "AF_AX25", + "AF_BLUETOOTH", + "AF_BRIDGE", + "AF_CAIF", + "AF_CAN", + "AF_CCITT", + "AF_CHAOS", + "AF_CNT", + "AF_COIP", + "AF_DATAKIT", + "AF_DECnet", + "AF_DLI", + "AF_E164", + "AF_ECMA", + "AF_ECONET", + "AF_ENCAP", + "AF_FILE", + "AF_HYLINK", + "AF_IEEE80211", + "AF_IEEE802154", + "AF_IMPLINK", + "AF_INET", + "AF_INET6", + "AF_INET6_SDP", + "AF_INET_SDP", + "AF_IPX", + "AF_IRDA", + "AF_ISDN", + "AF_ISO", + "AF_IUCV", + "AF_KEY", + "AF_LAT", + "AF_LINK", + "AF_LLC", + "AF_LOCAL", + "AF_MAX", + "AF_MPLS", + "AF_NATM", + "AF_NDRV", + "AF_NETBEUI", + "AF_NETBIOS", + "AF_NETGRAPH", + "AF_NETLINK", + "AF_NETROM", + "AF_NS", + "AF_OROUTE", + "AF_OSI", + "AF_PACKET", + "AF_PHONET", + "AF_PPP", + "AF_PPPOX", + "AF_PUP", + "AF_RDS", + "AF_RESERVED_36", + "AF_ROSE", + "AF_ROUTE", + "AF_RXRPC", + "AF_SCLUSTER", + "AF_SECURITY", + "AF_SIP", + "AF_SLOW", + "AF_SNA", + "AF_SYSTEM", + "AF_TIPC", + "AF_UNIX", + "AF_UNSPEC", + "AF_UTUN", + "AF_VENDOR00", + "AF_VENDOR01", + "AF_VENDOR02", + "AF_VENDOR03", + "AF_VENDOR04", + "AF_VENDOR05", + "AF_VENDOR06", + "AF_VENDOR07", + "AF_VENDOR08", + "AF_VENDOR09", + "AF_VENDOR10", + "AF_VENDOR11", + "AF_VENDOR12", + "AF_VENDOR13", + "AF_VENDOR14", + "AF_VENDOR15", + "AF_VENDOR16", + "AF_VENDOR17", + "AF_VENDOR18", + "AF_VENDOR19", + "AF_VENDOR20", + "AF_VENDOR21", + "AF_VENDOR22", + "AF_VENDOR23", + "AF_VENDOR24", + "AF_VENDOR25", + "AF_VENDOR26", + "AF_VENDOR27", + "AF_VENDOR28", + "AF_VENDOR29", + "AF_VENDOR30", + "AF_VENDOR31", + "AF_VENDOR32", + "AF_VENDOR33", + "AF_VENDOR34", + "AF_VENDOR35", + "AF_VENDOR36", + "AF_VENDOR37", + "AF_VENDOR38", + "AF_VENDOR39", + "AF_VENDOR40", + "AF_VENDOR41", + "AF_VENDOR42", + "AF_VENDOR43", + "AF_VENDOR44", + "AF_VENDOR45", + "AF_VENDOR46", + "AF_VENDOR47", + "AF_WANPIPE", + "AF_X25", + "AI_CANONNAME", + "AI_NUMERICHOST", + "AI_PASSIVE", + 
"APPLICATION_ERROR", + "ARPHRD_ADAPT", + "ARPHRD_APPLETLK", + "ARPHRD_ARCNET", + "ARPHRD_ASH", + "ARPHRD_ATM", + "ARPHRD_AX25", + "ARPHRD_BIF", + "ARPHRD_CHAOS", + "ARPHRD_CISCO", + "ARPHRD_CSLIP", + "ARPHRD_CSLIP6", + "ARPHRD_DDCMP", + "ARPHRD_DLCI", + "ARPHRD_ECONET", + "ARPHRD_EETHER", + "ARPHRD_ETHER", + "ARPHRD_EUI64", + "ARPHRD_FCAL", + "ARPHRD_FCFABRIC", + "ARPHRD_FCPL", + "ARPHRD_FCPP", + "ARPHRD_FDDI", + "ARPHRD_FRAD", + "ARPHRD_FRELAY", + "ARPHRD_HDLC", + "ARPHRD_HIPPI", + "ARPHRD_HWX25", + "ARPHRD_IEEE1394", + "ARPHRD_IEEE802", + "ARPHRD_IEEE80211", + "ARPHRD_IEEE80211_PRISM", + "ARPHRD_IEEE80211_RADIOTAP", + "ARPHRD_IEEE802154", + "ARPHRD_IEEE802154_PHY", + "ARPHRD_IEEE802_TR", + "ARPHRD_INFINIBAND", + "ARPHRD_IPDDP", + "ARPHRD_IPGRE", + "ARPHRD_IRDA", + "ARPHRD_LAPB", + "ARPHRD_LOCALTLK", + "ARPHRD_LOOPBACK", + "ARPHRD_METRICOM", + "ARPHRD_NETROM", + "ARPHRD_NONE", + "ARPHRD_PIMREG", + "ARPHRD_PPP", + "ARPHRD_PRONET", + "ARPHRD_RAWHDLC", + "ARPHRD_ROSE", + "ARPHRD_RSRVD", + "ARPHRD_SIT", + "ARPHRD_SKIP", + "ARPHRD_SLIP", + "ARPHRD_SLIP6", + "ARPHRD_STRIP", + "ARPHRD_TUNNEL", + "ARPHRD_TUNNEL6", + "ARPHRD_VOID", + "ARPHRD_X25", + "AUTHTYPE_CLIENT", + "AUTHTYPE_SERVER", + "Accept", + "Accept4", + "AcceptEx", + "Access", + "Acct", + "AddrinfoW", + "Adjtime", + "Adjtimex", + "AllThreadsSyscall", + "AllThreadsSyscall6", + "AttachLsf", + "B0", + "B1000000", + "B110", + "B115200", + "B1152000", + "B1200", + "B134", + "B14400", + "B150", + "B1500000", + "B1800", + "B19200", + "B200", + "B2000000", + "B230400", + "B2400", + "B2500000", + "B28800", + "B300", + "B3000000", + "B3500000", + "B38400", + "B4000000", + "B460800", + "B4800", + "B50", + "B500000", + "B57600", + "B576000", + "B600", + "B7200", + "B75", + "B76800", + "B921600", + "B9600", + "BASE_PROTOCOL", + "BIOCFEEDBACK", + "BIOCFLUSH", + "BIOCGBLEN", + "BIOCGDIRECTION", + "BIOCGDIRFILT", + "BIOCGDLT", + "BIOCGDLTLIST", + "BIOCGETBUFMODE", + "BIOCGETIF", + "BIOCGETZMAX", + "BIOCGFEEDBACK", + 
"BIOCGFILDROP", + "BIOCGHDRCMPLT", + "BIOCGRSIG", + "BIOCGRTIMEOUT", + "BIOCGSEESENT", + "BIOCGSTATS", + "BIOCGSTATSOLD", + "BIOCGTSTAMP", + "BIOCIMMEDIATE", + "BIOCLOCK", + "BIOCPROMISC", + "BIOCROTZBUF", + "BIOCSBLEN", + "BIOCSDIRECTION", + "BIOCSDIRFILT", + "BIOCSDLT", + "BIOCSETBUFMODE", + "BIOCSETF", + "BIOCSETFNR", + "BIOCSETIF", + "BIOCSETWF", + "BIOCSETZBUF", + "BIOCSFEEDBACK", + "BIOCSFILDROP", + "BIOCSHDRCMPLT", + "BIOCSRSIG", + "BIOCSRTIMEOUT", + "BIOCSSEESENT", + "BIOCSTCPF", + "BIOCSTSTAMP", + "BIOCSUDPF", + "BIOCVERSION", + "BPF_A", + "BPF_ABS", + "BPF_ADD", + "BPF_ALIGNMENT", + "BPF_ALIGNMENT32", + "BPF_ALU", + "BPF_AND", + "BPF_B", + "BPF_BUFMODE_BUFFER", + "BPF_BUFMODE_ZBUF", + "BPF_DFLTBUFSIZE", + "BPF_DIRECTION_IN", + "BPF_DIRECTION_OUT", + "BPF_DIV", + "BPF_H", + "BPF_IMM", + "BPF_IND", + "BPF_JA", + "BPF_JEQ", + "BPF_JGE", + "BPF_JGT", + "BPF_JMP", + "BPF_JSET", + "BPF_K", + "BPF_LD", + "BPF_LDX", + "BPF_LEN", + "BPF_LSH", + "BPF_MAJOR_VERSION", + "BPF_MAXBUFSIZE", + "BPF_MAXINSNS", + "BPF_MEM", + "BPF_MEMWORDS", + "BPF_MINBUFSIZE", + "BPF_MINOR_VERSION", + "BPF_MISC", + "BPF_MSH", + "BPF_MUL", + "BPF_NEG", + "BPF_OR", + "BPF_RELEASE", + "BPF_RET", + "BPF_RSH", + "BPF_ST", + "BPF_STX", + "BPF_SUB", + "BPF_TAX", + "BPF_TXA", + "BPF_T_BINTIME", + "BPF_T_BINTIME_FAST", + "BPF_T_BINTIME_MONOTONIC", + "BPF_T_BINTIME_MONOTONIC_FAST", + "BPF_T_FAST", + "BPF_T_FLAG_MASK", + "BPF_T_FORMAT_MASK", + "BPF_T_MICROTIME", + "BPF_T_MICROTIME_FAST", + "BPF_T_MICROTIME_MONOTONIC", + "BPF_T_MICROTIME_MONOTONIC_FAST", + "BPF_T_MONOTONIC", + "BPF_T_MONOTONIC_FAST", + "BPF_T_NANOTIME", + "BPF_T_NANOTIME_FAST", + "BPF_T_NANOTIME_MONOTONIC", + "BPF_T_NANOTIME_MONOTONIC_FAST", + "BPF_T_NONE", + "BPF_T_NORMAL", + "BPF_W", + "BPF_X", + "BRKINT", + "Bind", + "BindToDevice", + "BpfBuflen", + "BpfDatalink", + "BpfHdr", + "BpfHeadercmpl", + "BpfInsn", + "BpfInterface", + "BpfJump", + "BpfProgram", + "BpfStat", + "BpfStats", + "BpfStmt", + "BpfTimeout", + "BpfTimeval", + 
"BpfVersion", + "BpfZbuf", + "BpfZbufHeader", + "ByHandleFileInformation", + "BytePtrFromString", + "ByteSliceFromString", + "CCR0_FLUSH", + "CERT_CHAIN_POLICY_AUTHENTICODE", + "CERT_CHAIN_POLICY_AUTHENTICODE_TS", + "CERT_CHAIN_POLICY_BASE", + "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", + "CERT_CHAIN_POLICY_EV", + "CERT_CHAIN_POLICY_MICROSOFT_ROOT", + "CERT_CHAIN_POLICY_NT_AUTH", + "CERT_CHAIN_POLICY_SSL", + "CERT_E_CN_NO_MATCH", + "CERT_E_EXPIRED", + "CERT_E_PURPOSE", + "CERT_E_ROLE", + "CERT_E_UNTRUSTEDROOT", + "CERT_STORE_ADD_ALWAYS", + "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", + "CERT_STORE_PROV_MEMORY", + "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", + "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", + "CERT_TRUST_INVALID_BASIC_CONSTRAINTS", + "CERT_TRUST_INVALID_EXTENSION", + "CERT_TRUST_INVALID_NAME_CONSTRAINTS", + "CERT_TRUST_INVALID_POLICY_CONSTRAINTS", + "CERT_TRUST_IS_CYCLIC", + "CERT_TRUST_IS_EXPLICIT_DISTRUST", + "CERT_TRUST_IS_NOT_SIGNATURE_VALID", + "CERT_TRUST_IS_NOT_TIME_VALID", + "CERT_TRUST_IS_NOT_VALID_FOR_USAGE", + "CERT_TRUST_IS_OFFLINE_REVOCATION", + "CERT_TRUST_IS_REVOKED", + "CERT_TRUST_IS_UNTRUSTED_ROOT", + "CERT_TRUST_NO_ERROR", + "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", + "CERT_TRUST_REVOCATION_STATUS_UNKNOWN", + "CFLUSH", + "CLOCAL", + "CLONE_CHILD_CLEARTID", + "CLONE_CHILD_SETTID", + "CLONE_CLEAR_SIGHAND", + "CLONE_CSIGNAL", + "CLONE_DETACHED", + "CLONE_FILES", + "CLONE_FS", + "CLONE_INTO_CGROUP", + "CLONE_IO", + "CLONE_NEWCGROUP", + "CLONE_NEWIPC", + "CLONE_NEWNET", + "CLONE_NEWNS", + "CLONE_NEWPID", + "CLONE_NEWTIME", + "CLONE_NEWUSER", + "CLONE_NEWUTS", + "CLONE_PARENT", + "CLONE_PARENT_SETTID", + "CLONE_PID", + "CLONE_PIDFD", + "CLONE_PTRACE", + "CLONE_SETTLS", + "CLONE_SIGHAND", + "CLONE_SYSVSEM", + "CLONE_THREAD", + "CLONE_UNTRACED", + "CLONE_VFORK", + "CLONE_VM", + "CPUID_CFLUSH", + "CREAD", 
+ "CREATE_ALWAYS", + "CREATE_NEW", + "CREATE_NEW_PROCESS_GROUP", + "CREATE_UNICODE_ENVIRONMENT", + "CRYPT_DEFAULT_CONTAINER_OPTIONAL", + "CRYPT_DELETEKEYSET", + "CRYPT_MACHINE_KEYSET", + "CRYPT_NEWKEYSET", + "CRYPT_SILENT", + "CRYPT_VERIFYCONTEXT", + "CS5", + "CS6", + "CS7", + "CS8", + "CSIZE", + "CSTART", + "CSTATUS", + "CSTOP", + "CSTOPB", + "CSUSP", + "CTL_MAXNAME", + "CTL_NET", + "CTL_QUERY", + "CTRL_BREAK_EVENT", + "CTRL_CLOSE_EVENT", + "CTRL_C_EVENT", + "CTRL_LOGOFF_EVENT", + "CTRL_SHUTDOWN_EVENT", + "CancelIo", + "CancelIoEx", + "CertAddCertificateContextToStore", + "CertChainContext", + "CertChainElement", + "CertChainPara", + "CertChainPolicyPara", + "CertChainPolicyStatus", + "CertCloseStore", + "CertContext", + "CertCreateCertificateContext", + "CertEnhKeyUsage", + "CertEnumCertificatesInStore", + "CertFreeCertificateChain", + "CertFreeCertificateContext", + "CertGetCertificateChain", + "CertInfo", + "CertOpenStore", + "CertOpenSystemStore", + "CertRevocationCrlInfo", + "CertRevocationInfo", + "CertSimpleChain", + "CertTrustListInfo", + "CertTrustStatus", + "CertUsageMatch", + "CertVerifyCertificateChainPolicy", + "Chdir", + "CheckBpfVersion", + "Chflags", + "Chmod", + "Chown", + "Chroot", + "Clearenv", + "Close", + "CloseHandle", + "CloseOnExec", + "Closesocket", + "CmsgLen", + "CmsgSpace", + "Cmsghdr", + "CommandLineToArgv", + "ComputerName", + "Conn", + "Connect", + "ConnectEx", + "ConvertSidToStringSid", + "ConvertStringSidToSid", + "CopySid", + "Creat", + "CreateDirectory", + "CreateFile", + "CreateFileMapping", + "CreateHardLink", + "CreateIoCompletionPort", + "CreatePipe", + "CreateProcess", + "CreateProcessAsUser", + "CreateSymbolicLink", + "CreateToolhelp32Snapshot", + "Credential", + "CryptAcquireContext", + "CryptGenRandom", + "CryptReleaseContext", + "DIOCBSFLUSH", + "DIOCOSFPFLUSH", + "DLL", + "DLLError", + "DLT_A429", + "DLT_A653_ICM", + "DLT_AIRONET_HEADER", + "DLT_AOS", + "DLT_APPLE_IP_OVER_IEEE1394", + "DLT_ARCNET", + "DLT_ARCNET_LINUX", 
+ "DLT_ATM_CLIP", + "DLT_ATM_RFC1483", + "DLT_AURORA", + "DLT_AX25", + "DLT_AX25_KISS", + "DLT_BACNET_MS_TP", + "DLT_BLUETOOTH_HCI_H4", + "DLT_BLUETOOTH_HCI_H4_WITH_PHDR", + "DLT_CAN20B", + "DLT_CAN_SOCKETCAN", + "DLT_CHAOS", + "DLT_CHDLC", + "DLT_CISCO_IOS", + "DLT_C_HDLC", + "DLT_C_HDLC_WITH_DIR", + "DLT_DBUS", + "DLT_DECT", + "DLT_DOCSIS", + "DLT_DVB_CI", + "DLT_ECONET", + "DLT_EN10MB", + "DLT_EN3MB", + "DLT_ENC", + "DLT_ERF", + "DLT_ERF_ETH", + "DLT_ERF_POS", + "DLT_FC_2", + "DLT_FC_2_WITH_FRAME_DELIMS", + "DLT_FDDI", + "DLT_FLEXRAY", + "DLT_FRELAY", + "DLT_FRELAY_WITH_DIR", + "DLT_GCOM_SERIAL", + "DLT_GCOM_T1E1", + "DLT_GPF_F", + "DLT_GPF_T", + "DLT_GPRS_LLC", + "DLT_GSMTAP_ABIS", + "DLT_GSMTAP_UM", + "DLT_HDLC", + "DLT_HHDLC", + "DLT_HIPPI", + "DLT_IBM_SN", + "DLT_IBM_SP", + "DLT_IEEE802", + "DLT_IEEE802_11", + "DLT_IEEE802_11_RADIO", + "DLT_IEEE802_11_RADIO_AVS", + "DLT_IEEE802_15_4", + "DLT_IEEE802_15_4_LINUX", + "DLT_IEEE802_15_4_NOFCS", + "DLT_IEEE802_15_4_NONASK_PHY", + "DLT_IEEE802_16_MAC_CPS", + "DLT_IEEE802_16_MAC_CPS_RADIO", + "DLT_IPFILTER", + "DLT_IPMB", + "DLT_IPMB_LINUX", + "DLT_IPNET", + "DLT_IPOIB", + "DLT_IPV4", + "DLT_IPV6", + "DLT_IP_OVER_FC", + "DLT_JUNIPER_ATM1", + "DLT_JUNIPER_ATM2", + "DLT_JUNIPER_ATM_CEMIC", + "DLT_JUNIPER_CHDLC", + "DLT_JUNIPER_ES", + "DLT_JUNIPER_ETHER", + "DLT_JUNIPER_FIBRECHANNEL", + "DLT_JUNIPER_FRELAY", + "DLT_JUNIPER_GGSN", + "DLT_JUNIPER_ISM", + "DLT_JUNIPER_MFR", + "DLT_JUNIPER_MLFR", + "DLT_JUNIPER_MLPPP", + "DLT_JUNIPER_MONITOR", + "DLT_JUNIPER_PIC_PEER", + "DLT_JUNIPER_PPP", + "DLT_JUNIPER_PPPOE", + "DLT_JUNIPER_PPPOE_ATM", + "DLT_JUNIPER_SERVICES", + "DLT_JUNIPER_SRX_E2E", + "DLT_JUNIPER_ST", + "DLT_JUNIPER_VP", + "DLT_JUNIPER_VS", + "DLT_LAPB_WITH_DIR", + "DLT_LAPD", + "DLT_LIN", + "DLT_LINUX_EVDEV", + "DLT_LINUX_IRDA", + "DLT_LINUX_LAPD", + "DLT_LINUX_PPP_WITHDIRECTION", + "DLT_LINUX_SLL", + "DLT_LOOP", + "DLT_LTALK", + "DLT_MATCHING_MAX", + "DLT_MATCHING_MIN", + "DLT_MFR", + "DLT_MOST", + 
"DLT_MPEG_2_TS", + "DLT_MPLS", + "DLT_MTP2", + "DLT_MTP2_WITH_PHDR", + "DLT_MTP3", + "DLT_MUX27010", + "DLT_NETANALYZER", + "DLT_NETANALYZER_TRANSPARENT", + "DLT_NFC_LLCP", + "DLT_NFLOG", + "DLT_NG40", + "DLT_NULL", + "DLT_PCI_EXP", + "DLT_PFLOG", + "DLT_PFSYNC", + "DLT_PPI", + "DLT_PPP", + "DLT_PPP_BSDOS", + "DLT_PPP_ETHER", + "DLT_PPP_PPPD", + "DLT_PPP_SERIAL", + "DLT_PPP_WITH_DIR", + "DLT_PPP_WITH_DIRECTION", + "DLT_PRISM_HEADER", + "DLT_PRONET", + "DLT_RAIF1", + "DLT_RAW", + "DLT_RAWAF_MASK", + "DLT_RIO", + "DLT_SCCP", + "DLT_SITA", + "DLT_SLIP", + "DLT_SLIP_BSDOS", + "DLT_STANAG_5066_D_PDU", + "DLT_SUNATM", + "DLT_SYMANTEC_FIREWALL", + "DLT_TZSP", + "DLT_USB", + "DLT_USB_LINUX", + "DLT_USB_LINUX_MMAPPED", + "DLT_USER0", + "DLT_USER1", + "DLT_USER10", + "DLT_USER11", + "DLT_USER12", + "DLT_USER13", + "DLT_USER14", + "DLT_USER15", + "DLT_USER2", + "DLT_USER3", + "DLT_USER4", + "DLT_USER5", + "DLT_USER6", + "DLT_USER7", + "DLT_USER8", + "DLT_USER9", + "DLT_WIHART", + "DLT_X2E_SERIAL", + "DLT_X2E_XORAYA", + "DNSMXData", + "DNSPTRData", + "DNSRecord", + "DNSSRVData", + "DNSTXTData", + "DNS_INFO_NO_RECORDS", + "DNS_TYPE_A", + "DNS_TYPE_A6", + "DNS_TYPE_AAAA", + "DNS_TYPE_ADDRS", + "DNS_TYPE_AFSDB", + "DNS_TYPE_ALL", + "DNS_TYPE_ANY", + "DNS_TYPE_ATMA", + "DNS_TYPE_AXFR", + "DNS_TYPE_CERT", + "DNS_TYPE_CNAME", + "DNS_TYPE_DHCID", + "DNS_TYPE_DNAME", + "DNS_TYPE_DNSKEY", + "DNS_TYPE_DS", + "DNS_TYPE_EID", + "DNS_TYPE_GID", + "DNS_TYPE_GPOS", + "DNS_TYPE_HINFO", + "DNS_TYPE_ISDN", + "DNS_TYPE_IXFR", + "DNS_TYPE_KEY", + "DNS_TYPE_KX", + "DNS_TYPE_LOC", + "DNS_TYPE_MAILA", + "DNS_TYPE_MAILB", + "DNS_TYPE_MB", + "DNS_TYPE_MD", + "DNS_TYPE_MF", + "DNS_TYPE_MG", + "DNS_TYPE_MINFO", + "DNS_TYPE_MR", + "DNS_TYPE_MX", + "DNS_TYPE_NAPTR", + "DNS_TYPE_NBSTAT", + "DNS_TYPE_NIMLOC", + "DNS_TYPE_NS", + "DNS_TYPE_NSAP", + "DNS_TYPE_NSAPPTR", + "DNS_TYPE_NSEC", + "DNS_TYPE_NULL", + "DNS_TYPE_NXT", + "DNS_TYPE_OPT", + "DNS_TYPE_PTR", + "DNS_TYPE_PX", + "DNS_TYPE_RP", + 
"DNS_TYPE_RRSIG", + "DNS_TYPE_RT", + "DNS_TYPE_SIG", + "DNS_TYPE_SINK", + "DNS_TYPE_SOA", + "DNS_TYPE_SRV", + "DNS_TYPE_TEXT", + "DNS_TYPE_TKEY", + "DNS_TYPE_TSIG", + "DNS_TYPE_UID", + "DNS_TYPE_UINFO", + "DNS_TYPE_UNSPEC", + "DNS_TYPE_WINS", + "DNS_TYPE_WINSR", + "DNS_TYPE_WKS", + "DNS_TYPE_X25", + "DT_BLK", + "DT_CHR", + "DT_DIR", + "DT_FIFO", + "DT_LNK", + "DT_REG", + "DT_SOCK", + "DT_UNKNOWN", + "DT_WHT", + "DUPLICATE_CLOSE_SOURCE", + "DUPLICATE_SAME_ACCESS", + "DeleteFile", + "DetachLsf", + "DeviceIoControl", + "Dirent", + "DnsNameCompare", + "DnsQuery", + "DnsRecordListFree", + "DnsSectionAdditional", + "DnsSectionAnswer", + "DnsSectionAuthority", + "DnsSectionQuestion", + "Dup", + "Dup2", + "Dup3", + "DuplicateHandle", + "E2BIG", + "EACCES", + "EADDRINUSE", + "EADDRNOTAVAIL", + "EADV", + "EAFNOSUPPORT", + "EAGAIN", + "EALREADY", + "EAUTH", + "EBADARCH", + "EBADE", + "EBADEXEC", + "EBADF", + "EBADFD", + "EBADMACHO", + "EBADMSG", + "EBADR", + "EBADRPC", + "EBADRQC", + "EBADSLT", + "EBFONT", + "EBUSY", + "ECANCELED", + "ECAPMODE", + "ECHILD", + "ECHO", + "ECHOCTL", + "ECHOE", + "ECHOK", + "ECHOKE", + "ECHONL", + "ECHOPRT", + "ECHRNG", + "ECOMM", + "ECONNABORTED", + "ECONNREFUSED", + "ECONNRESET", + "EDEADLK", + "EDEADLOCK", + "EDESTADDRREQ", + "EDEVERR", + "EDOM", + "EDOOFUS", + "EDOTDOT", + "EDQUOT", + "EEXIST", + "EFAULT", + "EFBIG", + "EFER_LMA", + "EFER_LME", + "EFER_NXE", + "EFER_SCE", + "EFTYPE", + "EHOSTDOWN", + "EHOSTUNREACH", + "EHWPOISON", + "EIDRM", + "EILSEQ", + "EINPROGRESS", + "EINTR", + "EINVAL", + "EIO", + "EIPSEC", + "EISCONN", + "EISDIR", + "EISNAM", + "EKEYEXPIRED", + "EKEYREJECTED", + "EKEYREVOKED", + "EL2HLT", + "EL2NSYNC", + "EL3HLT", + "EL3RST", + "ELAST", + "ELF_NGREG", + "ELF_PRARGSZ", + "ELIBACC", + "ELIBBAD", + "ELIBEXEC", + "ELIBMAX", + "ELIBSCN", + "ELNRNG", + "ELOOP", + "EMEDIUMTYPE", + "EMFILE", + "EMLINK", + "EMSGSIZE", + "EMT_TAGOVF", + "EMULTIHOP", + "EMUL_ENABLED", + "EMUL_LINUX", + "EMUL_LINUX32", + "EMUL_MAXID", + 
"EMUL_NATIVE", + "ENAMETOOLONG", + "ENAVAIL", + "ENDRUNDISC", + "ENEEDAUTH", + "ENETDOWN", + "ENETRESET", + "ENETUNREACH", + "ENFILE", + "ENOANO", + "ENOATTR", + "ENOBUFS", + "ENOCSI", + "ENODATA", + "ENODEV", + "ENOENT", + "ENOEXEC", + "ENOKEY", + "ENOLCK", + "ENOLINK", + "ENOMEDIUM", + "ENOMEM", + "ENOMSG", + "ENONET", + "ENOPKG", + "ENOPOLICY", + "ENOPROTOOPT", + "ENOSPC", + "ENOSR", + "ENOSTR", + "ENOSYS", + "ENOTBLK", + "ENOTCAPABLE", + "ENOTCONN", + "ENOTDIR", + "ENOTEMPTY", + "ENOTNAM", + "ENOTRECOVERABLE", + "ENOTSOCK", + "ENOTSUP", + "ENOTTY", + "ENOTUNIQ", + "ENXIO", + "EN_SW_CTL_INF", + "EN_SW_CTL_PREC", + "EN_SW_CTL_ROUND", + "EN_SW_DATACHAIN", + "EN_SW_DENORM", + "EN_SW_INVOP", + "EN_SW_OVERFLOW", + "EN_SW_PRECLOSS", + "EN_SW_UNDERFLOW", + "EN_SW_ZERODIV", + "EOPNOTSUPP", + "EOVERFLOW", + "EOWNERDEAD", + "EPERM", + "EPFNOSUPPORT", + "EPIPE", + "EPOLLERR", + "EPOLLET", + "EPOLLHUP", + "EPOLLIN", + "EPOLLMSG", + "EPOLLONESHOT", + "EPOLLOUT", + "EPOLLPRI", + "EPOLLRDBAND", + "EPOLLRDHUP", + "EPOLLRDNORM", + "EPOLLWRBAND", + "EPOLLWRNORM", + "EPOLL_CLOEXEC", + "EPOLL_CTL_ADD", + "EPOLL_CTL_DEL", + "EPOLL_CTL_MOD", + "EPOLL_NONBLOCK", + "EPROCLIM", + "EPROCUNAVAIL", + "EPROGMISMATCH", + "EPROGUNAVAIL", + "EPROTO", + "EPROTONOSUPPORT", + "EPROTOTYPE", + "EPWROFF", + "EQFULL", + "ERANGE", + "EREMCHG", + "EREMOTE", + "EREMOTEIO", + "ERESTART", + "ERFKILL", + "EROFS", + "ERPCMISMATCH", + "ERROR_ACCESS_DENIED", + "ERROR_ALREADY_EXISTS", + "ERROR_BROKEN_PIPE", + "ERROR_BUFFER_OVERFLOW", + "ERROR_DIR_NOT_EMPTY", + "ERROR_ENVVAR_NOT_FOUND", + "ERROR_FILE_EXISTS", + "ERROR_FILE_NOT_FOUND", + "ERROR_HANDLE_EOF", + "ERROR_INSUFFICIENT_BUFFER", + "ERROR_IO_PENDING", + "ERROR_MOD_NOT_FOUND", + "ERROR_MORE_DATA", + "ERROR_NETNAME_DELETED", + "ERROR_NOT_FOUND", + "ERROR_NO_MORE_FILES", + "ERROR_OPERATION_ABORTED", + "ERROR_PATH_NOT_FOUND", + "ERROR_PRIVILEGE_NOT_HELD", + "ERROR_PROC_NOT_FOUND", + "ESHLIBVERS", + "ESHUTDOWN", + "ESOCKTNOSUPPORT", + "ESPIPE", + "ESRCH", + 
"ESRMNT", + "ESTALE", + "ESTRPIPE", + "ETHERCAP_JUMBO_MTU", + "ETHERCAP_VLAN_HWTAGGING", + "ETHERCAP_VLAN_MTU", + "ETHERMIN", + "ETHERMTU", + "ETHERMTU_JUMBO", + "ETHERTYPE_8023", + "ETHERTYPE_AARP", + "ETHERTYPE_ACCTON", + "ETHERTYPE_AEONIC", + "ETHERTYPE_ALPHA", + "ETHERTYPE_AMBER", + "ETHERTYPE_AMOEBA", + "ETHERTYPE_AOE", + "ETHERTYPE_APOLLO", + "ETHERTYPE_APOLLODOMAIN", + "ETHERTYPE_APPLETALK", + "ETHERTYPE_APPLITEK", + "ETHERTYPE_ARGONAUT", + "ETHERTYPE_ARP", + "ETHERTYPE_AT", + "ETHERTYPE_ATALK", + "ETHERTYPE_ATOMIC", + "ETHERTYPE_ATT", + "ETHERTYPE_ATTSTANFORD", + "ETHERTYPE_AUTOPHON", + "ETHERTYPE_AXIS", + "ETHERTYPE_BCLOOP", + "ETHERTYPE_BOFL", + "ETHERTYPE_CABLETRON", + "ETHERTYPE_CHAOS", + "ETHERTYPE_COMDESIGN", + "ETHERTYPE_COMPUGRAPHIC", + "ETHERTYPE_COUNTERPOINT", + "ETHERTYPE_CRONUS", + "ETHERTYPE_CRONUSVLN", + "ETHERTYPE_DCA", + "ETHERTYPE_DDE", + "ETHERTYPE_DEBNI", + "ETHERTYPE_DECAM", + "ETHERTYPE_DECCUST", + "ETHERTYPE_DECDIAG", + "ETHERTYPE_DECDNS", + "ETHERTYPE_DECDTS", + "ETHERTYPE_DECEXPER", + "ETHERTYPE_DECLAST", + "ETHERTYPE_DECLTM", + "ETHERTYPE_DECMUMPS", + "ETHERTYPE_DECNETBIOS", + "ETHERTYPE_DELTACON", + "ETHERTYPE_DIDDLE", + "ETHERTYPE_DLOG1", + "ETHERTYPE_DLOG2", + "ETHERTYPE_DN", + "ETHERTYPE_DOGFIGHT", + "ETHERTYPE_DSMD", + "ETHERTYPE_ECMA", + "ETHERTYPE_ENCRYPT", + "ETHERTYPE_ES", + "ETHERTYPE_EXCELAN", + "ETHERTYPE_EXPERDATA", + "ETHERTYPE_FLIP", + "ETHERTYPE_FLOWCONTROL", + "ETHERTYPE_FRARP", + "ETHERTYPE_GENDYN", + "ETHERTYPE_HAYES", + "ETHERTYPE_HIPPI_FP", + "ETHERTYPE_HITACHI", + "ETHERTYPE_HP", + "ETHERTYPE_IEEEPUP", + "ETHERTYPE_IEEEPUPAT", + "ETHERTYPE_IMLBL", + "ETHERTYPE_IMLBLDIAG", + "ETHERTYPE_IP", + "ETHERTYPE_IPAS", + "ETHERTYPE_IPV6", + "ETHERTYPE_IPX", + "ETHERTYPE_IPXNEW", + "ETHERTYPE_KALPANA", + "ETHERTYPE_LANBRIDGE", + "ETHERTYPE_LANPROBE", + "ETHERTYPE_LAT", + "ETHERTYPE_LBACK", + "ETHERTYPE_LITTLE", + "ETHERTYPE_LLDP", + "ETHERTYPE_LOGICRAFT", + "ETHERTYPE_LOOPBACK", + "ETHERTYPE_MATRA", + "ETHERTYPE_MAX", + 
"ETHERTYPE_MERIT", + "ETHERTYPE_MICP", + "ETHERTYPE_MOPDL", + "ETHERTYPE_MOPRC", + "ETHERTYPE_MOTOROLA", + "ETHERTYPE_MPLS", + "ETHERTYPE_MPLS_MCAST", + "ETHERTYPE_MUMPS", + "ETHERTYPE_NBPCC", + "ETHERTYPE_NBPCLAIM", + "ETHERTYPE_NBPCLREQ", + "ETHERTYPE_NBPCLRSP", + "ETHERTYPE_NBPCREQ", + "ETHERTYPE_NBPCRSP", + "ETHERTYPE_NBPDG", + "ETHERTYPE_NBPDGB", + "ETHERTYPE_NBPDLTE", + "ETHERTYPE_NBPRAR", + "ETHERTYPE_NBPRAS", + "ETHERTYPE_NBPRST", + "ETHERTYPE_NBPSCD", + "ETHERTYPE_NBPVCD", + "ETHERTYPE_NBS", + "ETHERTYPE_NCD", + "ETHERTYPE_NESTAR", + "ETHERTYPE_NETBEUI", + "ETHERTYPE_NOVELL", + "ETHERTYPE_NS", + "ETHERTYPE_NSAT", + "ETHERTYPE_NSCOMPAT", + "ETHERTYPE_NTRAILER", + "ETHERTYPE_OS9", + "ETHERTYPE_OS9NET", + "ETHERTYPE_PACER", + "ETHERTYPE_PAE", + "ETHERTYPE_PCS", + "ETHERTYPE_PLANNING", + "ETHERTYPE_PPP", + "ETHERTYPE_PPPOE", + "ETHERTYPE_PPPOEDISC", + "ETHERTYPE_PRIMENTS", + "ETHERTYPE_PUP", + "ETHERTYPE_PUPAT", + "ETHERTYPE_QINQ", + "ETHERTYPE_RACAL", + "ETHERTYPE_RATIONAL", + "ETHERTYPE_RAWFR", + "ETHERTYPE_RCL", + "ETHERTYPE_RDP", + "ETHERTYPE_RETIX", + "ETHERTYPE_REVARP", + "ETHERTYPE_SCA", + "ETHERTYPE_SECTRA", + "ETHERTYPE_SECUREDATA", + "ETHERTYPE_SGITW", + "ETHERTYPE_SG_BOUNCE", + "ETHERTYPE_SG_DIAG", + "ETHERTYPE_SG_NETGAMES", + "ETHERTYPE_SG_RESV", + "ETHERTYPE_SIMNET", + "ETHERTYPE_SLOW", + "ETHERTYPE_SLOWPROTOCOLS", + "ETHERTYPE_SNA", + "ETHERTYPE_SNMP", + "ETHERTYPE_SONIX", + "ETHERTYPE_SPIDER", + "ETHERTYPE_SPRITE", + "ETHERTYPE_STP", + "ETHERTYPE_TALARIS", + "ETHERTYPE_TALARISMC", + "ETHERTYPE_TCPCOMP", + "ETHERTYPE_TCPSM", + "ETHERTYPE_TEC", + "ETHERTYPE_TIGAN", + "ETHERTYPE_TRAIL", + "ETHERTYPE_TRANSETHER", + "ETHERTYPE_TYMSHARE", + "ETHERTYPE_UBBST", + "ETHERTYPE_UBDEBUG", + "ETHERTYPE_UBDIAGLOOP", + "ETHERTYPE_UBDL", + "ETHERTYPE_UBNIU", + "ETHERTYPE_UBNMC", + "ETHERTYPE_VALID", + "ETHERTYPE_VARIAN", + "ETHERTYPE_VAXELN", + "ETHERTYPE_VEECO", + "ETHERTYPE_VEXP", + "ETHERTYPE_VGLAB", + "ETHERTYPE_VINES", + "ETHERTYPE_VINESECHO", + 
"ETHERTYPE_VINESLOOP", + "ETHERTYPE_VITAL", + "ETHERTYPE_VLAN", + "ETHERTYPE_VLTLMAN", + "ETHERTYPE_VPROD", + "ETHERTYPE_VURESERVED", + "ETHERTYPE_WATERLOO", + "ETHERTYPE_WELLFLEET", + "ETHERTYPE_X25", + "ETHERTYPE_X75", + "ETHERTYPE_XNSSM", + "ETHERTYPE_XTP", + "ETHER_ADDR_LEN", + "ETHER_ALIGN", + "ETHER_CRC_LEN", + "ETHER_CRC_POLY_BE", + "ETHER_CRC_POLY_LE", + "ETHER_HDR_LEN", + "ETHER_MAX_DIX_LEN", + "ETHER_MAX_LEN", + "ETHER_MAX_LEN_JUMBO", + "ETHER_MIN_LEN", + "ETHER_PPPOE_ENCAP_LEN", + "ETHER_TYPE_LEN", + "ETHER_VLAN_ENCAP_LEN", + "ETH_P_1588", + "ETH_P_8021Q", + "ETH_P_802_2", + "ETH_P_802_3", + "ETH_P_AARP", + "ETH_P_ALL", + "ETH_P_AOE", + "ETH_P_ARCNET", + "ETH_P_ARP", + "ETH_P_ATALK", + "ETH_P_ATMFATE", + "ETH_P_ATMMPOA", + "ETH_P_AX25", + "ETH_P_BPQ", + "ETH_P_CAIF", + "ETH_P_CAN", + "ETH_P_CONTROL", + "ETH_P_CUST", + "ETH_P_DDCMP", + "ETH_P_DEC", + "ETH_P_DIAG", + "ETH_P_DNA_DL", + "ETH_P_DNA_RC", + "ETH_P_DNA_RT", + "ETH_P_DSA", + "ETH_P_ECONET", + "ETH_P_EDSA", + "ETH_P_FCOE", + "ETH_P_FIP", + "ETH_P_HDLC", + "ETH_P_IEEE802154", + "ETH_P_IEEEPUP", + "ETH_P_IEEEPUPAT", + "ETH_P_IP", + "ETH_P_IPV6", + "ETH_P_IPX", + "ETH_P_IRDA", + "ETH_P_LAT", + "ETH_P_LINK_CTL", + "ETH_P_LOCALTALK", + "ETH_P_LOOP", + "ETH_P_MOBITEX", + "ETH_P_MPLS_MC", + "ETH_P_MPLS_UC", + "ETH_P_PAE", + "ETH_P_PAUSE", + "ETH_P_PHONET", + "ETH_P_PPPTALK", + "ETH_P_PPP_DISC", + "ETH_P_PPP_MP", + "ETH_P_PPP_SES", + "ETH_P_PUP", + "ETH_P_PUPAT", + "ETH_P_RARP", + "ETH_P_SCA", + "ETH_P_SLOW", + "ETH_P_SNAP", + "ETH_P_TEB", + "ETH_P_TIPC", + "ETH_P_TRAILER", + "ETH_P_TR_802_2", + "ETH_P_WAN_PPP", + "ETH_P_WCCP", + "ETH_P_X25", + "ETIME", + "ETIMEDOUT", + "ETOOMANYREFS", + "ETXTBSY", + "EUCLEAN", + "EUNATCH", + "EUSERS", + "EVFILT_AIO", + "EVFILT_FS", + "EVFILT_LIO", + "EVFILT_MACHPORT", + "EVFILT_PROC", + "EVFILT_READ", + "EVFILT_SIGNAL", + "EVFILT_SYSCOUNT", + "EVFILT_THREADMARKER", + "EVFILT_TIMER", + "EVFILT_USER", + "EVFILT_VM", + "EVFILT_VNODE", + "EVFILT_WRITE", + "EV_ADD", + 
"EV_CLEAR", + "EV_DELETE", + "EV_DISABLE", + "EV_DISPATCH", + "EV_DROP", + "EV_ENABLE", + "EV_EOF", + "EV_ERROR", + "EV_FLAG0", + "EV_FLAG1", + "EV_ONESHOT", + "EV_OOBAND", + "EV_POLL", + "EV_RECEIPT", + "EV_SYSFLAGS", + "EWINDOWS", + "EWOULDBLOCK", + "EXDEV", + "EXFULL", + "EXTA", + "EXTB", + "EXTPROC", + "Environ", + "EpollCreate", + "EpollCreate1", + "EpollCtl", + "EpollEvent", + "EpollWait", + "Errno", + "EscapeArg", + "Exchangedata", + "Exec", + "Exit", + "ExitProcess", + "FD_CLOEXEC", + "FD_SETSIZE", + "FILE_ACTION_ADDED", + "FILE_ACTION_MODIFIED", + "FILE_ACTION_REMOVED", + "FILE_ACTION_RENAMED_NEW_NAME", + "FILE_ACTION_RENAMED_OLD_NAME", + "FILE_APPEND_DATA", + "FILE_ATTRIBUTE_ARCHIVE", + "FILE_ATTRIBUTE_DIRECTORY", + "FILE_ATTRIBUTE_HIDDEN", + "FILE_ATTRIBUTE_NORMAL", + "FILE_ATTRIBUTE_READONLY", + "FILE_ATTRIBUTE_REPARSE_POINT", + "FILE_ATTRIBUTE_SYSTEM", + "FILE_BEGIN", + "FILE_CURRENT", + "FILE_END", + "FILE_FLAG_BACKUP_SEMANTICS", + "FILE_FLAG_OPEN_REPARSE_POINT", + "FILE_FLAG_OVERLAPPED", + "FILE_LIST_DIRECTORY", + "FILE_MAP_COPY", + "FILE_MAP_EXECUTE", + "FILE_MAP_READ", + "FILE_MAP_WRITE", + "FILE_NOTIFY_CHANGE_ATTRIBUTES", + "FILE_NOTIFY_CHANGE_CREATION", + "FILE_NOTIFY_CHANGE_DIR_NAME", + "FILE_NOTIFY_CHANGE_FILE_NAME", + "FILE_NOTIFY_CHANGE_LAST_ACCESS", + "FILE_NOTIFY_CHANGE_LAST_WRITE", + "FILE_NOTIFY_CHANGE_SIZE", + "FILE_SHARE_DELETE", + "FILE_SHARE_READ", + "FILE_SHARE_WRITE", + "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", + "FILE_SKIP_SET_EVENT_ON_HANDLE", + "FILE_TYPE_CHAR", + "FILE_TYPE_DISK", + "FILE_TYPE_PIPE", + "FILE_TYPE_REMOTE", + "FILE_TYPE_UNKNOWN", + "FILE_WRITE_ATTRIBUTES", + "FLUSHO", + "FORMAT_MESSAGE_ALLOCATE_BUFFER", + "FORMAT_MESSAGE_ARGUMENT_ARRAY", + "FORMAT_MESSAGE_FROM_HMODULE", + "FORMAT_MESSAGE_FROM_STRING", + "FORMAT_MESSAGE_FROM_SYSTEM", + "FORMAT_MESSAGE_IGNORE_INSERTS", + "FORMAT_MESSAGE_MAX_WIDTH_MASK", + "FSCTL_GET_REPARSE_POINT", + "F_ADDFILESIGS", + "F_ADDSIGS", + "F_ALLOCATEALL", + "F_ALLOCATECONTIG", + 
"F_CANCEL", + "F_CHKCLEAN", + "F_CLOSEM", + "F_DUP2FD", + "F_DUP2FD_CLOEXEC", + "F_DUPFD", + "F_DUPFD_CLOEXEC", + "F_EXLCK", + "F_FINDSIGS", + "F_FLUSH_DATA", + "F_FREEZE_FS", + "F_FSCTL", + "F_FSDIRMASK", + "F_FSIN", + "F_FSINOUT", + "F_FSOUT", + "F_FSPRIV", + "F_FSVOID", + "F_FULLFSYNC", + "F_GETCODEDIR", + "F_GETFD", + "F_GETFL", + "F_GETLEASE", + "F_GETLK", + "F_GETLK64", + "F_GETLKPID", + "F_GETNOSIGPIPE", + "F_GETOWN", + "F_GETOWN_EX", + "F_GETPATH", + "F_GETPATH_MTMINFO", + "F_GETPIPE_SZ", + "F_GETPROTECTIONCLASS", + "F_GETPROTECTIONLEVEL", + "F_GETSIG", + "F_GLOBAL_NOCACHE", + "F_LOCK", + "F_LOG2PHYS", + "F_LOG2PHYS_EXT", + "F_MARKDEPENDENCY", + "F_MAXFD", + "F_NOCACHE", + "F_NODIRECT", + "F_NOTIFY", + "F_OGETLK", + "F_OK", + "F_OSETLK", + "F_OSETLKW", + "F_PARAM_MASK", + "F_PARAM_MAX", + "F_PATHPKG_CHECK", + "F_PEOFPOSMODE", + "F_PREALLOCATE", + "F_RDADVISE", + "F_RDAHEAD", + "F_RDLCK", + "F_READAHEAD", + "F_READBOOTSTRAP", + "F_SETBACKINGSTORE", + "F_SETFD", + "F_SETFL", + "F_SETLEASE", + "F_SETLK", + "F_SETLK64", + "F_SETLKW", + "F_SETLKW64", + "F_SETLKWTIMEOUT", + "F_SETLK_REMOTE", + "F_SETNOSIGPIPE", + "F_SETOWN", + "F_SETOWN_EX", + "F_SETPIPE_SZ", + "F_SETPROTECTIONCLASS", + "F_SETSIG", + "F_SETSIZE", + "F_SHLCK", + "F_SINGLE_WRITER", + "F_TEST", + "F_THAW_FS", + "F_TLOCK", + "F_TRANSCODEKEY", + "F_ULOCK", + "F_UNLCK", + "F_UNLCKSYS", + "F_VOLPOSMODE", + "F_WRITEBOOTSTRAP", + "F_WRLCK", + "Faccessat", + "Fallocate", + "Fbootstraptransfer_t", + "Fchdir", + "Fchflags", + "Fchmod", + "Fchmodat", + "Fchown", + "Fchownat", + "FcntlFlock", + "FdSet", + "Fdatasync", + "FileNotifyInformation", + "Filetime", + "FindClose", + "FindFirstFile", + "FindNextFile", + "Flock", + "Flock_t", + "FlushBpf", + "FlushFileBuffers", + "FlushViewOfFile", + "ForkExec", + "ForkLock", + "FormatMessage", + "Fpathconf", + "FreeAddrInfoW", + "FreeEnvironmentStrings", + "FreeLibrary", + "Fsid", + "Fstat", + "Fstatat", + "Fstatfs", + "Fstore_t", + "Fsync", + "Ftruncate", + 
"FullPath", + "Futimes", + "Futimesat", + "GENERIC_ALL", + "GENERIC_EXECUTE", + "GENERIC_READ", + "GENERIC_WRITE", + "GUID", + "GetAcceptExSockaddrs", + "GetAdaptersInfo", + "GetAddrInfoW", + "GetCommandLine", + "GetComputerName", + "GetConsoleMode", + "GetCurrentDirectory", + "GetCurrentProcess", + "GetEnvironmentStrings", + "GetEnvironmentVariable", + "GetExitCodeProcess", + "GetFileAttributes", + "GetFileAttributesEx", + "GetFileExInfoStandard", + "GetFileExMaxInfoLevel", + "GetFileInformationByHandle", + "GetFileType", + "GetFullPathName", + "GetHostByName", + "GetIfEntry", + "GetLastError", + "GetLengthSid", + "GetLongPathName", + "GetProcAddress", + "GetProcessTimes", + "GetProtoByName", + "GetQueuedCompletionStatus", + "GetServByName", + "GetShortPathName", + "GetStartupInfo", + "GetStdHandle", + "GetSystemTimeAsFileTime", + "GetTempPath", + "GetTimeZoneInformation", + "GetTokenInformation", + "GetUserNameEx", + "GetUserProfileDirectory", + "GetVersion", + "Getcwd", + "Getdents", + "Getdirentries", + "Getdtablesize", + "Getegid", + "Getenv", + "Geteuid", + "Getfsstat", + "Getgid", + "Getgroups", + "Getpagesize", + "Getpeername", + "Getpgid", + "Getpgrp", + "Getpid", + "Getppid", + "Getpriority", + "Getrlimit", + "Getrusage", + "Getsid", + "Getsockname", + "Getsockopt", + "GetsockoptByte", + "GetsockoptICMPv6Filter", + "GetsockoptIPMreq", + "GetsockoptIPMreqn", + "GetsockoptIPv6MTUInfo", + "GetsockoptIPv6Mreq", + "GetsockoptInet4Addr", + "GetsockoptInt", + "GetsockoptUcred", + "Gettid", + "Gettimeofday", + "Getuid", + "Getwd", + "Getxattr", + "HANDLE_FLAG_INHERIT", + "HKEY_CLASSES_ROOT", + "HKEY_CURRENT_CONFIG", + "HKEY_CURRENT_USER", + "HKEY_DYN_DATA", + "HKEY_LOCAL_MACHINE", + "HKEY_PERFORMANCE_DATA", + "HKEY_USERS", + "HUPCL", + "Handle", + "Hostent", + "ICANON", + "ICMP6_FILTER", + "ICMPV6_FILTER", + "ICMPv6Filter", + "ICRNL", + "IEXTEN", + "IFAN_ARRIVAL", + "IFAN_DEPARTURE", + "IFA_ADDRESS", + "IFA_ANYCAST", + "IFA_BROADCAST", + "IFA_CACHEINFO", + 
"IFA_F_DADFAILED", + "IFA_F_DEPRECATED", + "IFA_F_HOMEADDRESS", + "IFA_F_NODAD", + "IFA_F_OPTIMISTIC", + "IFA_F_PERMANENT", + "IFA_F_SECONDARY", + "IFA_F_TEMPORARY", + "IFA_F_TENTATIVE", + "IFA_LABEL", + "IFA_LOCAL", + "IFA_MAX", + "IFA_MULTICAST", + "IFA_ROUTE", + "IFA_UNSPEC", + "IFF_ALLMULTI", + "IFF_ALTPHYS", + "IFF_AUTOMEDIA", + "IFF_BROADCAST", + "IFF_CANTCHANGE", + "IFF_CANTCONFIG", + "IFF_DEBUG", + "IFF_DRV_OACTIVE", + "IFF_DRV_RUNNING", + "IFF_DYING", + "IFF_DYNAMIC", + "IFF_LINK0", + "IFF_LINK1", + "IFF_LINK2", + "IFF_LOOPBACK", + "IFF_MASTER", + "IFF_MONITOR", + "IFF_MULTICAST", + "IFF_NOARP", + "IFF_NOTRAILERS", + "IFF_NO_PI", + "IFF_OACTIVE", + "IFF_ONE_QUEUE", + "IFF_POINTOPOINT", + "IFF_POINTTOPOINT", + "IFF_PORTSEL", + "IFF_PPROMISC", + "IFF_PROMISC", + "IFF_RENAMING", + "IFF_RUNNING", + "IFF_SIMPLEX", + "IFF_SLAVE", + "IFF_SMART", + "IFF_STATICARP", + "IFF_TAP", + "IFF_TUN", + "IFF_TUN_EXCL", + "IFF_UP", + "IFF_VNET_HDR", + "IFLA_ADDRESS", + "IFLA_BROADCAST", + "IFLA_COST", + "IFLA_IFALIAS", + "IFLA_IFNAME", + "IFLA_LINK", + "IFLA_LINKINFO", + "IFLA_LINKMODE", + "IFLA_MAP", + "IFLA_MASTER", + "IFLA_MAX", + "IFLA_MTU", + "IFLA_NET_NS_PID", + "IFLA_OPERSTATE", + "IFLA_PRIORITY", + "IFLA_PROTINFO", + "IFLA_QDISC", + "IFLA_STATS", + "IFLA_TXQLEN", + "IFLA_UNSPEC", + "IFLA_WEIGHT", + "IFLA_WIRELESS", + "IFNAMSIZ", + "IFT_1822", + "IFT_A12MPPSWITCH", + "IFT_AAL2", + "IFT_AAL5", + "IFT_ADSL", + "IFT_AFLANE8023", + "IFT_AFLANE8025", + "IFT_ARAP", + "IFT_ARCNET", + "IFT_ARCNETPLUS", + "IFT_ASYNC", + "IFT_ATM", + "IFT_ATMDXI", + "IFT_ATMFUNI", + "IFT_ATMIMA", + "IFT_ATMLOGICAL", + "IFT_ATMRADIO", + "IFT_ATMSUBINTERFACE", + "IFT_ATMVCIENDPT", + "IFT_ATMVIRTUAL", + "IFT_BGPPOLICYACCOUNTING", + "IFT_BLUETOOTH", + "IFT_BRIDGE", + "IFT_BSC", + "IFT_CARP", + "IFT_CCTEMUL", + "IFT_CELLULAR", + "IFT_CEPT", + "IFT_CES", + "IFT_CHANNEL", + "IFT_CNR", + "IFT_COFFEE", + "IFT_COMPOSITELINK", + "IFT_DCN", + "IFT_DIGITALPOWERLINE", + "IFT_DIGITALWRAPPEROVERHEADCHANNEL", + 
"IFT_DLSW", + "IFT_DOCSCABLEDOWNSTREAM", + "IFT_DOCSCABLEMACLAYER", + "IFT_DOCSCABLEUPSTREAM", + "IFT_DOCSCABLEUPSTREAMCHANNEL", + "IFT_DS0", + "IFT_DS0BUNDLE", + "IFT_DS1FDL", + "IFT_DS3", + "IFT_DTM", + "IFT_DUMMY", + "IFT_DVBASILN", + "IFT_DVBASIOUT", + "IFT_DVBRCCDOWNSTREAM", + "IFT_DVBRCCMACLAYER", + "IFT_DVBRCCUPSTREAM", + "IFT_ECONET", + "IFT_ENC", + "IFT_EON", + "IFT_EPLRS", + "IFT_ESCON", + "IFT_ETHER", + "IFT_FAITH", + "IFT_FAST", + "IFT_FASTETHER", + "IFT_FASTETHERFX", + "IFT_FDDI", + "IFT_FIBRECHANNEL", + "IFT_FRAMERELAYINTERCONNECT", + "IFT_FRAMERELAYMPI", + "IFT_FRDLCIENDPT", + "IFT_FRELAY", + "IFT_FRELAYDCE", + "IFT_FRF16MFRBUNDLE", + "IFT_FRFORWARD", + "IFT_G703AT2MB", + "IFT_G703AT64K", + "IFT_GIF", + "IFT_GIGABITETHERNET", + "IFT_GR303IDT", + "IFT_GR303RDT", + "IFT_H323GATEKEEPER", + "IFT_H323PROXY", + "IFT_HDH1822", + "IFT_HDLC", + "IFT_HDSL2", + "IFT_HIPERLAN2", + "IFT_HIPPI", + "IFT_HIPPIINTERFACE", + "IFT_HOSTPAD", + "IFT_HSSI", + "IFT_HY", + "IFT_IBM370PARCHAN", + "IFT_IDSL", + "IFT_IEEE1394", + "IFT_IEEE80211", + "IFT_IEEE80212", + "IFT_IEEE8023ADLAG", + "IFT_IFGSN", + "IFT_IMT", + "IFT_INFINIBAND", + "IFT_INTERLEAVE", + "IFT_IP", + "IFT_IPFORWARD", + "IFT_IPOVERATM", + "IFT_IPOVERCDLC", + "IFT_IPOVERCLAW", + "IFT_IPSWITCH", + "IFT_IPXIP", + "IFT_ISDN", + "IFT_ISDNBASIC", + "IFT_ISDNPRIMARY", + "IFT_ISDNS", + "IFT_ISDNU", + "IFT_ISO88022LLC", + "IFT_ISO88023", + "IFT_ISO88024", + "IFT_ISO88025", + "IFT_ISO88025CRFPINT", + "IFT_ISO88025DTR", + "IFT_ISO88025FIBER", + "IFT_ISO88026", + "IFT_ISUP", + "IFT_L2VLAN", + "IFT_L3IPVLAN", + "IFT_L3IPXVLAN", + "IFT_LAPB", + "IFT_LAPD", + "IFT_LAPF", + "IFT_LINEGROUP", + "IFT_LOCALTALK", + "IFT_LOOP", + "IFT_MEDIAMAILOVERIP", + "IFT_MFSIGLINK", + "IFT_MIOX25", + "IFT_MODEM", + "IFT_MPC", + "IFT_MPLS", + "IFT_MPLSTUNNEL", + "IFT_MSDSL", + "IFT_MVL", + "IFT_MYRINET", + "IFT_NFAS", + "IFT_NSIP", + "IFT_OPTICALCHANNEL", + "IFT_OPTICALTRANSPORT", + "IFT_OTHER", + "IFT_P10", + "IFT_P80", + "IFT_PARA", + 
"IFT_PDP", + "IFT_PFLOG", + "IFT_PFLOW", + "IFT_PFSYNC", + "IFT_PLC", + "IFT_PON155", + "IFT_PON622", + "IFT_POS", + "IFT_PPP", + "IFT_PPPMULTILINKBUNDLE", + "IFT_PROPATM", + "IFT_PROPBWAP2MP", + "IFT_PROPCNLS", + "IFT_PROPDOCSWIRELESSDOWNSTREAM", + "IFT_PROPDOCSWIRELESSMACLAYER", + "IFT_PROPDOCSWIRELESSUPSTREAM", + "IFT_PROPMUX", + "IFT_PROPVIRTUAL", + "IFT_PROPWIRELESSP2P", + "IFT_PTPSERIAL", + "IFT_PVC", + "IFT_Q2931", + "IFT_QLLC", + "IFT_RADIOMAC", + "IFT_RADSL", + "IFT_REACHDSL", + "IFT_RFC1483", + "IFT_RS232", + "IFT_RSRB", + "IFT_SDLC", + "IFT_SDSL", + "IFT_SHDSL", + "IFT_SIP", + "IFT_SIPSIG", + "IFT_SIPTG", + "IFT_SLIP", + "IFT_SMDSDXI", + "IFT_SMDSICIP", + "IFT_SONET", + "IFT_SONETOVERHEADCHANNEL", + "IFT_SONETPATH", + "IFT_SONETVT", + "IFT_SRP", + "IFT_SS7SIGLINK", + "IFT_STACKTOSTACK", + "IFT_STARLAN", + "IFT_STF", + "IFT_T1", + "IFT_TDLC", + "IFT_TELINK", + "IFT_TERMPAD", + "IFT_TR008", + "IFT_TRANSPHDLC", + "IFT_TUNNEL", + "IFT_ULTRA", + "IFT_USB", + "IFT_V11", + "IFT_V35", + "IFT_V36", + "IFT_V37", + "IFT_VDSL", + "IFT_VIRTUALIPADDRESS", + "IFT_VIRTUALTG", + "IFT_VOICEDID", + "IFT_VOICEEM", + "IFT_VOICEEMFGD", + "IFT_VOICEENCAP", + "IFT_VOICEFGDEANA", + "IFT_VOICEFXO", + "IFT_VOICEFXS", + "IFT_VOICEOVERATM", + "IFT_VOICEOVERCABLE", + "IFT_VOICEOVERFRAMERELAY", + "IFT_VOICEOVERIP", + "IFT_X213", + "IFT_X25", + "IFT_X25DDN", + "IFT_X25HUNTGROUP", + "IFT_X25MLP", + "IFT_X25PLE", + "IFT_XETHER", + "IGNBRK", + "IGNCR", + "IGNORE", + "IGNPAR", + "IMAXBEL", + "INFINITE", + "INLCR", + "INPCK", + "INVALID_FILE_ATTRIBUTES", + "IN_ACCESS", + "IN_ALL_EVENTS", + "IN_ATTRIB", + "IN_CLASSA_HOST", + "IN_CLASSA_MAX", + "IN_CLASSA_NET", + "IN_CLASSA_NSHIFT", + "IN_CLASSB_HOST", + "IN_CLASSB_MAX", + "IN_CLASSB_NET", + "IN_CLASSB_NSHIFT", + "IN_CLASSC_HOST", + "IN_CLASSC_NET", + "IN_CLASSC_NSHIFT", + "IN_CLASSD_HOST", + "IN_CLASSD_NET", + "IN_CLASSD_NSHIFT", + "IN_CLOEXEC", + "IN_CLOSE", + "IN_CLOSE_NOWRITE", + "IN_CLOSE_WRITE", + "IN_CREATE", + "IN_DELETE", + 
"IN_DELETE_SELF", + "IN_DONT_FOLLOW", + "IN_EXCL_UNLINK", + "IN_IGNORED", + "IN_ISDIR", + "IN_LINKLOCALNETNUM", + "IN_LOOPBACKNET", + "IN_MASK_ADD", + "IN_MODIFY", + "IN_MOVE", + "IN_MOVED_FROM", + "IN_MOVED_TO", + "IN_MOVE_SELF", + "IN_NONBLOCK", + "IN_ONESHOT", + "IN_ONLYDIR", + "IN_OPEN", + "IN_Q_OVERFLOW", + "IN_RFC3021_HOST", + "IN_RFC3021_MASK", + "IN_RFC3021_NET", + "IN_RFC3021_NSHIFT", + "IN_UNMOUNT", + "IOC_IN", + "IOC_INOUT", + "IOC_OUT", + "IOC_VENDOR", + "IOC_WS2", + "IO_REPARSE_TAG_SYMLINK", + "IPMreq", + "IPMreqn", + "IPPROTO_3PC", + "IPPROTO_ADFS", + "IPPROTO_AH", + "IPPROTO_AHIP", + "IPPROTO_APES", + "IPPROTO_ARGUS", + "IPPROTO_AX25", + "IPPROTO_BHA", + "IPPROTO_BLT", + "IPPROTO_BRSATMON", + "IPPROTO_CARP", + "IPPROTO_CFTP", + "IPPROTO_CHAOS", + "IPPROTO_CMTP", + "IPPROTO_COMP", + "IPPROTO_CPHB", + "IPPROTO_CPNX", + "IPPROTO_DCCP", + "IPPROTO_DDP", + "IPPROTO_DGP", + "IPPROTO_DIVERT", + "IPPROTO_DIVERT_INIT", + "IPPROTO_DIVERT_RESP", + "IPPROTO_DONE", + "IPPROTO_DSTOPTS", + "IPPROTO_EGP", + "IPPROTO_EMCON", + "IPPROTO_ENCAP", + "IPPROTO_EON", + "IPPROTO_ESP", + "IPPROTO_ETHERIP", + "IPPROTO_FRAGMENT", + "IPPROTO_GGP", + "IPPROTO_GMTP", + "IPPROTO_GRE", + "IPPROTO_HELLO", + "IPPROTO_HMP", + "IPPROTO_HOPOPTS", + "IPPROTO_ICMP", + "IPPROTO_ICMPV6", + "IPPROTO_IDP", + "IPPROTO_IDPR", + "IPPROTO_IDRP", + "IPPROTO_IGMP", + "IPPROTO_IGP", + "IPPROTO_IGRP", + "IPPROTO_IL", + "IPPROTO_INLSP", + "IPPROTO_INP", + "IPPROTO_IP", + "IPPROTO_IPCOMP", + "IPPROTO_IPCV", + "IPPROTO_IPEIP", + "IPPROTO_IPIP", + "IPPROTO_IPPC", + "IPPROTO_IPV4", + "IPPROTO_IPV6", + "IPPROTO_IPV6_ICMP", + "IPPROTO_IRTP", + "IPPROTO_KRYPTOLAN", + "IPPROTO_LARP", + "IPPROTO_LEAF1", + "IPPROTO_LEAF2", + "IPPROTO_MAX", + "IPPROTO_MAXID", + "IPPROTO_MEAS", + "IPPROTO_MH", + "IPPROTO_MHRP", + "IPPROTO_MICP", + "IPPROTO_MOBILE", + "IPPROTO_MPLS", + "IPPROTO_MTP", + "IPPROTO_MUX", + "IPPROTO_ND", + "IPPROTO_NHRP", + "IPPROTO_NONE", + "IPPROTO_NSP", + "IPPROTO_NVPII", + "IPPROTO_OLD_DIVERT", + 
"IPPROTO_OSPFIGP", + "IPPROTO_PFSYNC", + "IPPROTO_PGM", + "IPPROTO_PIGP", + "IPPROTO_PIM", + "IPPROTO_PRM", + "IPPROTO_PUP", + "IPPROTO_PVP", + "IPPROTO_RAW", + "IPPROTO_RCCMON", + "IPPROTO_RDP", + "IPPROTO_ROUTING", + "IPPROTO_RSVP", + "IPPROTO_RVD", + "IPPROTO_SATEXPAK", + "IPPROTO_SATMON", + "IPPROTO_SCCSP", + "IPPROTO_SCTP", + "IPPROTO_SDRP", + "IPPROTO_SEND", + "IPPROTO_SEP", + "IPPROTO_SKIP", + "IPPROTO_SPACER", + "IPPROTO_SRPC", + "IPPROTO_ST", + "IPPROTO_SVMTP", + "IPPROTO_SWIPE", + "IPPROTO_TCF", + "IPPROTO_TCP", + "IPPROTO_TLSP", + "IPPROTO_TP", + "IPPROTO_TPXX", + "IPPROTO_TRUNK1", + "IPPROTO_TRUNK2", + "IPPROTO_TTP", + "IPPROTO_UDP", + "IPPROTO_UDPLITE", + "IPPROTO_VINES", + "IPPROTO_VISA", + "IPPROTO_VMTP", + "IPPROTO_VRRP", + "IPPROTO_WBEXPAK", + "IPPROTO_WBMON", + "IPPROTO_WSN", + "IPPROTO_XNET", + "IPPROTO_XTP", + "IPV6_2292DSTOPTS", + "IPV6_2292HOPLIMIT", + "IPV6_2292HOPOPTS", + "IPV6_2292NEXTHOP", + "IPV6_2292PKTINFO", + "IPV6_2292PKTOPTIONS", + "IPV6_2292RTHDR", + "IPV6_ADDRFORM", + "IPV6_ADD_MEMBERSHIP", + "IPV6_AUTHHDR", + "IPV6_AUTH_LEVEL", + "IPV6_AUTOFLOWLABEL", + "IPV6_BINDANY", + "IPV6_BINDV6ONLY", + "IPV6_BOUND_IF", + "IPV6_CHECKSUM", + "IPV6_DEFAULT_MULTICAST_HOPS", + "IPV6_DEFAULT_MULTICAST_LOOP", + "IPV6_DEFHLIM", + "IPV6_DONTFRAG", + "IPV6_DROP_MEMBERSHIP", + "IPV6_DSTOPTS", + "IPV6_ESP_NETWORK_LEVEL", + "IPV6_ESP_TRANS_LEVEL", + "IPV6_FAITH", + "IPV6_FLOWINFO_MASK", + "IPV6_FLOWLABEL_MASK", + "IPV6_FRAGTTL", + "IPV6_FW_ADD", + "IPV6_FW_DEL", + "IPV6_FW_FLUSH", + "IPV6_FW_GET", + "IPV6_FW_ZERO", + "IPV6_HLIMDEC", + "IPV6_HOPLIMIT", + "IPV6_HOPOPTS", + "IPV6_IPCOMP_LEVEL", + "IPV6_IPSEC_POLICY", + "IPV6_JOIN_ANYCAST", + "IPV6_JOIN_GROUP", + "IPV6_LEAVE_ANYCAST", + "IPV6_LEAVE_GROUP", + "IPV6_MAXHLIM", + "IPV6_MAXOPTHDR", + "IPV6_MAXPACKET", + "IPV6_MAX_GROUP_SRC_FILTER", + "IPV6_MAX_MEMBERSHIPS", + "IPV6_MAX_SOCK_SRC_FILTER", + "IPV6_MIN_MEMBERSHIPS", + "IPV6_MMTU", + "IPV6_MSFILTER", + "IPV6_MTU", + "IPV6_MTU_DISCOVER", + 
"IPV6_MULTICAST_HOPS", + "IPV6_MULTICAST_IF", + "IPV6_MULTICAST_LOOP", + "IPV6_NEXTHOP", + "IPV6_OPTIONS", + "IPV6_PATHMTU", + "IPV6_PIPEX", + "IPV6_PKTINFO", + "IPV6_PMTUDISC_DO", + "IPV6_PMTUDISC_DONT", + "IPV6_PMTUDISC_PROBE", + "IPV6_PMTUDISC_WANT", + "IPV6_PORTRANGE", + "IPV6_PORTRANGE_DEFAULT", + "IPV6_PORTRANGE_HIGH", + "IPV6_PORTRANGE_LOW", + "IPV6_PREFER_TEMPADDR", + "IPV6_RECVDSTOPTS", + "IPV6_RECVDSTPORT", + "IPV6_RECVERR", + "IPV6_RECVHOPLIMIT", + "IPV6_RECVHOPOPTS", + "IPV6_RECVPATHMTU", + "IPV6_RECVPKTINFO", + "IPV6_RECVRTHDR", + "IPV6_RECVTCLASS", + "IPV6_ROUTER_ALERT", + "IPV6_RTABLE", + "IPV6_RTHDR", + "IPV6_RTHDRDSTOPTS", + "IPV6_RTHDR_LOOSE", + "IPV6_RTHDR_STRICT", + "IPV6_RTHDR_TYPE_0", + "IPV6_RXDSTOPTS", + "IPV6_RXHOPOPTS", + "IPV6_SOCKOPT_RESERVED1", + "IPV6_TCLASS", + "IPV6_UNICAST_HOPS", + "IPV6_USE_MIN_MTU", + "IPV6_V6ONLY", + "IPV6_VERSION", + "IPV6_VERSION_MASK", + "IPV6_XFRM_POLICY", + "IP_ADD_MEMBERSHIP", + "IP_ADD_SOURCE_MEMBERSHIP", + "IP_AUTH_LEVEL", + "IP_BINDANY", + "IP_BLOCK_SOURCE", + "IP_BOUND_IF", + "IP_DEFAULT_MULTICAST_LOOP", + "IP_DEFAULT_MULTICAST_TTL", + "IP_DF", + "IP_DIVERTFL", + "IP_DONTFRAG", + "IP_DROP_MEMBERSHIP", + "IP_DROP_SOURCE_MEMBERSHIP", + "IP_DUMMYNET3", + "IP_DUMMYNET_CONFIGURE", + "IP_DUMMYNET_DEL", + "IP_DUMMYNET_FLUSH", + "IP_DUMMYNET_GET", + "IP_EF", + "IP_ERRORMTU", + "IP_ESP_NETWORK_LEVEL", + "IP_ESP_TRANS_LEVEL", + "IP_FAITH", + "IP_FREEBIND", + "IP_FW3", + "IP_FW_ADD", + "IP_FW_DEL", + "IP_FW_FLUSH", + "IP_FW_GET", + "IP_FW_NAT_CFG", + "IP_FW_NAT_DEL", + "IP_FW_NAT_GET_CONFIG", + "IP_FW_NAT_GET_LOG", + "IP_FW_RESETLOG", + "IP_FW_TABLE_ADD", + "IP_FW_TABLE_DEL", + "IP_FW_TABLE_FLUSH", + "IP_FW_TABLE_GETSIZE", + "IP_FW_TABLE_LIST", + "IP_FW_ZERO", + "IP_HDRINCL", + "IP_IPCOMP_LEVEL", + "IP_IPSECFLOWINFO", + "IP_IPSEC_LOCAL_AUTH", + "IP_IPSEC_LOCAL_CRED", + "IP_IPSEC_LOCAL_ID", + "IP_IPSEC_POLICY", + "IP_IPSEC_REMOTE_AUTH", + "IP_IPSEC_REMOTE_CRED", + "IP_IPSEC_REMOTE_ID", + "IP_MAXPACKET", + 
"IP_MAX_GROUP_SRC_FILTER", + "IP_MAX_MEMBERSHIPS", + "IP_MAX_SOCK_MUTE_FILTER", + "IP_MAX_SOCK_SRC_FILTER", + "IP_MAX_SOURCE_FILTER", + "IP_MF", + "IP_MINFRAGSIZE", + "IP_MINTTL", + "IP_MIN_MEMBERSHIPS", + "IP_MSFILTER", + "IP_MSS", + "IP_MTU", + "IP_MTU_DISCOVER", + "IP_MULTICAST_IF", + "IP_MULTICAST_IFINDEX", + "IP_MULTICAST_LOOP", + "IP_MULTICAST_TTL", + "IP_MULTICAST_VIF", + "IP_NAT__XXX", + "IP_OFFMASK", + "IP_OLD_FW_ADD", + "IP_OLD_FW_DEL", + "IP_OLD_FW_FLUSH", + "IP_OLD_FW_GET", + "IP_OLD_FW_RESETLOG", + "IP_OLD_FW_ZERO", + "IP_ONESBCAST", + "IP_OPTIONS", + "IP_ORIGDSTADDR", + "IP_PASSSEC", + "IP_PIPEX", + "IP_PKTINFO", + "IP_PKTOPTIONS", + "IP_PMTUDISC", + "IP_PMTUDISC_DO", + "IP_PMTUDISC_DONT", + "IP_PMTUDISC_PROBE", + "IP_PMTUDISC_WANT", + "IP_PORTRANGE", + "IP_PORTRANGE_DEFAULT", + "IP_PORTRANGE_HIGH", + "IP_PORTRANGE_LOW", + "IP_RECVDSTADDR", + "IP_RECVDSTPORT", + "IP_RECVERR", + "IP_RECVIF", + "IP_RECVOPTS", + "IP_RECVORIGDSTADDR", + "IP_RECVPKTINFO", + "IP_RECVRETOPTS", + "IP_RECVRTABLE", + "IP_RECVTOS", + "IP_RECVTTL", + "IP_RETOPTS", + "IP_RF", + "IP_ROUTER_ALERT", + "IP_RSVP_OFF", + "IP_RSVP_ON", + "IP_RSVP_VIF_OFF", + "IP_RSVP_VIF_ON", + "IP_RTABLE", + "IP_SENDSRCADDR", + "IP_STRIPHDR", + "IP_TOS", + "IP_TRAFFIC_MGT_BACKGROUND", + "IP_TRANSPARENT", + "IP_TTL", + "IP_UNBLOCK_SOURCE", + "IP_XFRM_POLICY", + "IPv6MTUInfo", + "IPv6Mreq", + "ISIG", + "ISTRIP", + "IUCLC", + "IUTF8", + "IXANY", + "IXOFF", + "IXON", + "IfAddrmsg", + "IfAnnounceMsghdr", + "IfData", + "IfInfomsg", + "IfMsghdr", + "IfaMsghdr", + "IfmaMsghdr", + "IfmaMsghdr2", + "ImplementsGetwd", + "Inet4Pktinfo", + "Inet6Pktinfo", + "InotifyAddWatch", + "InotifyEvent", + "InotifyInit", + "InotifyInit1", + "InotifyRmWatch", + "InterfaceAddrMessage", + "InterfaceAnnounceMessage", + "InterfaceInfo", + "InterfaceMessage", + "InterfaceMulticastAddrMessage", + "InvalidHandle", + "Ioperm", + "Iopl", + "Iovec", + "IpAdapterInfo", + "IpAddrString", + "IpAddressString", + "IpMaskString", + 
"Issetugid", + "KEY_ALL_ACCESS", + "KEY_CREATE_LINK", + "KEY_CREATE_SUB_KEY", + "KEY_ENUMERATE_SUB_KEYS", + "KEY_EXECUTE", + "KEY_NOTIFY", + "KEY_QUERY_VALUE", + "KEY_READ", + "KEY_SET_VALUE", + "KEY_WOW64_32KEY", + "KEY_WOW64_64KEY", + "KEY_WRITE", + "Kevent", + "Kevent_t", + "Kill", + "Klogctl", + "Kqueue", + "LANG_ENGLISH", + "LAYERED_PROTOCOL", + "LCNT_OVERLOAD_FLUSH", + "LINUX_REBOOT_CMD_CAD_OFF", + "LINUX_REBOOT_CMD_CAD_ON", + "LINUX_REBOOT_CMD_HALT", + "LINUX_REBOOT_CMD_KEXEC", + "LINUX_REBOOT_CMD_POWER_OFF", + "LINUX_REBOOT_CMD_RESTART", + "LINUX_REBOOT_CMD_RESTART2", + "LINUX_REBOOT_CMD_SW_SUSPEND", + "LINUX_REBOOT_MAGIC1", + "LINUX_REBOOT_MAGIC2", + "LOCK_EX", + "LOCK_NB", + "LOCK_SH", + "LOCK_UN", + "LazyDLL", + "LazyProc", + "Lchown", + "Linger", + "Link", + "Listen", + "Listxattr", + "LoadCancelIoEx", + "LoadConnectEx", + "LoadCreateSymbolicLink", + "LoadDLL", + "LoadGetAddrInfo", + "LoadLibrary", + "LoadSetFileCompletionNotificationModes", + "LocalFree", + "Log2phys_t", + "LookupAccountName", + "LookupAccountSid", + "LookupSID", + "LsfJump", + "LsfSocket", + "LsfStmt", + "Lstat", + "MADV_AUTOSYNC", + "MADV_CAN_REUSE", + "MADV_CORE", + "MADV_DOFORK", + "MADV_DONTFORK", + "MADV_DONTNEED", + "MADV_FREE", + "MADV_FREE_REUSABLE", + "MADV_FREE_REUSE", + "MADV_HUGEPAGE", + "MADV_HWPOISON", + "MADV_MERGEABLE", + "MADV_NOCORE", + "MADV_NOHUGEPAGE", + "MADV_NORMAL", + "MADV_NOSYNC", + "MADV_PROTECT", + "MADV_RANDOM", + "MADV_REMOVE", + "MADV_SEQUENTIAL", + "MADV_SPACEAVAIL", + "MADV_UNMERGEABLE", + "MADV_WILLNEED", + "MADV_ZERO_WIRED_PAGES", + "MAP_32BIT", + "MAP_ALIGNED_SUPER", + "MAP_ALIGNMENT_16MB", + "MAP_ALIGNMENT_1TB", + "MAP_ALIGNMENT_256TB", + "MAP_ALIGNMENT_4GB", + "MAP_ALIGNMENT_64KB", + "MAP_ALIGNMENT_64PB", + "MAP_ALIGNMENT_MASK", + "MAP_ALIGNMENT_SHIFT", + "MAP_ANON", + "MAP_ANONYMOUS", + "MAP_COPY", + "MAP_DENYWRITE", + "MAP_EXECUTABLE", + "MAP_FILE", + "MAP_FIXED", + "MAP_FLAGMASK", + "MAP_GROWSDOWN", + "MAP_HASSEMAPHORE", + "MAP_HUGETLB", + 
"MAP_INHERIT", + "MAP_INHERIT_COPY", + "MAP_INHERIT_DEFAULT", + "MAP_INHERIT_DONATE_COPY", + "MAP_INHERIT_NONE", + "MAP_INHERIT_SHARE", + "MAP_JIT", + "MAP_LOCKED", + "MAP_NOCACHE", + "MAP_NOCORE", + "MAP_NOEXTEND", + "MAP_NONBLOCK", + "MAP_NORESERVE", + "MAP_NOSYNC", + "MAP_POPULATE", + "MAP_PREFAULT_READ", + "MAP_PRIVATE", + "MAP_RENAME", + "MAP_RESERVED0080", + "MAP_RESERVED0100", + "MAP_SHARED", + "MAP_STACK", + "MAP_TRYFIXED", + "MAP_TYPE", + "MAP_WIRED", + "MAXIMUM_REPARSE_DATA_BUFFER_SIZE", + "MAXLEN_IFDESCR", + "MAXLEN_PHYSADDR", + "MAX_ADAPTER_ADDRESS_LENGTH", + "MAX_ADAPTER_DESCRIPTION_LENGTH", + "MAX_ADAPTER_NAME_LENGTH", + "MAX_COMPUTERNAME_LENGTH", + "MAX_INTERFACE_NAME_LEN", + "MAX_LONG_PATH", + "MAX_PATH", + "MAX_PROTOCOL_CHAIN", + "MCL_CURRENT", + "MCL_FUTURE", + "MNT_DETACH", + "MNT_EXPIRE", + "MNT_FORCE", + "MSG_BCAST", + "MSG_CMSG_CLOEXEC", + "MSG_COMPAT", + "MSG_CONFIRM", + "MSG_CONTROLMBUF", + "MSG_CTRUNC", + "MSG_DONTROUTE", + "MSG_DONTWAIT", + "MSG_EOF", + "MSG_EOR", + "MSG_ERRQUEUE", + "MSG_FASTOPEN", + "MSG_FIN", + "MSG_FLUSH", + "MSG_HAVEMORE", + "MSG_HOLD", + "MSG_IOVUSRSPACE", + "MSG_LENUSRSPACE", + "MSG_MCAST", + "MSG_MORE", + "MSG_NAMEMBUF", + "MSG_NBIO", + "MSG_NEEDSA", + "MSG_NOSIGNAL", + "MSG_NOTIFICATION", + "MSG_OOB", + "MSG_PEEK", + "MSG_PROXY", + "MSG_RCVMORE", + "MSG_RST", + "MSG_SEND", + "MSG_SYN", + "MSG_TRUNC", + "MSG_TRYHARD", + "MSG_USERFLAGS", + "MSG_WAITALL", + "MSG_WAITFORONE", + "MSG_WAITSTREAM", + "MS_ACTIVE", + "MS_ASYNC", + "MS_BIND", + "MS_DEACTIVATE", + "MS_DIRSYNC", + "MS_INVALIDATE", + "MS_I_VERSION", + "MS_KERNMOUNT", + "MS_KILLPAGES", + "MS_MANDLOCK", + "MS_MGC_MSK", + "MS_MGC_VAL", + "MS_MOVE", + "MS_NOATIME", + "MS_NODEV", + "MS_NODIRATIME", + "MS_NOEXEC", + "MS_NOSUID", + "MS_NOUSER", + "MS_POSIXACL", + "MS_PRIVATE", + "MS_RDONLY", + "MS_REC", + "MS_RELATIME", + "MS_REMOUNT", + "MS_RMT_MASK", + "MS_SHARED", + "MS_SILENT", + "MS_SLAVE", + "MS_STRICTATIME", + "MS_SYNC", + "MS_SYNCHRONOUS", + "MS_UNBINDABLE", 
+ "Madvise", + "MapViewOfFile", + "MaxTokenInfoClass", + "Mclpool", + "MibIfRow", + "Mkdir", + "Mkdirat", + "Mkfifo", + "Mknod", + "Mknodat", + "Mlock", + "Mlockall", + "Mmap", + "Mount", + "MoveFile", + "Mprotect", + "Msghdr", + "Munlock", + "Munlockall", + "Munmap", + "MustLoadDLL", + "NAME_MAX", + "NETLINK_ADD_MEMBERSHIP", + "NETLINK_AUDIT", + "NETLINK_BROADCAST_ERROR", + "NETLINK_CONNECTOR", + "NETLINK_DNRTMSG", + "NETLINK_DROP_MEMBERSHIP", + "NETLINK_ECRYPTFS", + "NETLINK_FIB_LOOKUP", + "NETLINK_FIREWALL", + "NETLINK_GENERIC", + "NETLINK_INET_DIAG", + "NETLINK_IP6_FW", + "NETLINK_ISCSI", + "NETLINK_KOBJECT_UEVENT", + "NETLINK_NETFILTER", + "NETLINK_NFLOG", + "NETLINK_NO_ENOBUFS", + "NETLINK_PKTINFO", + "NETLINK_RDMA", + "NETLINK_ROUTE", + "NETLINK_SCSITRANSPORT", + "NETLINK_SELINUX", + "NETLINK_UNUSED", + "NETLINK_USERSOCK", + "NETLINK_XFRM", + "NET_RT_DUMP", + "NET_RT_DUMP2", + "NET_RT_FLAGS", + "NET_RT_IFLIST", + "NET_RT_IFLIST2", + "NET_RT_IFLISTL", + "NET_RT_IFMALIST", + "NET_RT_MAXID", + "NET_RT_OIFLIST", + "NET_RT_OOIFLIST", + "NET_RT_STAT", + "NET_RT_STATS", + "NET_RT_TABLE", + "NET_RT_TRASH", + "NLA_ALIGNTO", + "NLA_F_NESTED", + "NLA_F_NET_BYTEORDER", + "NLA_HDRLEN", + "NLMSG_ALIGNTO", + "NLMSG_DONE", + "NLMSG_ERROR", + "NLMSG_HDRLEN", + "NLMSG_MIN_TYPE", + "NLMSG_NOOP", + "NLMSG_OVERRUN", + "NLM_F_ACK", + "NLM_F_APPEND", + "NLM_F_ATOMIC", + "NLM_F_CREATE", + "NLM_F_DUMP", + "NLM_F_ECHO", + "NLM_F_EXCL", + "NLM_F_MATCH", + "NLM_F_MULTI", + "NLM_F_REPLACE", + "NLM_F_REQUEST", + "NLM_F_ROOT", + "NOFLSH", + "NOTE_ABSOLUTE", + "NOTE_ATTRIB", + "NOTE_BACKGROUND", + "NOTE_CHILD", + "NOTE_CRITICAL", + "NOTE_DELETE", + "NOTE_EOF", + "NOTE_EXEC", + "NOTE_EXIT", + "NOTE_EXITSTATUS", + "NOTE_EXIT_CSERROR", + "NOTE_EXIT_DECRYPTFAIL", + "NOTE_EXIT_DETAIL", + "NOTE_EXIT_DETAIL_MASK", + "NOTE_EXIT_MEMORY", + "NOTE_EXIT_REPARENTED", + "NOTE_EXTEND", + "NOTE_FFAND", + "NOTE_FFCOPY", + "NOTE_FFCTRLMASK", + "NOTE_FFLAGSMASK", + "NOTE_FFNOP", + "NOTE_FFOR", + "NOTE_FORK", 
+ "NOTE_LEEWAY", + "NOTE_LINK", + "NOTE_LOWAT", + "NOTE_NONE", + "NOTE_NSECONDS", + "NOTE_PCTRLMASK", + "NOTE_PDATAMASK", + "NOTE_REAP", + "NOTE_RENAME", + "NOTE_RESOURCEEND", + "NOTE_REVOKE", + "NOTE_SECONDS", + "NOTE_SIGNAL", + "NOTE_TRACK", + "NOTE_TRACKERR", + "NOTE_TRIGGER", + "NOTE_TRUNCATE", + "NOTE_USECONDS", + "NOTE_VM_ERROR", + "NOTE_VM_PRESSURE", + "NOTE_VM_PRESSURE_SUDDEN_TERMINATE", + "NOTE_VM_PRESSURE_TERMINATE", + "NOTE_WRITE", + "NameCanonical", + "NameCanonicalEx", + "NameDisplay", + "NameDnsDomain", + "NameFullyQualifiedDN", + "NameSamCompatible", + "NameServicePrincipal", + "NameUniqueId", + "NameUnknown", + "NameUserPrincipal", + "Nanosleep", + "NetApiBufferFree", + "NetGetJoinInformation", + "NetSetupDomainName", + "NetSetupUnjoined", + "NetSetupUnknownStatus", + "NetSetupWorkgroupName", + "NetUserGetInfo", + "NetlinkMessage", + "NetlinkRIB", + "NetlinkRouteAttr", + "NetlinkRouteRequest", + "NewCallback", + "NewCallbackCDecl", + "NewLazyDLL", + "NlAttr", + "NlMsgerr", + "NlMsghdr", + "NsecToFiletime", + "NsecToTimespec", + "NsecToTimeval", + "Ntohs", + "OCRNL", + "OFDEL", + "OFILL", + "OFIOGETBMAP", + "OID_PKIX_KP_SERVER_AUTH", + "OID_SERVER_GATED_CRYPTO", + "OID_SGC_NETSCAPE", + "OLCUC", + "ONLCR", + "ONLRET", + "ONOCR", + "ONOEOT", + "OPEN_ALWAYS", + "OPEN_EXISTING", + "OPOST", + "O_ACCMODE", + "O_ALERT", + "O_ALT_IO", + "O_APPEND", + "O_ASYNC", + "O_CLOEXEC", + "O_CREAT", + "O_DIRECT", + "O_DIRECTORY", + "O_DP_GETRAWENCRYPTED", + "O_DSYNC", + "O_EVTONLY", + "O_EXCL", + "O_EXEC", + "O_EXLOCK", + "O_FSYNC", + "O_LARGEFILE", + "O_NDELAY", + "O_NOATIME", + "O_NOCTTY", + "O_NOFOLLOW", + "O_NONBLOCK", + "O_NOSIGPIPE", + "O_POPUP", + "O_RDONLY", + "O_RDWR", + "O_RSYNC", + "O_SHLOCK", + "O_SYMLINK", + "O_SYNC", + "O_TRUNC", + "O_TTY_INIT", + "O_WRONLY", + "Open", + "OpenCurrentProcessToken", + "OpenProcess", + "OpenProcessToken", + "Openat", + "Overlapped", + "PACKET_ADD_MEMBERSHIP", + "PACKET_BROADCAST", + "PACKET_DROP_MEMBERSHIP", + 
"PACKET_FASTROUTE", + "PACKET_HOST", + "PACKET_LOOPBACK", + "PACKET_MR_ALLMULTI", + "PACKET_MR_MULTICAST", + "PACKET_MR_PROMISC", + "PACKET_MULTICAST", + "PACKET_OTHERHOST", + "PACKET_OUTGOING", + "PACKET_RECV_OUTPUT", + "PACKET_RX_RING", + "PACKET_STATISTICS", + "PAGE_EXECUTE_READ", + "PAGE_EXECUTE_READWRITE", + "PAGE_EXECUTE_WRITECOPY", + "PAGE_READONLY", + "PAGE_READWRITE", + "PAGE_WRITECOPY", + "PARENB", + "PARMRK", + "PARODD", + "PENDIN", + "PFL_HIDDEN", + "PFL_MATCHES_PROTOCOL_ZERO", + "PFL_MULTIPLE_PROTO_ENTRIES", + "PFL_NETWORKDIRECT_PROVIDER", + "PFL_RECOMMENDED_PROTO_ENTRY", + "PF_FLUSH", + "PKCS_7_ASN_ENCODING", + "PMC5_PIPELINE_FLUSH", + "PRIO_PGRP", + "PRIO_PROCESS", + "PRIO_USER", + "PRI_IOFLUSH", + "PROCESS_QUERY_INFORMATION", + "PROCESS_TERMINATE", + "PROT_EXEC", + "PROT_GROWSDOWN", + "PROT_GROWSUP", + "PROT_NONE", + "PROT_READ", + "PROT_WRITE", + "PROV_DH_SCHANNEL", + "PROV_DSS", + "PROV_DSS_DH", + "PROV_EC_ECDSA_FULL", + "PROV_EC_ECDSA_SIG", + "PROV_EC_ECNRA_FULL", + "PROV_EC_ECNRA_SIG", + "PROV_FORTEZZA", + "PROV_INTEL_SEC", + "PROV_MS_EXCHANGE", + "PROV_REPLACE_OWF", + "PROV_RNG", + "PROV_RSA_AES", + "PROV_RSA_FULL", + "PROV_RSA_SCHANNEL", + "PROV_RSA_SIG", + "PROV_SPYRUS_LYNKS", + "PROV_SSL", + "PR_CAPBSET_DROP", + "PR_CAPBSET_READ", + "PR_CLEAR_SECCOMP_FILTER", + "PR_ENDIAN_BIG", + "PR_ENDIAN_LITTLE", + "PR_ENDIAN_PPC_LITTLE", + "PR_FPEMU_NOPRINT", + "PR_FPEMU_SIGFPE", + "PR_FP_EXC_ASYNC", + "PR_FP_EXC_DISABLED", + "PR_FP_EXC_DIV", + "PR_FP_EXC_INV", + "PR_FP_EXC_NONRECOV", + "PR_FP_EXC_OVF", + "PR_FP_EXC_PRECISE", + "PR_FP_EXC_RES", + "PR_FP_EXC_SW_ENABLE", + "PR_FP_EXC_UND", + "PR_GET_DUMPABLE", + "PR_GET_ENDIAN", + "PR_GET_FPEMU", + "PR_GET_FPEXC", + "PR_GET_KEEPCAPS", + "PR_GET_NAME", + "PR_GET_PDEATHSIG", + "PR_GET_SECCOMP", + "PR_GET_SECCOMP_FILTER", + "PR_GET_SECUREBITS", + "PR_GET_TIMERSLACK", + "PR_GET_TIMING", + "PR_GET_TSC", + "PR_GET_UNALIGN", + "PR_MCE_KILL", + "PR_MCE_KILL_CLEAR", + "PR_MCE_KILL_DEFAULT", + "PR_MCE_KILL_EARLY", + 
"PR_MCE_KILL_GET", + "PR_MCE_KILL_LATE", + "PR_MCE_KILL_SET", + "PR_SECCOMP_FILTER_EVENT", + "PR_SECCOMP_FILTER_SYSCALL", + "PR_SET_DUMPABLE", + "PR_SET_ENDIAN", + "PR_SET_FPEMU", + "PR_SET_FPEXC", + "PR_SET_KEEPCAPS", + "PR_SET_NAME", + "PR_SET_PDEATHSIG", + "PR_SET_PTRACER", + "PR_SET_SECCOMP", + "PR_SET_SECCOMP_FILTER", + "PR_SET_SECUREBITS", + "PR_SET_TIMERSLACK", + "PR_SET_TIMING", + "PR_SET_TSC", + "PR_SET_UNALIGN", + "PR_TASK_PERF_EVENTS_DISABLE", + "PR_TASK_PERF_EVENTS_ENABLE", + "PR_TIMING_STATISTICAL", + "PR_TIMING_TIMESTAMP", + "PR_TSC_ENABLE", + "PR_TSC_SIGSEGV", + "PR_UNALIGN_NOPRINT", + "PR_UNALIGN_SIGBUS", + "PTRACE_ARCH_PRCTL", + "PTRACE_ATTACH", + "PTRACE_CONT", + "PTRACE_DETACH", + "PTRACE_EVENT_CLONE", + "PTRACE_EVENT_EXEC", + "PTRACE_EVENT_EXIT", + "PTRACE_EVENT_FORK", + "PTRACE_EVENT_VFORK", + "PTRACE_EVENT_VFORK_DONE", + "PTRACE_GETCRUNCHREGS", + "PTRACE_GETEVENTMSG", + "PTRACE_GETFPREGS", + "PTRACE_GETFPXREGS", + "PTRACE_GETHBPREGS", + "PTRACE_GETREGS", + "PTRACE_GETREGSET", + "PTRACE_GETSIGINFO", + "PTRACE_GETVFPREGS", + "PTRACE_GETWMMXREGS", + "PTRACE_GET_THREAD_AREA", + "PTRACE_KILL", + "PTRACE_OLDSETOPTIONS", + "PTRACE_O_MASK", + "PTRACE_O_TRACECLONE", + "PTRACE_O_TRACEEXEC", + "PTRACE_O_TRACEEXIT", + "PTRACE_O_TRACEFORK", + "PTRACE_O_TRACESYSGOOD", + "PTRACE_O_TRACEVFORK", + "PTRACE_O_TRACEVFORKDONE", + "PTRACE_PEEKDATA", + "PTRACE_PEEKTEXT", + "PTRACE_PEEKUSR", + "PTRACE_POKEDATA", + "PTRACE_POKETEXT", + "PTRACE_POKEUSR", + "PTRACE_SETCRUNCHREGS", + "PTRACE_SETFPREGS", + "PTRACE_SETFPXREGS", + "PTRACE_SETHBPREGS", + "PTRACE_SETOPTIONS", + "PTRACE_SETREGS", + "PTRACE_SETREGSET", + "PTRACE_SETSIGINFO", + "PTRACE_SETVFPREGS", + "PTRACE_SETWMMXREGS", + "PTRACE_SET_SYSCALL", + "PTRACE_SET_THREAD_AREA", + "PTRACE_SINGLEBLOCK", + "PTRACE_SINGLESTEP", + "PTRACE_SYSCALL", + "PTRACE_SYSEMU", + "PTRACE_SYSEMU_SINGLESTEP", + "PTRACE_TRACEME", + "PT_ATTACH", + "PT_ATTACHEXC", + "PT_CONTINUE", + "PT_DATA_ADDR", + "PT_DENY_ATTACH", + "PT_DETACH", + 
"PT_FIRSTMACH", + "PT_FORCEQUOTA", + "PT_KILL", + "PT_MASK", + "PT_READ_D", + "PT_READ_I", + "PT_READ_U", + "PT_SIGEXC", + "PT_STEP", + "PT_TEXT_ADDR", + "PT_TEXT_END_ADDR", + "PT_THUPDATE", + "PT_TRACE_ME", + "PT_WRITE_D", + "PT_WRITE_I", + "PT_WRITE_U", + "ParseDirent", + "ParseNetlinkMessage", + "ParseNetlinkRouteAttr", + "ParseRoutingMessage", + "ParseRoutingSockaddr", + "ParseSocketControlMessage", + "ParseUnixCredentials", + "ParseUnixRights", + "PathMax", + "Pathconf", + "Pause", + "Pipe", + "Pipe2", + "PivotRoot", + "Pointer", + "PostQueuedCompletionStatus", + "Pread", + "Proc", + "ProcAttr", + "Process32First", + "Process32Next", + "ProcessEntry32", + "ProcessInformation", + "Protoent", + "PtraceAttach", + "PtraceCont", + "PtraceDetach", + "PtraceGetEventMsg", + "PtraceGetRegs", + "PtracePeekData", + "PtracePeekText", + "PtracePokeData", + "PtracePokeText", + "PtraceRegs", + "PtraceSetOptions", + "PtraceSetRegs", + "PtraceSingleStep", + "PtraceSyscall", + "Pwrite", + "REG_BINARY", + "REG_DWORD", + "REG_DWORD_BIG_ENDIAN", + "REG_DWORD_LITTLE_ENDIAN", + "REG_EXPAND_SZ", + "REG_FULL_RESOURCE_DESCRIPTOR", + "REG_LINK", + "REG_MULTI_SZ", + "REG_NONE", + "REG_QWORD", + "REG_QWORD_LITTLE_ENDIAN", + "REG_RESOURCE_LIST", + "REG_RESOURCE_REQUIREMENTS_LIST", + "REG_SZ", + "RLIMIT_AS", + "RLIMIT_CORE", + "RLIMIT_CPU", + "RLIMIT_CPU_USAGE_MONITOR", + "RLIMIT_DATA", + "RLIMIT_FSIZE", + "RLIMIT_NOFILE", + "RLIMIT_STACK", + "RLIM_INFINITY", + "RTAX_ADVMSS", + "RTAX_AUTHOR", + "RTAX_BRD", + "RTAX_CWND", + "RTAX_DST", + "RTAX_FEATURES", + "RTAX_FEATURE_ALLFRAG", + "RTAX_FEATURE_ECN", + "RTAX_FEATURE_SACK", + "RTAX_FEATURE_TIMESTAMP", + "RTAX_GATEWAY", + "RTAX_GENMASK", + "RTAX_HOPLIMIT", + "RTAX_IFA", + "RTAX_IFP", + "RTAX_INITCWND", + "RTAX_INITRWND", + "RTAX_LABEL", + "RTAX_LOCK", + "RTAX_MAX", + "RTAX_MTU", + "RTAX_NETMASK", + "RTAX_REORDERING", + "RTAX_RTO_MIN", + "RTAX_RTT", + "RTAX_RTTVAR", + "RTAX_SRC", + "RTAX_SRCMASK", + "RTAX_SSTHRESH", + "RTAX_TAG", + 
"RTAX_UNSPEC", + "RTAX_WINDOW", + "RTA_ALIGNTO", + "RTA_AUTHOR", + "RTA_BRD", + "RTA_CACHEINFO", + "RTA_DST", + "RTA_FLOW", + "RTA_GATEWAY", + "RTA_GENMASK", + "RTA_IFA", + "RTA_IFP", + "RTA_IIF", + "RTA_LABEL", + "RTA_MAX", + "RTA_METRICS", + "RTA_MULTIPATH", + "RTA_NETMASK", + "RTA_OIF", + "RTA_PREFSRC", + "RTA_PRIORITY", + "RTA_SRC", + "RTA_SRCMASK", + "RTA_TABLE", + "RTA_TAG", + "RTA_UNSPEC", + "RTCF_DIRECTSRC", + "RTCF_DOREDIRECT", + "RTCF_LOG", + "RTCF_MASQ", + "RTCF_NAT", + "RTCF_VALVE", + "RTF_ADDRCLASSMASK", + "RTF_ADDRCONF", + "RTF_ALLONLINK", + "RTF_ANNOUNCE", + "RTF_BLACKHOLE", + "RTF_BROADCAST", + "RTF_CACHE", + "RTF_CLONED", + "RTF_CLONING", + "RTF_CONDEMNED", + "RTF_DEFAULT", + "RTF_DELCLONE", + "RTF_DONE", + "RTF_DYNAMIC", + "RTF_FLOW", + "RTF_FMASK", + "RTF_GATEWAY", + "RTF_GWFLAG_COMPAT", + "RTF_HOST", + "RTF_IFREF", + "RTF_IFSCOPE", + "RTF_INTERFACE", + "RTF_IRTT", + "RTF_LINKRT", + "RTF_LLDATA", + "RTF_LLINFO", + "RTF_LOCAL", + "RTF_MASK", + "RTF_MODIFIED", + "RTF_MPATH", + "RTF_MPLS", + "RTF_MSS", + "RTF_MTU", + "RTF_MULTICAST", + "RTF_NAT", + "RTF_NOFORWARD", + "RTF_NONEXTHOP", + "RTF_NOPMTUDISC", + "RTF_PERMANENT_ARP", + "RTF_PINNED", + "RTF_POLICY", + "RTF_PRCLONING", + "RTF_PROTO1", + "RTF_PROTO2", + "RTF_PROTO3", + "RTF_PROXY", + "RTF_REINSTATE", + "RTF_REJECT", + "RTF_RNH_LOCKED", + "RTF_ROUTER", + "RTF_SOURCE", + "RTF_SRC", + "RTF_STATIC", + "RTF_STICKY", + "RTF_THROW", + "RTF_TUNNEL", + "RTF_UP", + "RTF_USETRAILERS", + "RTF_WASCLONED", + "RTF_WINDOW", + "RTF_XRESOLVE", + "RTM_ADD", + "RTM_BASE", + "RTM_CHANGE", + "RTM_CHGADDR", + "RTM_DELACTION", + "RTM_DELADDR", + "RTM_DELADDRLABEL", + "RTM_DELETE", + "RTM_DELLINK", + "RTM_DELMADDR", + "RTM_DELNEIGH", + "RTM_DELQDISC", + "RTM_DELROUTE", + "RTM_DELRULE", + "RTM_DELTCLASS", + "RTM_DELTFILTER", + "RTM_DESYNC", + "RTM_F_CLONED", + "RTM_F_EQUALIZE", + "RTM_F_NOTIFY", + "RTM_F_PREFIX", + "RTM_GET", + "RTM_GET2", + "RTM_GETACTION", + "RTM_GETADDR", + "RTM_GETADDRLABEL", + "RTM_GETANYCAST", + 
"RTM_GETDCB", + "RTM_GETLINK", + "RTM_GETMULTICAST", + "RTM_GETNEIGH", + "RTM_GETNEIGHTBL", + "RTM_GETQDISC", + "RTM_GETROUTE", + "RTM_GETRULE", + "RTM_GETTCLASS", + "RTM_GETTFILTER", + "RTM_IEEE80211", + "RTM_IFANNOUNCE", + "RTM_IFINFO", + "RTM_IFINFO2", + "RTM_LLINFO_UPD", + "RTM_LOCK", + "RTM_LOSING", + "RTM_MAX", + "RTM_MAXSIZE", + "RTM_MISS", + "RTM_NEWACTION", + "RTM_NEWADDR", + "RTM_NEWADDRLABEL", + "RTM_NEWLINK", + "RTM_NEWMADDR", + "RTM_NEWMADDR2", + "RTM_NEWNDUSEROPT", + "RTM_NEWNEIGH", + "RTM_NEWNEIGHTBL", + "RTM_NEWPREFIX", + "RTM_NEWQDISC", + "RTM_NEWROUTE", + "RTM_NEWRULE", + "RTM_NEWTCLASS", + "RTM_NEWTFILTER", + "RTM_NR_FAMILIES", + "RTM_NR_MSGTYPES", + "RTM_OIFINFO", + "RTM_OLDADD", + "RTM_OLDDEL", + "RTM_OOIFINFO", + "RTM_REDIRECT", + "RTM_RESOLVE", + "RTM_RTTUNIT", + "RTM_SETDCB", + "RTM_SETGATE", + "RTM_SETLINK", + "RTM_SETNEIGHTBL", + "RTM_VERSION", + "RTNH_ALIGNTO", + "RTNH_F_DEAD", + "RTNH_F_ONLINK", + "RTNH_F_PERVASIVE", + "RTNLGRP_IPV4_IFADDR", + "RTNLGRP_IPV4_MROUTE", + "RTNLGRP_IPV4_ROUTE", + "RTNLGRP_IPV4_RULE", + "RTNLGRP_IPV6_IFADDR", + "RTNLGRP_IPV6_IFINFO", + "RTNLGRP_IPV6_MROUTE", + "RTNLGRP_IPV6_PREFIX", + "RTNLGRP_IPV6_ROUTE", + "RTNLGRP_IPV6_RULE", + "RTNLGRP_LINK", + "RTNLGRP_ND_USEROPT", + "RTNLGRP_NEIGH", + "RTNLGRP_NONE", + "RTNLGRP_NOTIFY", + "RTNLGRP_TC", + "RTN_ANYCAST", + "RTN_BLACKHOLE", + "RTN_BROADCAST", + "RTN_LOCAL", + "RTN_MAX", + "RTN_MULTICAST", + "RTN_NAT", + "RTN_PROHIBIT", + "RTN_THROW", + "RTN_UNICAST", + "RTN_UNREACHABLE", + "RTN_UNSPEC", + "RTN_XRESOLVE", + "RTPROT_BIRD", + "RTPROT_BOOT", + "RTPROT_DHCP", + "RTPROT_DNROUTED", + "RTPROT_GATED", + "RTPROT_KERNEL", + "RTPROT_MRT", + "RTPROT_NTK", + "RTPROT_RA", + "RTPROT_REDIRECT", + "RTPROT_STATIC", + "RTPROT_UNSPEC", + "RTPROT_XORP", + "RTPROT_ZEBRA", + "RTV_EXPIRE", + "RTV_HOPCOUNT", + "RTV_MTU", + "RTV_RPIPE", + "RTV_RTT", + "RTV_RTTVAR", + "RTV_SPIPE", + "RTV_SSTHRESH", + "RTV_WEIGHT", + "RT_CACHING_CONTEXT", + "RT_CLASS_DEFAULT", + "RT_CLASS_LOCAL", + 
"RT_CLASS_MAIN", + "RT_CLASS_MAX", + "RT_CLASS_UNSPEC", + "RT_DEFAULT_FIB", + "RT_NORTREF", + "RT_SCOPE_HOST", + "RT_SCOPE_LINK", + "RT_SCOPE_NOWHERE", + "RT_SCOPE_SITE", + "RT_SCOPE_UNIVERSE", + "RT_TABLEID_MAX", + "RT_TABLE_COMPAT", + "RT_TABLE_DEFAULT", + "RT_TABLE_LOCAL", + "RT_TABLE_MAIN", + "RT_TABLE_MAX", + "RT_TABLE_UNSPEC", + "RUSAGE_CHILDREN", + "RUSAGE_SELF", + "RUSAGE_THREAD", + "Radvisory_t", + "RawConn", + "RawSockaddr", + "RawSockaddrAny", + "RawSockaddrDatalink", + "RawSockaddrInet4", + "RawSockaddrInet6", + "RawSockaddrLinklayer", + "RawSockaddrNetlink", + "RawSockaddrUnix", + "RawSyscall", + "RawSyscall6", + "Read", + "ReadConsole", + "ReadDirectoryChanges", + "ReadDirent", + "ReadFile", + "Readlink", + "Reboot", + "Recvfrom", + "Recvmsg", + "RegCloseKey", + "RegEnumKeyEx", + "RegOpenKeyEx", + "RegQueryInfoKey", + "RegQueryValueEx", + "RemoveDirectory", + "Removexattr", + "Rename", + "Renameat", + "Revoke", + "Rlimit", + "Rmdir", + "RouteMessage", + "RouteRIB", + "RoutingMessage", + "RtAttr", + "RtGenmsg", + "RtMetrics", + "RtMsg", + "RtMsghdr", + "RtNexthop", + "Rusage", + "SCM_BINTIME", + "SCM_CREDENTIALS", + "SCM_CREDS", + "SCM_RIGHTS", + "SCM_TIMESTAMP", + "SCM_TIMESTAMPING", + "SCM_TIMESTAMPNS", + "SCM_TIMESTAMP_MONOTONIC", + "SHUT_RD", + "SHUT_RDWR", + "SHUT_WR", + "SID", + "SIDAndAttributes", + "SIGABRT", + "SIGALRM", + "SIGBUS", + "SIGCHLD", + "SIGCLD", + "SIGCONT", + "SIGEMT", + "SIGFPE", + "SIGHUP", + "SIGILL", + "SIGINFO", + "SIGINT", + "SIGIO", + "SIGIOT", + "SIGKILL", + "SIGLIBRT", + "SIGLWP", + "SIGPIPE", + "SIGPOLL", + "SIGPROF", + "SIGPWR", + "SIGQUIT", + "SIGSEGV", + "SIGSTKFLT", + "SIGSTOP", + "SIGSYS", + "SIGTERM", + "SIGTHR", + "SIGTRAP", + "SIGTSTP", + "SIGTTIN", + "SIGTTOU", + "SIGUNUSED", + "SIGURG", + "SIGUSR1", + "SIGUSR2", + "SIGVTALRM", + "SIGWINCH", + "SIGXCPU", + "SIGXFSZ", + "SIOCADDDLCI", + "SIOCADDMULTI", + "SIOCADDRT", + "SIOCAIFADDR", + "SIOCAIFGROUP", + "SIOCALIFADDR", + "SIOCARPIPLL", + "SIOCATMARK", + 
"SIOCAUTOADDR", + "SIOCAUTONETMASK", + "SIOCBRDGADD", + "SIOCBRDGADDS", + "SIOCBRDGARL", + "SIOCBRDGDADDR", + "SIOCBRDGDEL", + "SIOCBRDGDELS", + "SIOCBRDGFLUSH", + "SIOCBRDGFRL", + "SIOCBRDGGCACHE", + "SIOCBRDGGFD", + "SIOCBRDGGHT", + "SIOCBRDGGIFFLGS", + "SIOCBRDGGMA", + "SIOCBRDGGPARAM", + "SIOCBRDGGPRI", + "SIOCBRDGGRL", + "SIOCBRDGGSIFS", + "SIOCBRDGGTO", + "SIOCBRDGIFS", + "SIOCBRDGRTS", + "SIOCBRDGSADDR", + "SIOCBRDGSCACHE", + "SIOCBRDGSFD", + "SIOCBRDGSHT", + "SIOCBRDGSIFCOST", + "SIOCBRDGSIFFLGS", + "SIOCBRDGSIFPRIO", + "SIOCBRDGSMA", + "SIOCBRDGSPRI", + "SIOCBRDGSPROTO", + "SIOCBRDGSTO", + "SIOCBRDGSTXHC", + "SIOCDARP", + "SIOCDELDLCI", + "SIOCDELMULTI", + "SIOCDELRT", + "SIOCDEVPRIVATE", + "SIOCDIFADDR", + "SIOCDIFGROUP", + "SIOCDIFPHYADDR", + "SIOCDLIFADDR", + "SIOCDRARP", + "SIOCGARP", + "SIOCGDRVSPEC", + "SIOCGETKALIVE", + "SIOCGETLABEL", + "SIOCGETPFLOW", + "SIOCGETPFSYNC", + "SIOCGETSGCNT", + "SIOCGETVIFCNT", + "SIOCGETVLAN", + "SIOCGHIWAT", + "SIOCGIFADDR", + "SIOCGIFADDRPREF", + "SIOCGIFALIAS", + "SIOCGIFALTMTU", + "SIOCGIFASYNCMAP", + "SIOCGIFBOND", + "SIOCGIFBR", + "SIOCGIFBRDADDR", + "SIOCGIFCAP", + "SIOCGIFCONF", + "SIOCGIFCOUNT", + "SIOCGIFDATA", + "SIOCGIFDESCR", + "SIOCGIFDEVMTU", + "SIOCGIFDLT", + "SIOCGIFDSTADDR", + "SIOCGIFENCAP", + "SIOCGIFFIB", + "SIOCGIFFLAGS", + "SIOCGIFGATTR", + "SIOCGIFGENERIC", + "SIOCGIFGMEMB", + "SIOCGIFGROUP", + "SIOCGIFHARDMTU", + "SIOCGIFHWADDR", + "SIOCGIFINDEX", + "SIOCGIFKPI", + "SIOCGIFMAC", + "SIOCGIFMAP", + "SIOCGIFMEDIA", + "SIOCGIFMEM", + "SIOCGIFMETRIC", + "SIOCGIFMTU", + "SIOCGIFNAME", + "SIOCGIFNETMASK", + "SIOCGIFPDSTADDR", + "SIOCGIFPFLAGS", + "SIOCGIFPHYS", + "SIOCGIFPRIORITY", + "SIOCGIFPSRCADDR", + "SIOCGIFRDOMAIN", + "SIOCGIFRTLABEL", + "SIOCGIFSLAVE", + "SIOCGIFSTATUS", + "SIOCGIFTIMESLOT", + "SIOCGIFTXQLEN", + "SIOCGIFVLAN", + "SIOCGIFWAKEFLAGS", + "SIOCGIFXFLAGS", + "SIOCGLIFADDR", + "SIOCGLIFPHYADDR", + "SIOCGLIFPHYRTABLE", + "SIOCGLIFPHYTTL", + "SIOCGLINKSTR", + "SIOCGLOWAT", + 
"SIOCGPGRP", + "SIOCGPRIVATE_0", + "SIOCGPRIVATE_1", + "SIOCGRARP", + "SIOCGSPPPPARAMS", + "SIOCGSTAMP", + "SIOCGSTAMPNS", + "SIOCGVH", + "SIOCGVNETID", + "SIOCIFCREATE", + "SIOCIFCREATE2", + "SIOCIFDESTROY", + "SIOCIFGCLONERS", + "SIOCINITIFADDR", + "SIOCPROTOPRIVATE", + "SIOCRSLVMULTI", + "SIOCRTMSG", + "SIOCSARP", + "SIOCSDRVSPEC", + "SIOCSETKALIVE", + "SIOCSETLABEL", + "SIOCSETPFLOW", + "SIOCSETPFSYNC", + "SIOCSETVLAN", + "SIOCSHIWAT", + "SIOCSIFADDR", + "SIOCSIFADDRPREF", + "SIOCSIFALTMTU", + "SIOCSIFASYNCMAP", + "SIOCSIFBOND", + "SIOCSIFBR", + "SIOCSIFBRDADDR", + "SIOCSIFCAP", + "SIOCSIFDESCR", + "SIOCSIFDSTADDR", + "SIOCSIFENCAP", + "SIOCSIFFIB", + "SIOCSIFFLAGS", + "SIOCSIFGATTR", + "SIOCSIFGENERIC", + "SIOCSIFHWADDR", + "SIOCSIFHWBROADCAST", + "SIOCSIFKPI", + "SIOCSIFLINK", + "SIOCSIFLLADDR", + "SIOCSIFMAC", + "SIOCSIFMAP", + "SIOCSIFMEDIA", + "SIOCSIFMEM", + "SIOCSIFMETRIC", + "SIOCSIFMTU", + "SIOCSIFNAME", + "SIOCSIFNETMASK", + "SIOCSIFPFLAGS", + "SIOCSIFPHYADDR", + "SIOCSIFPHYS", + "SIOCSIFPRIORITY", + "SIOCSIFRDOMAIN", + "SIOCSIFRTLABEL", + "SIOCSIFRVNET", + "SIOCSIFSLAVE", + "SIOCSIFTIMESLOT", + "SIOCSIFTXQLEN", + "SIOCSIFVLAN", + "SIOCSIFVNET", + "SIOCSIFXFLAGS", + "SIOCSLIFPHYADDR", + "SIOCSLIFPHYRTABLE", + "SIOCSLIFPHYTTL", + "SIOCSLINKSTR", + "SIOCSLOWAT", + "SIOCSPGRP", + "SIOCSRARP", + "SIOCSSPPPPARAMS", + "SIOCSVH", + "SIOCSVNETID", + "SIOCZIFDATA", + "SIO_GET_EXTENSION_FUNCTION_POINTER", + "SIO_GET_INTERFACE_LIST", + "SIO_KEEPALIVE_VALS", + "SIO_UDP_CONNRESET", + "SOCK_CLOEXEC", + "SOCK_DCCP", + "SOCK_DGRAM", + "SOCK_FLAGS_MASK", + "SOCK_MAXADDRLEN", + "SOCK_NONBLOCK", + "SOCK_NOSIGPIPE", + "SOCK_PACKET", + "SOCK_RAW", + "SOCK_RDM", + "SOCK_SEQPACKET", + "SOCK_STREAM", + "SOL_AAL", + "SOL_ATM", + "SOL_DECNET", + "SOL_ICMPV6", + "SOL_IP", + "SOL_IPV6", + "SOL_IRDA", + "SOL_PACKET", + "SOL_RAW", + "SOL_SOCKET", + "SOL_TCP", + "SOL_X25", + "SOMAXCONN", + "SO_ACCEPTCONN", + "SO_ACCEPTFILTER", + "SO_ATTACH_FILTER", + "SO_BINDANY", + 
"SO_BINDTODEVICE", + "SO_BINTIME", + "SO_BROADCAST", + "SO_BSDCOMPAT", + "SO_DEBUG", + "SO_DETACH_FILTER", + "SO_DOMAIN", + "SO_DONTROUTE", + "SO_DONTTRUNC", + "SO_ERROR", + "SO_KEEPALIVE", + "SO_LABEL", + "SO_LINGER", + "SO_LINGER_SEC", + "SO_LISTENINCQLEN", + "SO_LISTENQLEN", + "SO_LISTENQLIMIT", + "SO_MARK", + "SO_NETPROC", + "SO_NKE", + "SO_NOADDRERR", + "SO_NOHEADER", + "SO_NOSIGPIPE", + "SO_NOTIFYCONFLICT", + "SO_NO_CHECK", + "SO_NO_DDP", + "SO_NO_OFFLOAD", + "SO_NP_EXTENSIONS", + "SO_NREAD", + "SO_NUMRCVPKT", + "SO_NWRITE", + "SO_OOBINLINE", + "SO_OVERFLOWED", + "SO_PASSCRED", + "SO_PASSSEC", + "SO_PEERCRED", + "SO_PEERLABEL", + "SO_PEERNAME", + "SO_PEERSEC", + "SO_PRIORITY", + "SO_PROTOCOL", + "SO_PROTOTYPE", + "SO_RANDOMPORT", + "SO_RCVBUF", + "SO_RCVBUFFORCE", + "SO_RCVLOWAT", + "SO_RCVTIMEO", + "SO_RESTRICTIONS", + "SO_RESTRICT_DENYIN", + "SO_RESTRICT_DENYOUT", + "SO_RESTRICT_DENYSET", + "SO_REUSEADDR", + "SO_REUSEPORT", + "SO_REUSESHAREUID", + "SO_RTABLE", + "SO_RXQ_OVFL", + "SO_SECURITY_AUTHENTICATION", + "SO_SECURITY_ENCRYPTION_NETWORK", + "SO_SECURITY_ENCRYPTION_TRANSPORT", + "SO_SETFIB", + "SO_SNDBUF", + "SO_SNDBUFFORCE", + "SO_SNDLOWAT", + "SO_SNDTIMEO", + "SO_SPLICE", + "SO_TIMESTAMP", + "SO_TIMESTAMPING", + "SO_TIMESTAMPNS", + "SO_TIMESTAMP_MONOTONIC", + "SO_TYPE", + "SO_UPCALLCLOSEWAIT", + "SO_UPDATE_ACCEPT_CONTEXT", + "SO_UPDATE_CONNECT_CONTEXT", + "SO_USELOOPBACK", + "SO_USER_COOKIE", + "SO_VENDOR", + "SO_WANTMORE", + "SO_WANTOOBFLAG", + "SSLExtraCertChainPolicyPara", + "STANDARD_RIGHTS_ALL", + "STANDARD_RIGHTS_EXECUTE", + "STANDARD_RIGHTS_READ", + "STANDARD_RIGHTS_REQUIRED", + "STANDARD_RIGHTS_WRITE", + "STARTF_USESHOWWINDOW", + "STARTF_USESTDHANDLES", + "STD_ERROR_HANDLE", + "STD_INPUT_HANDLE", + "STD_OUTPUT_HANDLE", + "SUBLANG_ENGLISH_US", + "SW_FORCEMINIMIZE", + "SW_HIDE", + "SW_MAXIMIZE", + "SW_MINIMIZE", + "SW_NORMAL", + "SW_RESTORE", + "SW_SHOW", + "SW_SHOWDEFAULT", + "SW_SHOWMAXIMIZED", + "SW_SHOWMINIMIZED", + "SW_SHOWMINNOACTIVE", + 
"SW_SHOWNA", + "SW_SHOWNOACTIVATE", + "SW_SHOWNORMAL", + "SYMBOLIC_LINK_FLAG_DIRECTORY", + "SYNCHRONIZE", + "SYSCTL_VERSION", + "SYSCTL_VERS_0", + "SYSCTL_VERS_1", + "SYSCTL_VERS_MASK", + "SYS_ABORT2", + "SYS_ACCEPT", + "SYS_ACCEPT4", + "SYS_ACCEPT_NOCANCEL", + "SYS_ACCESS", + "SYS_ACCESS_EXTENDED", + "SYS_ACCT", + "SYS_ADD_KEY", + "SYS_ADD_PROFIL", + "SYS_ADJFREQ", + "SYS_ADJTIME", + "SYS_ADJTIMEX", + "SYS_AFS_SYSCALL", + "SYS_AIO_CANCEL", + "SYS_AIO_ERROR", + "SYS_AIO_FSYNC", + "SYS_AIO_READ", + "SYS_AIO_RETURN", + "SYS_AIO_SUSPEND", + "SYS_AIO_SUSPEND_NOCANCEL", + "SYS_AIO_WRITE", + "SYS_ALARM", + "SYS_ARCH_PRCTL", + "SYS_ARM_FADVISE64_64", + "SYS_ARM_SYNC_FILE_RANGE", + "SYS_ATGETMSG", + "SYS_ATPGETREQ", + "SYS_ATPGETRSP", + "SYS_ATPSNDREQ", + "SYS_ATPSNDRSP", + "SYS_ATPUTMSG", + "SYS_ATSOCKET", + "SYS_AUDIT", + "SYS_AUDITCTL", + "SYS_AUDITON", + "SYS_AUDIT_SESSION_JOIN", + "SYS_AUDIT_SESSION_PORT", + "SYS_AUDIT_SESSION_SELF", + "SYS_BDFLUSH", + "SYS_BIND", + "SYS_BINDAT", + "SYS_BREAK", + "SYS_BRK", + "SYS_BSDTHREAD_CREATE", + "SYS_BSDTHREAD_REGISTER", + "SYS_BSDTHREAD_TERMINATE", + "SYS_CAPGET", + "SYS_CAPSET", + "SYS_CAP_ENTER", + "SYS_CAP_FCNTLS_GET", + "SYS_CAP_FCNTLS_LIMIT", + "SYS_CAP_GETMODE", + "SYS_CAP_GETRIGHTS", + "SYS_CAP_IOCTLS_GET", + "SYS_CAP_IOCTLS_LIMIT", + "SYS_CAP_NEW", + "SYS_CAP_RIGHTS_GET", + "SYS_CAP_RIGHTS_LIMIT", + "SYS_CHDIR", + "SYS_CHFLAGS", + "SYS_CHFLAGSAT", + "SYS_CHMOD", + "SYS_CHMOD_EXTENDED", + "SYS_CHOWN", + "SYS_CHOWN32", + "SYS_CHROOT", + "SYS_CHUD", + "SYS_CLOCK_ADJTIME", + "SYS_CLOCK_GETCPUCLOCKID2", + "SYS_CLOCK_GETRES", + "SYS_CLOCK_GETTIME", + "SYS_CLOCK_NANOSLEEP", + "SYS_CLOCK_SETTIME", + "SYS_CLONE", + "SYS_CLOSE", + "SYS_CLOSEFROM", + "SYS_CLOSE_NOCANCEL", + "SYS_CONNECT", + "SYS_CONNECTAT", + "SYS_CONNECT_NOCANCEL", + "SYS_COPYFILE", + "SYS_CPUSET", + "SYS_CPUSET_GETAFFINITY", + "SYS_CPUSET_GETID", + "SYS_CPUSET_SETAFFINITY", + "SYS_CPUSET_SETID", + "SYS_CREAT", + "SYS_CREATE_MODULE", + "SYS_CSOPS", + 
"SYS_CSOPS_AUDITTOKEN", + "SYS_DELETE", + "SYS_DELETE_MODULE", + "SYS_DUP", + "SYS_DUP2", + "SYS_DUP3", + "SYS_EACCESS", + "SYS_EPOLL_CREATE", + "SYS_EPOLL_CREATE1", + "SYS_EPOLL_CTL", + "SYS_EPOLL_CTL_OLD", + "SYS_EPOLL_PWAIT", + "SYS_EPOLL_WAIT", + "SYS_EPOLL_WAIT_OLD", + "SYS_EVENTFD", + "SYS_EVENTFD2", + "SYS_EXCHANGEDATA", + "SYS_EXECVE", + "SYS_EXIT", + "SYS_EXIT_GROUP", + "SYS_EXTATTRCTL", + "SYS_EXTATTR_DELETE_FD", + "SYS_EXTATTR_DELETE_FILE", + "SYS_EXTATTR_DELETE_LINK", + "SYS_EXTATTR_GET_FD", + "SYS_EXTATTR_GET_FILE", + "SYS_EXTATTR_GET_LINK", + "SYS_EXTATTR_LIST_FD", + "SYS_EXTATTR_LIST_FILE", + "SYS_EXTATTR_LIST_LINK", + "SYS_EXTATTR_SET_FD", + "SYS_EXTATTR_SET_FILE", + "SYS_EXTATTR_SET_LINK", + "SYS_FACCESSAT", + "SYS_FADVISE64", + "SYS_FADVISE64_64", + "SYS_FALLOCATE", + "SYS_FANOTIFY_INIT", + "SYS_FANOTIFY_MARK", + "SYS_FCHDIR", + "SYS_FCHFLAGS", + "SYS_FCHMOD", + "SYS_FCHMODAT", + "SYS_FCHMOD_EXTENDED", + "SYS_FCHOWN", + "SYS_FCHOWN32", + "SYS_FCHOWNAT", + "SYS_FCHROOT", + "SYS_FCNTL", + "SYS_FCNTL64", + "SYS_FCNTL_NOCANCEL", + "SYS_FDATASYNC", + "SYS_FEXECVE", + "SYS_FFCLOCK_GETCOUNTER", + "SYS_FFCLOCK_GETESTIMATE", + "SYS_FFCLOCK_SETESTIMATE", + "SYS_FFSCTL", + "SYS_FGETATTRLIST", + "SYS_FGETXATTR", + "SYS_FHOPEN", + "SYS_FHSTAT", + "SYS_FHSTATFS", + "SYS_FILEPORT_MAKEFD", + "SYS_FILEPORT_MAKEPORT", + "SYS_FKTRACE", + "SYS_FLISTXATTR", + "SYS_FLOCK", + "SYS_FORK", + "SYS_FPATHCONF", + "SYS_FREEBSD6_FTRUNCATE", + "SYS_FREEBSD6_LSEEK", + "SYS_FREEBSD6_MMAP", + "SYS_FREEBSD6_PREAD", + "SYS_FREEBSD6_PWRITE", + "SYS_FREEBSD6_TRUNCATE", + "SYS_FREMOVEXATTR", + "SYS_FSCTL", + "SYS_FSETATTRLIST", + "SYS_FSETXATTR", + "SYS_FSGETPATH", + "SYS_FSTAT", + "SYS_FSTAT64", + "SYS_FSTAT64_EXTENDED", + "SYS_FSTATAT", + "SYS_FSTATAT64", + "SYS_FSTATFS", + "SYS_FSTATFS64", + "SYS_FSTATV", + "SYS_FSTATVFS1", + "SYS_FSTAT_EXTENDED", + "SYS_FSYNC", + "SYS_FSYNC_NOCANCEL", + "SYS_FSYNC_RANGE", + "SYS_FTIME", + "SYS_FTRUNCATE", + "SYS_FTRUNCATE64", + "SYS_FUTEX", + 
"SYS_FUTIMENS", + "SYS_FUTIMES", + "SYS_FUTIMESAT", + "SYS_GETATTRLIST", + "SYS_GETAUDIT", + "SYS_GETAUDIT_ADDR", + "SYS_GETAUID", + "SYS_GETCONTEXT", + "SYS_GETCPU", + "SYS_GETCWD", + "SYS_GETDENTS", + "SYS_GETDENTS64", + "SYS_GETDIRENTRIES", + "SYS_GETDIRENTRIES64", + "SYS_GETDIRENTRIESATTR", + "SYS_GETDTABLECOUNT", + "SYS_GETDTABLESIZE", + "SYS_GETEGID", + "SYS_GETEGID32", + "SYS_GETEUID", + "SYS_GETEUID32", + "SYS_GETFH", + "SYS_GETFSSTAT", + "SYS_GETFSSTAT64", + "SYS_GETGID", + "SYS_GETGID32", + "SYS_GETGROUPS", + "SYS_GETGROUPS32", + "SYS_GETHOSTUUID", + "SYS_GETITIMER", + "SYS_GETLCID", + "SYS_GETLOGIN", + "SYS_GETLOGINCLASS", + "SYS_GETPEERNAME", + "SYS_GETPGID", + "SYS_GETPGRP", + "SYS_GETPID", + "SYS_GETPMSG", + "SYS_GETPPID", + "SYS_GETPRIORITY", + "SYS_GETRESGID", + "SYS_GETRESGID32", + "SYS_GETRESUID", + "SYS_GETRESUID32", + "SYS_GETRLIMIT", + "SYS_GETRTABLE", + "SYS_GETRUSAGE", + "SYS_GETSGROUPS", + "SYS_GETSID", + "SYS_GETSOCKNAME", + "SYS_GETSOCKOPT", + "SYS_GETTHRID", + "SYS_GETTID", + "SYS_GETTIMEOFDAY", + "SYS_GETUID", + "SYS_GETUID32", + "SYS_GETVFSSTAT", + "SYS_GETWGROUPS", + "SYS_GETXATTR", + "SYS_GET_KERNEL_SYMS", + "SYS_GET_MEMPOLICY", + "SYS_GET_ROBUST_LIST", + "SYS_GET_THREAD_AREA", + "SYS_GTTY", + "SYS_IDENTITYSVC", + "SYS_IDLE", + "SYS_INITGROUPS", + "SYS_INIT_MODULE", + "SYS_INOTIFY_ADD_WATCH", + "SYS_INOTIFY_INIT", + "SYS_INOTIFY_INIT1", + "SYS_INOTIFY_RM_WATCH", + "SYS_IOCTL", + "SYS_IOPERM", + "SYS_IOPL", + "SYS_IOPOLICYSYS", + "SYS_IOPRIO_GET", + "SYS_IOPRIO_SET", + "SYS_IO_CANCEL", + "SYS_IO_DESTROY", + "SYS_IO_GETEVENTS", + "SYS_IO_SETUP", + "SYS_IO_SUBMIT", + "SYS_IPC", + "SYS_ISSETUGID", + "SYS_JAIL", + "SYS_JAIL_ATTACH", + "SYS_JAIL_GET", + "SYS_JAIL_REMOVE", + "SYS_JAIL_SET", + "SYS_KAS_INFO", + "SYS_KDEBUG_TRACE", + "SYS_KENV", + "SYS_KEVENT", + "SYS_KEVENT64", + "SYS_KEXEC_LOAD", + "SYS_KEYCTL", + "SYS_KILL", + "SYS_KLDFIND", + "SYS_KLDFIRSTMOD", + "SYS_KLDLOAD", + "SYS_KLDNEXT", + "SYS_KLDSTAT", + "SYS_KLDSYM", + 
"SYS_KLDUNLOAD", + "SYS_KLDUNLOADF", + "SYS_KQUEUE", + "SYS_KQUEUE1", + "SYS_KTIMER_CREATE", + "SYS_KTIMER_DELETE", + "SYS_KTIMER_GETOVERRUN", + "SYS_KTIMER_GETTIME", + "SYS_KTIMER_SETTIME", + "SYS_KTRACE", + "SYS_LCHFLAGS", + "SYS_LCHMOD", + "SYS_LCHOWN", + "SYS_LCHOWN32", + "SYS_LEDGER", + "SYS_LGETFH", + "SYS_LGETXATTR", + "SYS_LINK", + "SYS_LINKAT", + "SYS_LIO_LISTIO", + "SYS_LISTEN", + "SYS_LISTXATTR", + "SYS_LLISTXATTR", + "SYS_LOCK", + "SYS_LOOKUP_DCOOKIE", + "SYS_LPATHCONF", + "SYS_LREMOVEXATTR", + "SYS_LSEEK", + "SYS_LSETXATTR", + "SYS_LSTAT", + "SYS_LSTAT64", + "SYS_LSTAT64_EXTENDED", + "SYS_LSTATV", + "SYS_LSTAT_EXTENDED", + "SYS_LUTIMES", + "SYS_MAC_SYSCALL", + "SYS_MADVISE", + "SYS_MADVISE1", + "SYS_MAXSYSCALL", + "SYS_MBIND", + "SYS_MIGRATE_PAGES", + "SYS_MINCORE", + "SYS_MINHERIT", + "SYS_MKCOMPLEX", + "SYS_MKDIR", + "SYS_MKDIRAT", + "SYS_MKDIR_EXTENDED", + "SYS_MKFIFO", + "SYS_MKFIFOAT", + "SYS_MKFIFO_EXTENDED", + "SYS_MKNOD", + "SYS_MKNODAT", + "SYS_MLOCK", + "SYS_MLOCKALL", + "SYS_MMAP", + "SYS_MMAP2", + "SYS_MODCTL", + "SYS_MODFIND", + "SYS_MODFNEXT", + "SYS_MODIFY_LDT", + "SYS_MODNEXT", + "SYS_MODSTAT", + "SYS_MODWATCH", + "SYS_MOUNT", + "SYS_MOVE_PAGES", + "SYS_MPROTECT", + "SYS_MPX", + "SYS_MQUERY", + "SYS_MQ_GETSETATTR", + "SYS_MQ_NOTIFY", + "SYS_MQ_OPEN", + "SYS_MQ_TIMEDRECEIVE", + "SYS_MQ_TIMEDSEND", + "SYS_MQ_UNLINK", + "SYS_MREMAP", + "SYS_MSGCTL", + "SYS_MSGGET", + "SYS_MSGRCV", + "SYS_MSGRCV_NOCANCEL", + "SYS_MSGSND", + "SYS_MSGSND_NOCANCEL", + "SYS_MSGSYS", + "SYS_MSYNC", + "SYS_MSYNC_NOCANCEL", + "SYS_MUNLOCK", + "SYS_MUNLOCKALL", + "SYS_MUNMAP", + "SYS_NAME_TO_HANDLE_AT", + "SYS_NANOSLEEP", + "SYS_NEWFSTATAT", + "SYS_NFSCLNT", + "SYS_NFSSERVCTL", + "SYS_NFSSVC", + "SYS_NFSTAT", + "SYS_NICE", + "SYS_NLSTAT", + "SYS_NMOUNT", + "SYS_NSTAT", + "SYS_NTP_ADJTIME", + "SYS_NTP_GETTIME", + "SYS_OABI_SYSCALL_BASE", + "SYS_OBREAK", + "SYS_OLDFSTAT", + "SYS_OLDLSTAT", + "SYS_OLDOLDUNAME", + "SYS_OLDSTAT", + "SYS_OLDUNAME", + "SYS_OPEN", + 
"SYS_OPENAT", + "SYS_OPENBSD_POLL", + "SYS_OPEN_BY_HANDLE_AT", + "SYS_OPEN_DPROTECTED_NP", + "SYS_OPEN_EXTENDED", + "SYS_OPEN_NOCANCEL", + "SYS_OVADVISE", + "SYS_PACCEPT", + "SYS_PATHCONF", + "SYS_PAUSE", + "SYS_PCICONFIG_IOBASE", + "SYS_PCICONFIG_READ", + "SYS_PCICONFIG_WRITE", + "SYS_PDFORK", + "SYS_PDGETPID", + "SYS_PDKILL", + "SYS_PERF_EVENT_OPEN", + "SYS_PERSONALITY", + "SYS_PID_HIBERNATE", + "SYS_PID_RESUME", + "SYS_PID_SHUTDOWN_SOCKETS", + "SYS_PID_SUSPEND", + "SYS_PIPE", + "SYS_PIPE2", + "SYS_PIVOT_ROOT", + "SYS_PMC_CONTROL", + "SYS_PMC_GET_INFO", + "SYS_POLL", + "SYS_POLLTS", + "SYS_POLL_NOCANCEL", + "SYS_POSIX_FADVISE", + "SYS_POSIX_FALLOCATE", + "SYS_POSIX_OPENPT", + "SYS_POSIX_SPAWN", + "SYS_PPOLL", + "SYS_PRCTL", + "SYS_PREAD", + "SYS_PREAD64", + "SYS_PREADV", + "SYS_PREAD_NOCANCEL", + "SYS_PRLIMIT64", + "SYS_PROCCTL", + "SYS_PROCESS_POLICY", + "SYS_PROCESS_VM_READV", + "SYS_PROCESS_VM_WRITEV", + "SYS_PROC_INFO", + "SYS_PROF", + "SYS_PROFIL", + "SYS_PSELECT", + "SYS_PSELECT6", + "SYS_PSET_ASSIGN", + "SYS_PSET_CREATE", + "SYS_PSET_DESTROY", + "SYS_PSYNCH_CVBROAD", + "SYS_PSYNCH_CVCLRPREPOST", + "SYS_PSYNCH_CVSIGNAL", + "SYS_PSYNCH_CVWAIT", + "SYS_PSYNCH_MUTEXDROP", + "SYS_PSYNCH_MUTEXWAIT", + "SYS_PSYNCH_RW_DOWNGRADE", + "SYS_PSYNCH_RW_LONGRDLOCK", + "SYS_PSYNCH_RW_RDLOCK", + "SYS_PSYNCH_RW_UNLOCK", + "SYS_PSYNCH_RW_UNLOCK2", + "SYS_PSYNCH_RW_UPGRADE", + "SYS_PSYNCH_RW_WRLOCK", + "SYS_PSYNCH_RW_YIELDWRLOCK", + "SYS_PTRACE", + "SYS_PUTPMSG", + "SYS_PWRITE", + "SYS_PWRITE64", + "SYS_PWRITEV", + "SYS_PWRITE_NOCANCEL", + "SYS_QUERY_MODULE", + "SYS_QUOTACTL", + "SYS_RASCTL", + "SYS_RCTL_ADD_RULE", + "SYS_RCTL_GET_LIMITS", + "SYS_RCTL_GET_RACCT", + "SYS_RCTL_GET_RULES", + "SYS_RCTL_REMOVE_RULE", + "SYS_READ", + "SYS_READAHEAD", + "SYS_READDIR", + "SYS_READLINK", + "SYS_READLINKAT", + "SYS_READV", + "SYS_READV_NOCANCEL", + "SYS_READ_NOCANCEL", + "SYS_REBOOT", + "SYS_RECV", + "SYS_RECVFROM", + "SYS_RECVFROM_NOCANCEL", + "SYS_RECVMMSG", + "SYS_RECVMSG", + 
"SYS_RECVMSG_NOCANCEL", + "SYS_REMAP_FILE_PAGES", + "SYS_REMOVEXATTR", + "SYS_RENAME", + "SYS_RENAMEAT", + "SYS_REQUEST_KEY", + "SYS_RESTART_SYSCALL", + "SYS_REVOKE", + "SYS_RFORK", + "SYS_RMDIR", + "SYS_RTPRIO", + "SYS_RTPRIO_THREAD", + "SYS_RT_SIGACTION", + "SYS_RT_SIGPENDING", + "SYS_RT_SIGPROCMASK", + "SYS_RT_SIGQUEUEINFO", + "SYS_RT_SIGRETURN", + "SYS_RT_SIGSUSPEND", + "SYS_RT_SIGTIMEDWAIT", + "SYS_RT_TGSIGQUEUEINFO", + "SYS_SBRK", + "SYS_SCHED_GETAFFINITY", + "SYS_SCHED_GETPARAM", + "SYS_SCHED_GETSCHEDULER", + "SYS_SCHED_GET_PRIORITY_MAX", + "SYS_SCHED_GET_PRIORITY_MIN", + "SYS_SCHED_RR_GET_INTERVAL", + "SYS_SCHED_SETAFFINITY", + "SYS_SCHED_SETPARAM", + "SYS_SCHED_SETSCHEDULER", + "SYS_SCHED_YIELD", + "SYS_SCTP_GENERIC_RECVMSG", + "SYS_SCTP_GENERIC_SENDMSG", + "SYS_SCTP_GENERIC_SENDMSG_IOV", + "SYS_SCTP_PEELOFF", + "SYS_SEARCHFS", + "SYS_SECURITY", + "SYS_SELECT", + "SYS_SELECT_NOCANCEL", + "SYS_SEMCONFIG", + "SYS_SEMCTL", + "SYS_SEMGET", + "SYS_SEMOP", + "SYS_SEMSYS", + "SYS_SEMTIMEDOP", + "SYS_SEM_CLOSE", + "SYS_SEM_DESTROY", + "SYS_SEM_GETVALUE", + "SYS_SEM_INIT", + "SYS_SEM_OPEN", + "SYS_SEM_POST", + "SYS_SEM_TRYWAIT", + "SYS_SEM_UNLINK", + "SYS_SEM_WAIT", + "SYS_SEM_WAIT_NOCANCEL", + "SYS_SEND", + "SYS_SENDFILE", + "SYS_SENDFILE64", + "SYS_SENDMMSG", + "SYS_SENDMSG", + "SYS_SENDMSG_NOCANCEL", + "SYS_SENDTO", + "SYS_SENDTO_NOCANCEL", + "SYS_SETATTRLIST", + "SYS_SETAUDIT", + "SYS_SETAUDIT_ADDR", + "SYS_SETAUID", + "SYS_SETCONTEXT", + "SYS_SETDOMAINNAME", + "SYS_SETEGID", + "SYS_SETEUID", + "SYS_SETFIB", + "SYS_SETFSGID", + "SYS_SETFSGID32", + "SYS_SETFSUID", + "SYS_SETFSUID32", + "SYS_SETGID", + "SYS_SETGID32", + "SYS_SETGROUPS", + "SYS_SETGROUPS32", + "SYS_SETHOSTNAME", + "SYS_SETITIMER", + "SYS_SETLCID", + "SYS_SETLOGIN", + "SYS_SETLOGINCLASS", + "SYS_SETNS", + "SYS_SETPGID", + "SYS_SETPRIORITY", + "SYS_SETPRIVEXEC", + "SYS_SETREGID", + "SYS_SETREGID32", + "SYS_SETRESGID", + "SYS_SETRESGID32", + "SYS_SETRESUID", + "SYS_SETRESUID32", + "SYS_SETREUID", + 
"SYS_SETREUID32", + "SYS_SETRLIMIT", + "SYS_SETRTABLE", + "SYS_SETSGROUPS", + "SYS_SETSID", + "SYS_SETSOCKOPT", + "SYS_SETTID", + "SYS_SETTID_WITH_PID", + "SYS_SETTIMEOFDAY", + "SYS_SETUID", + "SYS_SETUID32", + "SYS_SETWGROUPS", + "SYS_SETXATTR", + "SYS_SET_MEMPOLICY", + "SYS_SET_ROBUST_LIST", + "SYS_SET_THREAD_AREA", + "SYS_SET_TID_ADDRESS", + "SYS_SGETMASK", + "SYS_SHARED_REGION_CHECK_NP", + "SYS_SHARED_REGION_MAP_AND_SLIDE_NP", + "SYS_SHMAT", + "SYS_SHMCTL", + "SYS_SHMDT", + "SYS_SHMGET", + "SYS_SHMSYS", + "SYS_SHM_OPEN", + "SYS_SHM_UNLINK", + "SYS_SHUTDOWN", + "SYS_SIGACTION", + "SYS_SIGALTSTACK", + "SYS_SIGNAL", + "SYS_SIGNALFD", + "SYS_SIGNALFD4", + "SYS_SIGPENDING", + "SYS_SIGPROCMASK", + "SYS_SIGQUEUE", + "SYS_SIGQUEUEINFO", + "SYS_SIGRETURN", + "SYS_SIGSUSPEND", + "SYS_SIGSUSPEND_NOCANCEL", + "SYS_SIGTIMEDWAIT", + "SYS_SIGWAIT", + "SYS_SIGWAITINFO", + "SYS_SOCKET", + "SYS_SOCKETCALL", + "SYS_SOCKETPAIR", + "SYS_SPLICE", + "SYS_SSETMASK", + "SYS_SSTK", + "SYS_STACK_SNAPSHOT", + "SYS_STAT", + "SYS_STAT64", + "SYS_STAT64_EXTENDED", + "SYS_STATFS", + "SYS_STATFS64", + "SYS_STATV", + "SYS_STATVFS1", + "SYS_STAT_EXTENDED", + "SYS_STIME", + "SYS_STTY", + "SYS_SWAPCONTEXT", + "SYS_SWAPCTL", + "SYS_SWAPOFF", + "SYS_SWAPON", + "SYS_SYMLINK", + "SYS_SYMLINKAT", + "SYS_SYNC", + "SYS_SYNCFS", + "SYS_SYNC_FILE_RANGE", + "SYS_SYSARCH", + "SYS_SYSCALL", + "SYS_SYSCALL_BASE", + "SYS_SYSFS", + "SYS_SYSINFO", + "SYS_SYSLOG", + "SYS_TEE", + "SYS_TGKILL", + "SYS_THREAD_SELFID", + "SYS_THR_CREATE", + "SYS_THR_EXIT", + "SYS_THR_KILL", + "SYS_THR_KILL2", + "SYS_THR_NEW", + "SYS_THR_SELF", + "SYS_THR_SET_NAME", + "SYS_THR_SUSPEND", + "SYS_THR_WAKE", + "SYS_TIME", + "SYS_TIMERFD_CREATE", + "SYS_TIMERFD_GETTIME", + "SYS_TIMERFD_SETTIME", + "SYS_TIMER_CREATE", + "SYS_TIMER_DELETE", + "SYS_TIMER_GETOVERRUN", + "SYS_TIMER_GETTIME", + "SYS_TIMER_SETTIME", + "SYS_TIMES", + "SYS_TKILL", + "SYS_TRUNCATE", + "SYS_TRUNCATE64", + "SYS_TUXCALL", + "SYS_UGETRLIMIT", + "SYS_ULIMIT", + 
"SYS_UMASK", + "SYS_UMASK_EXTENDED", + "SYS_UMOUNT", + "SYS_UMOUNT2", + "SYS_UNAME", + "SYS_UNDELETE", + "SYS_UNLINK", + "SYS_UNLINKAT", + "SYS_UNMOUNT", + "SYS_UNSHARE", + "SYS_USELIB", + "SYS_USTAT", + "SYS_UTIME", + "SYS_UTIMENSAT", + "SYS_UTIMES", + "SYS_UTRACE", + "SYS_UUIDGEN", + "SYS_VADVISE", + "SYS_VFORK", + "SYS_VHANGUP", + "SYS_VM86", + "SYS_VM86OLD", + "SYS_VMSPLICE", + "SYS_VM_PRESSURE_MONITOR", + "SYS_VSERVER", + "SYS_WAIT4", + "SYS_WAIT4_NOCANCEL", + "SYS_WAIT6", + "SYS_WAITEVENT", + "SYS_WAITID", + "SYS_WAITID_NOCANCEL", + "SYS_WAITPID", + "SYS_WATCHEVENT", + "SYS_WORKQ_KERNRETURN", + "SYS_WORKQ_OPEN", + "SYS_WRITE", + "SYS_WRITEV", + "SYS_WRITEV_NOCANCEL", + "SYS_WRITE_NOCANCEL", + "SYS_YIELD", + "SYS__LLSEEK", + "SYS__LWP_CONTINUE", + "SYS__LWP_CREATE", + "SYS__LWP_CTL", + "SYS__LWP_DETACH", + "SYS__LWP_EXIT", + "SYS__LWP_GETNAME", + "SYS__LWP_GETPRIVATE", + "SYS__LWP_KILL", + "SYS__LWP_PARK", + "SYS__LWP_SELF", + "SYS__LWP_SETNAME", + "SYS__LWP_SETPRIVATE", + "SYS__LWP_SUSPEND", + "SYS__LWP_UNPARK", + "SYS__LWP_UNPARK_ALL", + "SYS__LWP_WAIT", + "SYS__LWP_WAKEUP", + "SYS__NEWSELECT", + "SYS__PSET_BIND", + "SYS__SCHED_GETAFFINITY", + "SYS__SCHED_GETPARAM", + "SYS__SCHED_SETAFFINITY", + "SYS__SCHED_SETPARAM", + "SYS__SYSCTL", + "SYS__UMTX_LOCK", + "SYS__UMTX_OP", + "SYS__UMTX_UNLOCK", + "SYS___ACL_ACLCHECK_FD", + "SYS___ACL_ACLCHECK_FILE", + "SYS___ACL_ACLCHECK_LINK", + "SYS___ACL_DELETE_FD", + "SYS___ACL_DELETE_FILE", + "SYS___ACL_DELETE_LINK", + "SYS___ACL_GET_FD", + "SYS___ACL_GET_FILE", + "SYS___ACL_GET_LINK", + "SYS___ACL_SET_FD", + "SYS___ACL_SET_FILE", + "SYS___ACL_SET_LINK", + "SYS___CLONE", + "SYS___DISABLE_THREADSIGNAL", + "SYS___GETCWD", + "SYS___GETLOGIN", + "SYS___GET_TCB", + "SYS___MAC_EXECVE", + "SYS___MAC_GETFSSTAT", + "SYS___MAC_GET_FD", + "SYS___MAC_GET_FILE", + "SYS___MAC_GET_LCID", + "SYS___MAC_GET_LCTX", + "SYS___MAC_GET_LINK", + "SYS___MAC_GET_MOUNT", + "SYS___MAC_GET_PID", + "SYS___MAC_GET_PROC", + "SYS___MAC_MOUNT", + 
"SYS___MAC_SET_FD", + "SYS___MAC_SET_FILE", + "SYS___MAC_SET_LCTX", + "SYS___MAC_SET_LINK", + "SYS___MAC_SET_PROC", + "SYS___MAC_SYSCALL", + "SYS___OLD_SEMWAIT_SIGNAL", + "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", + "SYS___POSIX_CHOWN", + "SYS___POSIX_FCHOWN", + "SYS___POSIX_LCHOWN", + "SYS___POSIX_RENAME", + "SYS___PTHREAD_CANCELED", + "SYS___PTHREAD_CHDIR", + "SYS___PTHREAD_FCHDIR", + "SYS___PTHREAD_KILL", + "SYS___PTHREAD_MARKCANCEL", + "SYS___PTHREAD_SIGMASK", + "SYS___QUOTACTL", + "SYS___SEMCTL", + "SYS___SEMWAIT_SIGNAL", + "SYS___SEMWAIT_SIGNAL_NOCANCEL", + "SYS___SETLOGIN", + "SYS___SETUGID", + "SYS___SET_TCB", + "SYS___SIGACTION_SIGTRAMP", + "SYS___SIGTIMEDWAIT", + "SYS___SIGWAIT", + "SYS___SIGWAIT_NOCANCEL", + "SYS___SYSCTL", + "SYS___TFORK", + "SYS___THREXIT", + "SYS___THRSIGDIVERT", + "SYS___THRSLEEP", + "SYS___THRWAKEUP", + "S_ARCH1", + "S_ARCH2", + "S_BLKSIZE", + "S_IEXEC", + "S_IFBLK", + "S_IFCHR", + "S_IFDIR", + "S_IFIFO", + "S_IFLNK", + "S_IFMT", + "S_IFREG", + "S_IFSOCK", + "S_IFWHT", + "S_IREAD", + "S_IRGRP", + "S_IROTH", + "S_IRUSR", + "S_IRWXG", + "S_IRWXO", + "S_IRWXU", + "S_ISGID", + "S_ISTXT", + "S_ISUID", + "S_ISVTX", + "S_IWGRP", + "S_IWOTH", + "S_IWRITE", + "S_IWUSR", + "S_IXGRP", + "S_IXOTH", + "S_IXUSR", + "S_LOGIN_SET", + "SecurityAttributes", + "Seek", + "Select", + "Sendfile", + "Sendmsg", + "SendmsgN", + "Sendto", + "Servent", + "SetBpf", + "SetBpfBuflen", + "SetBpfDatalink", + "SetBpfHeadercmpl", + "SetBpfImmediate", + "SetBpfInterface", + "SetBpfPromisc", + "SetBpfTimeout", + "SetCurrentDirectory", + "SetEndOfFile", + "SetEnvironmentVariable", + "SetFileAttributes", + "SetFileCompletionNotificationModes", + "SetFilePointer", + "SetFileTime", + "SetHandleInformation", + "SetKevent", + "SetLsfPromisc", + "SetNonblock", + "Setdomainname", + "Setegid", + "Setenv", + "Seteuid", + "Setfsgid", + "Setfsuid", + "Setgid", + "Setgroups", + "Sethostname", + "Setlogin", + "Setpgid", + "Setpriority", + "Setprivexec", + "Setregid", + "Setresgid", + 
"Setresuid", + "Setreuid", + "Setrlimit", + "Setsid", + "Setsockopt", + "SetsockoptByte", + "SetsockoptICMPv6Filter", + "SetsockoptIPMreq", + "SetsockoptIPMreqn", + "SetsockoptIPv6Mreq", + "SetsockoptInet4Addr", + "SetsockoptInt", + "SetsockoptLinger", + "SetsockoptString", + "SetsockoptTimeval", + "Settimeofday", + "Setuid", + "Setxattr", + "Shutdown", + "SidTypeAlias", + "SidTypeComputer", + "SidTypeDeletedAccount", + "SidTypeDomain", + "SidTypeGroup", + "SidTypeInvalid", + "SidTypeLabel", + "SidTypeUnknown", + "SidTypeUser", + "SidTypeWellKnownGroup", + "Signal", + "SizeofBpfHdr", + "SizeofBpfInsn", + "SizeofBpfProgram", + "SizeofBpfStat", + "SizeofBpfVersion", + "SizeofBpfZbuf", + "SizeofBpfZbufHeader", + "SizeofCmsghdr", + "SizeofICMPv6Filter", + "SizeofIPMreq", + "SizeofIPMreqn", + "SizeofIPv6MTUInfo", + "SizeofIPv6Mreq", + "SizeofIfAddrmsg", + "SizeofIfAnnounceMsghdr", + "SizeofIfData", + "SizeofIfInfomsg", + "SizeofIfMsghdr", + "SizeofIfaMsghdr", + "SizeofIfmaMsghdr", + "SizeofIfmaMsghdr2", + "SizeofInet4Pktinfo", + "SizeofInet6Pktinfo", + "SizeofInotifyEvent", + "SizeofLinger", + "SizeofMsghdr", + "SizeofNlAttr", + "SizeofNlMsgerr", + "SizeofNlMsghdr", + "SizeofRtAttr", + "SizeofRtGenmsg", + "SizeofRtMetrics", + "SizeofRtMsg", + "SizeofRtMsghdr", + "SizeofRtNexthop", + "SizeofSockFilter", + "SizeofSockFprog", + "SizeofSockaddrAny", + "SizeofSockaddrDatalink", + "SizeofSockaddrInet4", + "SizeofSockaddrInet6", + "SizeofSockaddrLinklayer", + "SizeofSockaddrNetlink", + "SizeofSockaddrUnix", + "SizeofTCPInfo", + "SizeofUcred", + "SlicePtrFromStrings", + "SockFilter", + "SockFprog", + "Sockaddr", + "SockaddrDatalink", + "SockaddrGen", + "SockaddrInet4", + "SockaddrInet6", + "SockaddrLinklayer", + "SockaddrNetlink", + "SockaddrUnix", + "Socket", + "SocketControlMessage", + "SocketDisableIPv6", + "Socketpair", + "Splice", + "StartProcess", + "StartupInfo", + "Stat", + "Stat_t", + "Statfs", + "Statfs_t", + "Stderr", + "Stdin", + "Stdout", + "StringBytePtr", + 
"StringByteSlice", + "StringSlicePtr", + "StringToSid", + "StringToUTF16", + "StringToUTF16Ptr", + "Symlink", + "Sync", + "SyncFileRange", + "SysProcAttr", + "SysProcIDMap", + "Syscall", + "Syscall12", + "Syscall15", + "Syscall18", + "Syscall6", + "Syscall9", + "SyscallN", + "Sysctl", + "SysctlUint32", + "Sysctlnode", + "Sysinfo", + "Sysinfo_t", + "Systemtime", + "TCGETS", + "TCIFLUSH", + "TCIOFLUSH", + "TCOFLUSH", + "TCPInfo", + "TCPKeepalive", + "TCP_CA_NAME_MAX", + "TCP_CONGCTL", + "TCP_CONGESTION", + "TCP_CONNECTIONTIMEOUT", + "TCP_CORK", + "TCP_DEFER_ACCEPT", + "TCP_ENABLE_ECN", + "TCP_INFO", + "TCP_KEEPALIVE", + "TCP_KEEPCNT", + "TCP_KEEPIDLE", + "TCP_KEEPINIT", + "TCP_KEEPINTVL", + "TCP_LINGER2", + "TCP_MAXBURST", + "TCP_MAXHLEN", + "TCP_MAXOLEN", + "TCP_MAXSEG", + "TCP_MAXWIN", + "TCP_MAX_SACK", + "TCP_MAX_WINSHIFT", + "TCP_MD5SIG", + "TCP_MD5SIG_MAXKEYLEN", + "TCP_MINMSS", + "TCP_MINMSSOVERLOAD", + "TCP_MSS", + "TCP_NODELAY", + "TCP_NOOPT", + "TCP_NOPUSH", + "TCP_NOTSENT_LOWAT", + "TCP_NSTATES", + "TCP_QUICKACK", + "TCP_RXT_CONNDROPTIME", + "TCP_RXT_FINDROP", + "TCP_SACK_ENABLE", + "TCP_SENDMOREACKS", + "TCP_SYNCNT", + "TCP_VENDOR", + "TCP_WINDOW_CLAMP", + "TCSAFLUSH", + "TCSETS", + "TF_DISCONNECT", + "TF_REUSE_SOCKET", + "TF_USE_DEFAULT_WORKER", + "TF_USE_KERNEL_APC", + "TF_USE_SYSTEM_THREAD", + "TF_WRITE_BEHIND", + "TH32CS_INHERIT", + "TH32CS_SNAPALL", + "TH32CS_SNAPHEAPLIST", + "TH32CS_SNAPMODULE", + "TH32CS_SNAPMODULE32", + "TH32CS_SNAPPROCESS", + "TH32CS_SNAPTHREAD", + "TIME_ZONE_ID_DAYLIGHT", + "TIME_ZONE_ID_STANDARD", + "TIME_ZONE_ID_UNKNOWN", + "TIOCCBRK", + "TIOCCDTR", + "TIOCCONS", + "TIOCDCDTIMESTAMP", + "TIOCDRAIN", + "TIOCDSIMICROCODE", + "TIOCEXCL", + "TIOCEXT", + "TIOCFLAG_CDTRCTS", + "TIOCFLAG_CLOCAL", + "TIOCFLAG_CRTSCTS", + "TIOCFLAG_MDMBUF", + "TIOCFLAG_PPS", + "TIOCFLAG_SOFTCAR", + "TIOCFLUSH", + "TIOCGDEV", + "TIOCGDRAINWAIT", + "TIOCGETA", + "TIOCGETD", + "TIOCGFLAGS", + "TIOCGICOUNT", + "TIOCGLCKTRMIOS", + "TIOCGLINED", + 
"TIOCGPGRP", + "TIOCGPTN", + "TIOCGQSIZE", + "TIOCGRANTPT", + "TIOCGRS485", + "TIOCGSERIAL", + "TIOCGSID", + "TIOCGSIZE", + "TIOCGSOFTCAR", + "TIOCGTSTAMP", + "TIOCGWINSZ", + "TIOCINQ", + "TIOCIXOFF", + "TIOCIXON", + "TIOCLINUX", + "TIOCMBIC", + "TIOCMBIS", + "TIOCMGDTRWAIT", + "TIOCMGET", + "TIOCMIWAIT", + "TIOCMODG", + "TIOCMODS", + "TIOCMSDTRWAIT", + "TIOCMSET", + "TIOCM_CAR", + "TIOCM_CD", + "TIOCM_CTS", + "TIOCM_DCD", + "TIOCM_DSR", + "TIOCM_DTR", + "TIOCM_LE", + "TIOCM_RI", + "TIOCM_RNG", + "TIOCM_RTS", + "TIOCM_SR", + "TIOCM_ST", + "TIOCNOTTY", + "TIOCNXCL", + "TIOCOUTQ", + "TIOCPKT", + "TIOCPKT_DATA", + "TIOCPKT_DOSTOP", + "TIOCPKT_FLUSHREAD", + "TIOCPKT_FLUSHWRITE", + "TIOCPKT_IOCTL", + "TIOCPKT_NOSTOP", + "TIOCPKT_START", + "TIOCPKT_STOP", + "TIOCPTMASTER", + "TIOCPTMGET", + "TIOCPTSNAME", + "TIOCPTYGNAME", + "TIOCPTYGRANT", + "TIOCPTYUNLK", + "TIOCRCVFRAME", + "TIOCREMOTE", + "TIOCSBRK", + "TIOCSCONS", + "TIOCSCTTY", + "TIOCSDRAINWAIT", + "TIOCSDTR", + "TIOCSERCONFIG", + "TIOCSERGETLSR", + "TIOCSERGETMULTI", + "TIOCSERGSTRUCT", + "TIOCSERGWILD", + "TIOCSERSETMULTI", + "TIOCSERSWILD", + "TIOCSER_TEMT", + "TIOCSETA", + "TIOCSETAF", + "TIOCSETAW", + "TIOCSETD", + "TIOCSFLAGS", + "TIOCSIG", + "TIOCSLCKTRMIOS", + "TIOCSLINED", + "TIOCSPGRP", + "TIOCSPTLCK", + "TIOCSQSIZE", + "TIOCSRS485", + "TIOCSSERIAL", + "TIOCSSIZE", + "TIOCSSOFTCAR", + "TIOCSTART", + "TIOCSTAT", + "TIOCSTI", + "TIOCSTOP", + "TIOCSTSTAMP", + "TIOCSWINSZ", + "TIOCTIMESTAMP", + "TIOCUCNTL", + "TIOCVHANGUP", + "TIOCXMTFRAME", + "TOKEN_ADJUST_DEFAULT", + "TOKEN_ADJUST_GROUPS", + "TOKEN_ADJUST_PRIVILEGES", + "TOKEN_ADJUST_SESSIONID", + "TOKEN_ALL_ACCESS", + "TOKEN_ASSIGN_PRIMARY", + "TOKEN_DUPLICATE", + "TOKEN_EXECUTE", + "TOKEN_IMPERSONATE", + "TOKEN_QUERY", + "TOKEN_QUERY_SOURCE", + "TOKEN_READ", + "TOKEN_WRITE", + "TOSTOP", + "TRUNCATE_EXISTING", + "TUNATTACHFILTER", + "TUNDETACHFILTER", + "TUNGETFEATURES", + "TUNGETIFF", + "TUNGETSNDBUF", + "TUNGETVNETHDRSZ", + "TUNSETDEBUG", + 
"TUNSETGROUP", + "TUNSETIFF", + "TUNSETLINK", + "TUNSETNOCSUM", + "TUNSETOFFLOAD", + "TUNSETOWNER", + "TUNSETPERSIST", + "TUNSETSNDBUF", + "TUNSETTXFILTER", + "TUNSETVNETHDRSZ", + "Tee", + "TerminateProcess", + "Termios", + "Tgkill", + "Time", + "Time_t", + "Times", + "Timespec", + "TimespecToNsec", + "Timeval", + "Timeval32", + "TimevalToNsec", + "Timex", + "Timezoneinformation", + "Tms", + "Token", + "TokenAccessInformation", + "TokenAuditPolicy", + "TokenDefaultDacl", + "TokenElevation", + "TokenElevationType", + "TokenGroups", + "TokenGroupsAndPrivileges", + "TokenHasRestrictions", + "TokenImpersonationLevel", + "TokenIntegrityLevel", + "TokenLinkedToken", + "TokenLogonSid", + "TokenMandatoryPolicy", + "TokenOrigin", + "TokenOwner", + "TokenPrimaryGroup", + "TokenPrivileges", + "TokenRestrictedSids", + "TokenSandBoxInert", + "TokenSessionId", + "TokenSessionReference", + "TokenSource", + "TokenStatistics", + "TokenType", + "TokenUIAccess", + "TokenUser", + "TokenVirtualizationAllowed", + "TokenVirtualizationEnabled", + "Tokenprimarygroup", + "Tokenuser", + "TranslateAccountName", + "TranslateName", + "TransmitFile", + "TransmitFileBuffers", + "Truncate", + "UNIX_PATH_MAX", + "USAGE_MATCH_TYPE_AND", + "USAGE_MATCH_TYPE_OR", + "UTF16FromString", + "UTF16PtrFromString", + "UTF16ToString", + "Ucred", + "Umask", + "Uname", + "Undelete", + "UnixCredentials", + "UnixRights", + "Unlink", + "Unlinkat", + "UnmapViewOfFile", + "Unmount", + "Unsetenv", + "Unshare", + "UserInfo10", + "Ustat", + "Ustat_t", + "Utimbuf", + "Utime", + "Utimes", + "UtimesNano", + "Utsname", + "VDISCARD", + "VDSUSP", + "VEOF", + "VEOL", + "VEOL2", + "VERASE", + "VERASE2", + "VINTR", + "VKILL", + "VLNEXT", + "VMIN", + "VQUIT", + "VREPRINT", + "VSTART", + "VSTATUS", + "VSTOP", + "VSUSP", + "VSWTC", + "VT0", + "VT1", + "VTDLY", + "VTIME", + "VWERASE", + "VirtualLock", + "VirtualUnlock", + "WAIT_ABANDONED", + "WAIT_FAILED", + "WAIT_OBJECT_0", + "WAIT_TIMEOUT", + "WALL", + "WALLSIG", + "WALTSIG", + 
"WCLONE", + "WCONTINUED", + "WCOREFLAG", + "WEXITED", + "WLINUXCLONE", + "WNOHANG", + "WNOTHREAD", + "WNOWAIT", + "WNOZOMBIE", + "WOPTSCHECKED", + "WORDSIZE", + "WSABuf", + "WSACleanup", + "WSADESCRIPTION_LEN", + "WSAData", + "WSAEACCES", + "WSAECONNABORTED", + "WSAECONNRESET", + "WSAEnumProtocols", + "WSAID_CONNECTEX", + "WSAIoctl", + "WSAPROTOCOL_LEN", + "WSAProtocolChain", + "WSAProtocolInfo", + "WSARecv", + "WSARecvFrom", + "WSASYS_STATUS_LEN", + "WSASend", + "WSASendTo", + "WSASendto", + "WSAStartup", + "WSTOPPED", + "WTRAPPED", + "WUNTRACED", + "Wait4", + "WaitForSingleObject", + "WaitStatus", + "Win32FileAttributeData", + "Win32finddata", + "Write", + "WriteConsole", + "WriteFile", + "X509_ASN_ENCODING", + "XCASE", + "XP1_CONNECTIONLESS", + "XP1_CONNECT_DATA", + "XP1_DISCONNECT_DATA", + "XP1_EXPEDITED_DATA", + "XP1_GRACEFUL_CLOSE", + "XP1_GUARANTEED_DELIVERY", + "XP1_GUARANTEED_ORDER", + "XP1_IFS_HANDLES", + "XP1_MESSAGE_ORIENTED", + "XP1_MULTIPOINT_CONTROL_PLANE", + "XP1_MULTIPOINT_DATA_PLANE", + "XP1_PARTIAL_MESSAGE", + "XP1_PSEUDO_STREAM", + "XP1_QOS_SUPPORTED", + "XP1_SAN_SUPPORT_SDP", + "XP1_SUPPORT_BROADCAST", + "XP1_SUPPORT_MULTIPOINT", + "XP1_UNI_RECV", + "XP1_UNI_SEND", + }, + "syscall/js": { + "CopyBytesToGo", + "CopyBytesToJS", + "Error", + "Func", + "FuncOf", + "Global", + "Null", + "Type", + "TypeBoolean", + "TypeFunction", + "TypeNull", + "TypeNumber", + "TypeObject", + "TypeString", + "TypeSymbol", + "TypeUndefined", + "Undefined", + "Value", + "ValueError", + "ValueOf", + }, + "testing": { + "AllocsPerRun", + "B", + "Benchmark", + "BenchmarkResult", + "Cover", + "CoverBlock", + "CoverMode", + "Coverage", + "F", + "Init", + "InternalBenchmark", + "InternalExample", + "InternalFuzzTarget", + "InternalTest", + "M", + "Main", + "MainStart", + "PB", + "RegisterCover", + "RunBenchmarks", + "RunExamples", + "RunTests", + "Short", + "T", + "TB", + "Verbose", + }, + "testing/fstest": { + "MapFS", + "MapFile", + "TestFS", + }, + "testing/iotest": { + 
"DataErrReader", + "ErrReader", + "ErrTimeout", + "HalfReader", + "NewReadLogger", + "NewWriteLogger", + "OneByteReader", + "TestReader", + "TimeoutReader", + "TruncateWriter", + }, + "testing/quick": { + "Check", + "CheckEqual", + "CheckEqualError", + "CheckError", + "Config", + "Generator", + "SetupError", + "Value", + }, + "text/scanner": { + "Char", + "Comment", + "EOF", + "Float", + "GoTokens", + "GoWhitespace", + "Ident", + "Int", + "Position", + "RawString", + "ScanChars", + "ScanComments", + "ScanFloats", + "ScanIdents", + "ScanInts", + "ScanRawStrings", + "ScanStrings", + "Scanner", + "SkipComments", + "String", + "TokenString", + }, + "text/tabwriter": { + "AlignRight", + "Debug", + "DiscardEmptyColumns", + "Escape", + "FilterHTML", + "NewWriter", + "StripEscape", + "TabIndent", + "Writer", + }, + "text/template": { + "ExecError", + "FuncMap", + "HTMLEscape", + "HTMLEscapeString", + "HTMLEscaper", + "IsTrue", + "JSEscape", + "JSEscapeString", + "JSEscaper", + "Must", + "New", + "ParseFS", + "ParseFiles", + "ParseGlob", + "Template", + "URLQueryEscaper", + }, + "text/template/parse": { + "ActionNode", + "BoolNode", + "BranchNode", + "BreakNode", + "ChainNode", + "CommandNode", + "CommentNode", + "ContinueNode", + "DotNode", + "FieldNode", + "IdentifierNode", + "IfNode", + "IsEmptyTree", + "ListNode", + "Mode", + "New", + "NewIdentifier", + "NilNode", + "Node", + "NodeAction", + "NodeBool", + "NodeBreak", + "NodeChain", + "NodeCommand", + "NodeComment", + "NodeContinue", + "NodeDot", + "NodeField", + "NodeIdentifier", + "NodeIf", + "NodeList", + "NodeNil", + "NodeNumber", + "NodePipe", + "NodeRange", + "NodeString", + "NodeTemplate", + "NodeText", + "NodeType", + "NodeVariable", + "NodeWith", + "NumberNode", + "Parse", + "ParseComments", + "PipeNode", + "Pos", + "RangeNode", + "SkipFuncCheck", + "StringNode", + "TemplateNode", + "TextNode", + "Tree", + "VariableNode", + "WithNode", + }, + "time": { + "ANSIC", + "After", + "AfterFunc", + "April", + "August", 
+ "Date", + "DateOnly", + "DateTime", + "December", + "Duration", + "February", + "FixedZone", + "Friday", + "Hour", + "January", + "July", + "June", + "Kitchen", + "Layout", + "LoadLocation", + "LoadLocationFromTZData", + "Local", + "Location", + "March", + "May", + "Microsecond", + "Millisecond", + "Minute", + "Monday", + "Month", + "Nanosecond", + "NewTicker", + "NewTimer", + "November", + "Now", + "October", + "Parse", + "ParseDuration", + "ParseError", + "ParseInLocation", + "RFC1123", + "RFC1123Z", + "RFC3339", + "RFC3339Nano", + "RFC822", + "RFC822Z", + "RFC850", + "RubyDate", + "Saturday", + "Second", + "September", + "Since", + "Sleep", + "Stamp", + "StampMicro", + "StampMilli", + "StampNano", + "Sunday", + "Thursday", + "Tick", + "Ticker", + "Time", + "TimeOnly", + "Timer", + "Tuesday", + "UTC", + "Unix", + "UnixDate", + "UnixMicro", + "UnixMilli", + "Until", + "Wednesday", + "Weekday", + }, + "unicode": { + "ASCII_Hex_Digit", + "Adlam", + "Ahom", + "Anatolian_Hieroglyphs", + "Arabic", + "Armenian", + "Avestan", + "AzeriCase", + "Balinese", + "Bamum", + "Bassa_Vah", + "Batak", + "Bengali", + "Bhaiksuki", + "Bidi_Control", + "Bopomofo", + "Brahmi", + "Braille", + "Buginese", + "Buhid", + "C", + "Canadian_Aboriginal", + "Carian", + "CaseRange", + "CaseRanges", + "Categories", + "Caucasian_Albanian", + "Cc", + "Cf", + "Chakma", + "Cham", + "Cherokee", + "Chorasmian", + "Co", + "Common", + "Coptic", + "Cs", + "Cuneiform", + "Cypriot", + "Cyrillic", + "Dash", + "Deprecated", + "Deseret", + "Devanagari", + "Diacritic", + "Digit", + "Dives_Akuru", + "Dogra", + "Duployan", + "Egyptian_Hieroglyphs", + "Elbasan", + "Elymaic", + "Ethiopic", + "Extender", + "FoldCategory", + "FoldScript", + "Georgian", + "Glagolitic", + "Gothic", + "Grantha", + "GraphicRanges", + "Greek", + "Gujarati", + "Gunjala_Gondi", + "Gurmukhi", + "Han", + "Hangul", + "Hanifi_Rohingya", + "Hanunoo", + "Hatran", + "Hebrew", + "Hex_Digit", + "Hiragana", + "Hyphen", + "IDS_Binary_Operator", + 
"IDS_Trinary_Operator", + "Ideographic", + "Imperial_Aramaic", + "In", + "Inherited", + "Inscriptional_Pahlavi", + "Inscriptional_Parthian", + "Is", + "IsControl", + "IsDigit", + "IsGraphic", + "IsLetter", + "IsLower", + "IsMark", + "IsNumber", + "IsOneOf", + "IsPrint", + "IsPunct", + "IsSpace", + "IsSymbol", + "IsTitle", + "IsUpper", + "Javanese", + "Join_Control", + "Kaithi", + "Kannada", + "Katakana", + "Kayah_Li", + "Kharoshthi", + "Khitan_Small_Script", + "Khmer", + "Khojki", + "Khudawadi", + "L", + "Lao", + "Latin", + "Lepcha", + "Letter", + "Limbu", + "Linear_A", + "Linear_B", + "Lisu", + "Ll", + "Lm", + "Lo", + "Logical_Order_Exception", + "Lower", + "LowerCase", + "Lt", + "Lu", + "Lycian", + "Lydian", + "M", + "Mahajani", + "Makasar", + "Malayalam", + "Mandaic", + "Manichaean", + "Marchen", + "Mark", + "Masaram_Gondi", + "MaxASCII", + "MaxCase", + "MaxLatin1", + "MaxRune", + "Mc", + "Me", + "Medefaidrin", + "Meetei_Mayek", + "Mende_Kikakui", + "Meroitic_Cursive", + "Meroitic_Hieroglyphs", + "Miao", + "Mn", + "Modi", + "Mongolian", + "Mro", + "Multani", + "Myanmar", + "N", + "Nabataean", + "Nandinagari", + "Nd", + "New_Tai_Lue", + "Newa", + "Nko", + "Nl", + "No", + "Noncharacter_Code_Point", + "Number", + "Nushu", + "Nyiakeng_Puachue_Hmong", + "Ogham", + "Ol_Chiki", + "Old_Hungarian", + "Old_Italic", + "Old_North_Arabian", + "Old_Permic", + "Old_Persian", + "Old_Sogdian", + "Old_South_Arabian", + "Old_Turkic", + "Oriya", + "Osage", + "Osmanya", + "Other", + "Other_Alphabetic", + "Other_Default_Ignorable_Code_Point", + "Other_Grapheme_Extend", + "Other_ID_Continue", + "Other_ID_Start", + "Other_Lowercase", + "Other_Math", + "Other_Uppercase", + "P", + "Pahawh_Hmong", + "Palmyrene", + "Pattern_Syntax", + "Pattern_White_Space", + "Pau_Cin_Hau", + "Pc", + "Pd", + "Pe", + "Pf", + "Phags_Pa", + "Phoenician", + "Pi", + "Po", + "Prepended_Concatenation_Mark", + "PrintRanges", + "Properties", + "Ps", + "Psalter_Pahlavi", + "Punct", + "Quotation_Mark", + "Radical", + 
"Range16", + "Range32", + "RangeTable", + "Regional_Indicator", + "Rejang", + "ReplacementChar", + "Runic", + "S", + "STerm", + "Samaritan", + "Saurashtra", + "Sc", + "Scripts", + "Sentence_Terminal", + "Sharada", + "Shavian", + "Siddham", + "SignWriting", + "SimpleFold", + "Sinhala", + "Sk", + "Sm", + "So", + "Soft_Dotted", + "Sogdian", + "Sora_Sompeng", + "Soyombo", + "Space", + "SpecialCase", + "Sundanese", + "Syloti_Nagri", + "Symbol", + "Syriac", + "Tagalog", + "Tagbanwa", + "Tai_Le", + "Tai_Tham", + "Tai_Viet", + "Takri", + "Tamil", + "Tangut", + "Telugu", + "Terminal_Punctuation", + "Thaana", + "Thai", + "Tibetan", + "Tifinagh", + "Tirhuta", + "Title", + "TitleCase", + "To", + "ToLower", + "ToTitle", + "ToUpper", + "TurkishCase", + "Ugaritic", + "Unified_Ideograph", + "Upper", + "UpperCase", + "UpperLower", + "Vai", + "Variation_Selector", + "Version", + "Wancho", + "Warang_Citi", + "White_Space", + "Yezidi", + "Yi", + "Z", + "Zanabazar_Square", + "Zl", + "Zp", + "Zs", + }, + "unicode/utf16": { + "AppendRune", + "Decode", + "DecodeRune", + "Encode", + "EncodeRune", + "IsSurrogate", + }, + "unicode/utf8": { + "AppendRune", + "DecodeLastRune", + "DecodeLastRuneInString", + "DecodeRune", + "DecodeRuneInString", + "EncodeRune", + "FullRune", + "FullRuneInString", + "MaxRune", + "RuneCount", + "RuneCountInString", + "RuneError", + "RuneLen", + "RuneSelf", + "RuneStart", + "UTFMax", + "Valid", + "ValidRune", + "ValidString", + }, + "unsafe": { + "Add", + "Alignof", + "Offsetof", + "Pointer", + "Sizeof", + "Slice", + "SliceData", + "String", + "StringData", + }, +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 51b94eac83c..265a1b2b0fa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1324,6 +1324,11 @@ github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk +# github.com/mna/pigeon v1.1.0 +## explicit +github.com/mna/pigeon 
+github.com/mna/pigeon/ast +github.com/mna/pigeon/builder # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -1961,6 +1966,8 @@ golang.org/x/image/tiff/lzw golang.org/x/image/vector # golang.org/x/mod v0.12.0 ## explicit; go 1.17 +golang.org/x/mod/internal/lazyregexp +golang.org/x/mod/module golang.org/x/mod/semver # golang.org/x/net v0.15.0 ## explicit; go 1.17 @@ -2040,18 +2047,23 @@ golang.org/x/time/rate # golang.org/x/tools v0.12.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer +golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath +golang.org/x/tools/imports golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label golang.org/x/tools/internal/event/tag +golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/imports golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal From e9d9757ee82b65aef45e1460bcc53bdb943fe9c6 Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Sun, 10 Sep 2023 12:22:53 +0200 Subject: [PATCH 7/8] enhancement: simplify error handling --- services/search/pkg/query/kql/factory.go | 44 +++++------------------ services/search/pkg/query/kql/validate.go | 37 +++++++++++++++++++ 2 files changed, 46 insertions(+), 35 deletions(-) create mode 100644 services/search/pkg/query/kql/validate.go diff --git a/services/search/pkg/query/kql/factory.go b/services/search/pkg/query/kql/factory.go index 05827184830..bab2d945ae6 100644 --- a/services/search/pkg/query/kql/factory.go +++ 
b/services/search/pkg/query/kql/factory.go @@ -38,24 +38,16 @@ func buildAST(n interface{}, text []byte, pos position) (*ast.Ast, error) { return nil, err } - if len(nodes) == 0 { - return nil, nil + a := &ast.Ast{ + Base: b, + Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolOR}, nodes...), } - nodes = connectNodes(DefaultConnector{sameKeyOPValue: BoolOR}, nodes...) - - switch node := nodes[0].(type) { - case *ast.OperatorNode: - switch node.Value { - case BoolAND, BoolOR: - return nil, StartsWithBinaryOperatorError{Node: node} - } + if err := validateAst(a); err != nil { + return nil, err } - return &ast.Ast{ - Base: b, - Nodes: nodes, - }, nil + return a, nil } func buildStringNode(k, v interface{}, text []byte, pos position) (*ast.StringNode, error) { @@ -170,32 +162,14 @@ func buildGroupNode(k, n interface{}, text []byte, pos position) (*ast.GroupNode return nil, err } - nodes = connectNodes(DefaultConnector{sameKeyOPValue: BoolAND}, nodes...) - gn := &ast.GroupNode{ Base: b, Key: key, - Nodes: nodes, + Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolAND}, nodes...), } - if len(nodes) == 0 { - return gn, nil - } - - switch node := nodes[0].(type) { - case *ast.OperatorNode: - switch node.Value { - case BoolAND, BoolOR: - return nil, StartsWithBinaryOperatorError{Node: node} - } - } - - if key != "" { - for _, node := range nodes { - if ast.NodeKey(node) != "" { - return nil, NamedGroupInvalidNodesError{Node: node} - } - } + if err := validateGroupNode(gn); err != nil { + return nil, err } return gn, nil diff --git a/services/search/pkg/query/kql/validate.go b/services/search/pkg/query/kql/validate.go new file mode 100644 index 00000000000..9e9a2428b99 --- /dev/null +++ b/services/search/pkg/query/kql/validate.go @@ -0,0 +1,37 @@ +package kql + +import ( + "github.com/owncloud/ocis/v2/services/search/pkg/query/ast" +) + +func validateAst(a *ast.Ast) error { + switch node := a.Nodes[0].(type) { + case *ast.OperatorNode: + switch 
node.Value { + case BoolAND, BoolOR: + return StartsWithBinaryOperatorError{Node: node} + } + } + + return nil +} + +func validateGroupNode(n *ast.GroupNode) error { + switch node := n.Nodes[0].(type) { + case *ast.OperatorNode: + switch node.Value { + case BoolAND, BoolOR: + return StartsWithBinaryOperatorError{Node: node} + } + } + + if n.Key != "" { + for _, node := range n.Nodes { + if ast.NodeKey(node) != "" { + return NamedGroupInvalidNodesError{Node: node} + } + } + } + + return nil +} From 63cdc20bb2ee8aeeea3f43401f1f18bed3cdf529 Mon Sep 17 00:00:00 2001 From: Florian Schade Date: Mon, 11 Sep 2023 12:27:33 +0200 Subject: [PATCH 8/8] fix: kql implicit 'AND' and 'OR' follows the ms html spec instead of the pdf spec --- services/search/pkg/query/kql/connect.go | 43 ++--- .../search/pkg/query/kql/dictionary_test.go | 150 +++++++++++++++--- services/search/pkg/query/kql/doc.go | 1 + services/search/pkg/query/kql/factory.go | 2 +- 4 files changed, 148 insertions(+), 48 deletions(-) diff --git a/services/search/pkg/query/kql/connect.go b/services/search/pkg/query/kql/connect.go index fa0ef00829f..3c926d688ce 100644 --- a/services/search/pkg/query/kql/connect.go +++ b/services/search/pkg/query/kql/connect.go @@ -74,47 +74,34 @@ func (c DefaultConnector) Connect(head ast.Node, neighbor ast.Node, connections } // if the current node and the neighbor node have the same key - // the connection is of type OR, same applies if no keys are in place - // - // "" == "" + // the connection is of type OR // // spec: same // author:"John Smith" author:"Jane Smith" // author:"John Smith" OR author:"Jane Smith" // + // if the nodes have NO key, the edge is a AND connection + // + // spec: same + // cat dog + // cat AND dog + // from the spec: + // To construct complex queries, you can combine multiple + // free-text expressions with KQL query operators. 
+ // If there are multiple free-text expressions without any + // operators in between them, the query behavior is the same + // as using the AND operator. + // // nodes inside of group node are handled differently, - // if no explicit operator given, it uses OR + // if no explicit operator given, it uses AND // // spec: same // author:"John Smith" AND author:"Jane Smith" // author:("John Smith" "Jane Smith") - if headKey == neighborKey { + if headKey == neighborKey && headKey != "" && neighborKey != "" { connection.Value = c.sameKeyOPValue } - // decisions based on nearest neighbor node - switch neighbor.(type) { - // nearest neighbor node type can change the default case - // docs says, if the next value node: - // - // is a group and has no key - // - // and the head node has no key - // - // it should be an AND edge - // - // spec: same - // cat (dog OR fox) - // cat AND (dog OR fox) - // - // note: - // sounds contradictory to me - case *ast.GroupNode: - if headKey == "" && neighborKey == "" { - connection.Value = BoolAND - } - } - // decisions based on nearest neighbor operators for i, node := range connections { // consider direct neighbor operator only diff --git a/services/search/pkg/query/kql/dictionary_test.go b/services/search/pkg/query/kql/dictionary_test.go index 0d25f5facd2..7e86456a999 100644 --- a/services/search/pkg/query/kql/dictionary_test.go +++ b/services/search/pkg/query/kql/dictionary_test.go @@ -65,6 +65,7 @@ func TestParse(t *testing.T) { // // https://msopenspecs.azureedge.net/files/MS-KQL/%5bMS-KQL%5d.pdf // https://learn.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-kql/3bbf06cd-8fc1-4277-bd92-8661ccd3c9b0 + // https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference // // ++ // 2.1.2 AND Operator @@ -146,7 +147,7 @@ func TestParse(t *testing.T) { expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{Value: "cat"}, - &ast.OperatorNode{Value: kql.BoolOR}, + 
&ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "dog"}, }, }, @@ -333,7 +334,7 @@ func TestParse(t *testing.T) { expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{Value: "cat"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "dog"}, &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "fox"}, @@ -363,7 +364,7 @@ func TestParse(t *testing.T) { expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{Value: "cat"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "dog"}, &ast.OperatorNode{Value: kql.BoolAND}, &ast.OperatorNode{Value: kql.BoolNOT}, @@ -457,20 +458,19 @@ func TestParse(t *testing.T) { // everything else { name: "FullDictionary", - skip: true, givenQuery: mustJoin(FullDictionary), expectedAst: &ast.Ast{ Nodes: []ast.Node{ &ast.StringNode{Value: "federated"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "search"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "federat*"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "search"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "search"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "fed*"}, &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "author", Value: "John Smith"}, @@ -480,23 +480,23 @@ func TestParse(t *testing.T) { &ast.StringNode{Key: "filename", Value: "budget.xlsx"}, &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "author"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: 
"author"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "author"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "author"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "John Smith"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "author"}, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Value: "John Smith"}, &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{Key: "author", Value: "Shakespear"}, @@ -689,7 +689,7 @@ func TestParse(t *testing.T) { &ast.StringNode{ Value: "😂", }, - &ast.OperatorNode{Value: kql.BoolOR}, + &ast.OperatorNode{Value: kql.BoolAND}, &ast.StringNode{ Value: "*😀 😁*", }, @@ -903,6 +903,118 @@ func TestParse(t *testing.T) { Node: &ast.OperatorNode{Value: kql.BoolOR}, }, }, + { + name: `cat dog`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + }, + }, + }, + { + name: `cat dog fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `(cat dog) fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + }, + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "fox"}, + }, + 
}, + }, + { + name: `(mammal:cat mammal:dog) fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{ + Nodes: []ast.Node{ + &ast.StringNode{Key: "mammal", Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Key: "mammal", Value: "dog"}, + }, + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `mammal:(cat dog) fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{ + Key: "mammal", + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + }, + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "fox"}, + }, + }, + }, + { + name: `mammal:(cat dog) mammal:fox`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{ + Key: "mammal", + Nodes: []ast.Node{ + &ast.StringNode{Value: "cat"}, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.StringNode{Value: "dog"}, + }, + }, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Key: "mammal", Value: "fox"}, + }, + }, + }, + { + name: `title:((Advanced OR Search OR Query) -"Advanced Search Query")`, + expectedAst: &ast.Ast{ + Nodes: []ast.Node{ + &ast.GroupNode{ + Key: "title", + Nodes: []ast.Node{ + &ast.GroupNode{ + Nodes: []ast.Node{ + &ast.StringNode{Value: "Advanced"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "Search"}, + &ast.OperatorNode{Value: kql.BoolOR}, + &ast.StringNode{Value: "Query"}, + }, + }, + &ast.OperatorNode{Value: kql.BoolAND}, + &ast.OperatorNode{Value: kql.BoolNOT}, + &ast.StringNode{Value: "Advanced Search Query"}, + }, + }, + }, + }, + }, } assert := tAssert.New(t) diff --git a/services/search/pkg/query/kql/doc.go b/services/search/pkg/query/kql/doc.go index 577ada26b72..a887affc32f 100644 --- a/services/search/pkg/query/kql/doc.go +++ b/services/search/pkg/query/kql/doc.go @@ -21,6 +21,7 @@ The following spec parts are supported and tested: - 3.3.5 Date Tokens References: + - 
https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference - https://learn.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-kql/3bbf06cd-8fc1-4277-bd92-8661ccd3c9b0 - https://msopenspecs.azureedge.net/files/MS-KQL/%5bMS-KQL%5d.pdf */ diff --git a/services/search/pkg/query/kql/factory.go b/services/search/pkg/query/kql/factory.go index bab2d945ae6..ff9044886a9 100644 --- a/services/search/pkg/query/kql/factory.go +++ b/services/search/pkg/query/kql/factory.go @@ -165,7 +165,7 @@ func buildGroupNode(k, n interface{}, text []byte, pos position) (*ast.GroupNode gn := &ast.GroupNode{ Base: b, Key: key, - Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolAND}, nodes...), + Nodes: connectNodes(DefaultConnector{sameKeyOPValue: BoolOR}, nodes...), } if err := validateGroupNode(gn); err != nil {